From 939fd9774cb23d2b9a4fe5a75e82cf2112902d88 Mon Sep 17 00:00:00 2001 From: Ole Streicher Date: Fri, 18 Jan 2019 09:59:47 +0100 Subject: [PATCH] Import python-fitsio_0.9.12+dfsg.orig.tar.xz [dgit import orig python-fitsio_0.9.12+dfsg.orig.tar.xz] --- LICENSE.txt | 340 ++ MANIFEST.in | 2 + PKG-INFO | 454 +++ README.md | 439 +++ fitsio.egg-info/PKG-INFO | 454 +++ fitsio.egg-info/SOURCES.txt | 183 + fitsio.egg-info/dependency_links.txt | 1 + fitsio.egg-info/requires.txt | 1 + fitsio.egg-info/top_level.txt | 1 + fitsio/__init__.py | 30 + fitsio/fitsio_pywrap.c | 4466 +++++++++++++++++++++++ fitsio/fitslib.py | 4966 ++++++++++++++++++++++++++ fitsio/test.py | 2187 ++++++++++++ fitsio/util.py | 22 + setup.cfg | 4 + setup.py | 242 ++ 16 files changed, 13792 insertions(+) create mode 100644 LICENSE.txt create mode 100644 MANIFEST.in create mode 100644 PKG-INFO create mode 100644 README.md create mode 100644 fitsio.egg-info/PKG-INFO create mode 100644 fitsio.egg-info/SOURCES.txt create mode 100644 fitsio.egg-info/dependency_links.txt create mode 100644 fitsio.egg-info/requires.txt create mode 100644 fitsio.egg-info/top_level.txt create mode 100644 fitsio/__init__.py create mode 100644 fitsio/fitsio_pywrap.c create mode 100644 fitsio/fitslib.py create mode 100644 fitsio/test.py create mode 100644 fitsio/util.py create mode 100644 setup.cfg create mode 100644 setup.py diff --git a/LICENSE.txt b/LICENSE.txt new file mode 100644 index 0000000..3912109 --- /dev/null +++ b/LICENSE.txt @@ -0,0 +1,340 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Library General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. 
+ + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. 
+ + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. 
+ +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. 
If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Library General +Public License instead of this License. diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000..34734b2 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,2 @@ +include *.txt +recursive-include cfitsio3430patch * diff --git a/PKG-INFO b/PKG-INFO new file mode 100644 index 0000000..91b2edf --- /dev/null +++ b/PKG-INFO @@ -0,0 +1,454 @@ +Metadata-Version: 2.1 +Name: fitsio +Version: 0.9.12 +Summary: A full featured python library to read from and write to FITS files. +Home-page: https://github.com/esheldon/fitsio +Author: Erin Scott Sheldon +Author-email: erin.sheldon@gmail.com +License: GPL +Description: A python library to read from and write to FITS files. + + [![Build Status (master)](https://travis-ci.org/esheldon/fitsio.svg?branch=master)](https://travis-ci.org/esheldon/fitsio) + + Do not use numpy 1.10.0 or 1.10.1 + ---------------------------------- + There is a serious performance regression in numpy 1.10 that results + in fitsio running tens to hundreds of times slower. A fix may be + forthcoming in a later release. Please comment here if this + has already impacted your work https://github.com/numpy/numpy/issues/6467 + + Description + ----------- + + This is a python extension written in c and python. Data are read into + numerical python arrays. 
+ + A version of cfitsio is bundled with this package; there is no need to install + your own, nor will this conflict with a version you have installed. + + + Some Features + ------------- + + - Read from and write to image, binary, and ascii table extensions. + - Read arbitrary subsets of table columns and rows without loading all the data + to memory. + - Read image subsets without reading the whole image. Write subsets to existing images. + - Write and read variable length table columns. + - Read images and tables using slice notation similar to numpy arrays. This is like a more + powerful memmap, since it is column-aware for tables. + - Append rows to an existing table. Delete row sets and row ranges. Resize tables, + or insert rows. + - Query the columns and rows in a table. + - Read and write header keywords. + - Read and write images in tile-compressed format (RICE,GZIP,PLIO,HCOMPRESS). + - Read/write gzip files directly. Read unix compress (.Z,.zip) and bzip2 (.bz2) files. + - TDIM information is used to return array columns in the correct shape. + - Write and read string table columns, including array columns of arbitrary + shape. + - Read and write complex, bool (logical), unsigned integer, signed bytes types. + - Write checksums into the header and verify them. + - Insert new columns into tables in-place. + - Iterate over rows in a table. Data are buffered for efficiency. + - python 3 support + + + Examples + -------- + + ```python + import numpy + import fitsio + from fitsio import FITS,FITSHDR + + # Often you just want to quickly read or write data without bothering to + # create a FITS object. In that case, you can use the read and write + # convenience functions. + + # read all data from the first hdu with data + filename='data.fits' + data = fitsio.read(filename) + + # read a subset of rows and columns from a table + data = fitsio.read(filename, rows=[35,1001], columns=['x','y'], ext=2) + + # read the header, or both at once + h = fitsio.read_header(filename, extension) + data,h = fitsio.read(filename, ext=ext, header=True) + + # open the file, write a new binary table extension, and then write the + # data from "recarray" into the table. By default a new extension is + # added to the file. Use clobber=True to overwrite an existing file + # instead. To append rows to an existing table, see below. + fitsio.write(filename, recarray) + + # write an image + fitsio.write(filename, image) + + # NOTE when reading row subsets, the data must still be read from disk. + # This is most efficient if the data are read in the order they appear in + # the file. For this reason, the rows are always returned in row-sorted + # order. + + # + # the FITS class gives you the ability to explore the data, and gives + # more control + # + + # open a FITS file for reading and explore + fits=fitsio.FITS('data.fits') + + # see what is in here; the FITS object prints itself + print(fits) + + file: data.fits + mode: READONLY + extnum hdutype hduname + 0 IMAGE_HDU + 1 BINARY_TBL mytable + + # at the python prompt, you could just type "fits" and it will automatically + # print itself. Same for ipython. + >>> fits + file: data.fits + ...
etc + + # explore the extensions, either by extension number or + # extension name if available + print(fits[0]) + + file: data.fits + extension: 0 + type: IMAGE_HDU + image info: + data type: f8 + dims: [4096,2048] + + print(fits['mytable']) # can also use fits[1] + + file: data.fits + extension: 1 + type: BINARY_TBL + extname: mytable + rows: 4328342 + column info: + i1scalar u1 + f f4 + fvec f4 array[2] + darr f8 array[3,2] + dvarr f8 varray[10] + s S5 + svec S6 array[3] + svar S0 vstring[8] + sarr S2 array[4,3] + + # See bottom for how to get more information for an extension + + # [-1] refers to the last HDU + print(fits[-1]) + ... + + # if there are multiple HDUs with the same name, and an EXTVER + # is set, you can use it. Here extver=2 + # fits['mytable',2] + + + # read the image from extension zero + img = fits[0].read() + img = fits[0][:,:] + + # read a subset of the image without reading the whole image + img = fits[0][25:35, 45:55] + + + # read all rows and columns from a binary table extension + data = fits[1].read() + data = fits['mytable'].read() + data = fits[1][:] + + # read a subset of rows and columns. By default uses a case-insensitive + # match. The result retains the names with original case. If columns is a + # sequence, a recarray is returned + data = fits[1].read(rows=[1,5], columns=['index','x','y']) + + # Similar but using slice notation + # row subsets + data = fits[1][10:20] + data = fits[1][10:20:2] + data = fits[1][[1,5,18]] + + # all rows of column 'x' + data = fits[1]['x'][:] + + # Read a few columns at once. This is more efficient than separate reads for + # each column + data = fits[1]['x','y'][:] + + # General column and row subsets. As noted above, the data are returned + # in row sorted order for efficiency reasons. + columns=['index','x','y'] + rows=[1,5] + data = fits[1][columns][rows] + + # iterate over rows in a table HDU + # faster if we buffer some rows, let's buffer 1000 at a time + fits=fitsio.FITS(filename,iter_row_buffer=1000) + for row in fits[1]: + print(row) + + # iterate over HDUs in a FITS object + for hdu in fits: + data=hdu.read() + + # Note dvarr shows type varray[10] and svar shows type vstring[8]. These + # are variable length columns and the number specified is the maximum size. + # By default they are read into fixed-length fields in the output array. + # You can override this by constructing the FITS object with the vstorage + # keyword or specifying vstorage when reading. Sending vstorage='object' + # will store the data in variable size object fields to save memory; the + # default is vstorage='fixed'. Object fields can also be written out to a + # new FITS file as variable length to save disk space. + + fits = fitsio.FITS(filename,vstorage='object') + # OR + data = fits[1].read(vstorage='object') + print(data['dvarr'].dtype) + dtype('object') + + + # you can grab a FITS HDU object to simplify notation + hdu1 = fits[1] + data = hdu1['x','y'][35:50] + + # get rows that satisfy the input expression. See "Row Filtering + # Specification" in the cfitsio manual (note no temporary table is + # created in this case, contrary to the cfitsio docs) + w=fits[1].where("x > 0.25 && y < 35.0") + data = fits[1][w] + + # read the header + h = fits[0].read_header() + print(h['BITPIX']) + -64 + + fits.close() + + + # now write some data + fits = FITS('test.fits','rw') + + + # create a rec array.
Note vstr + # is a variable length string + nrows=35 + data = numpy.zeros(nrows, dtype=[('index','i4'),('vstr','O'),('x','f8'), + ('arr','f4',(3,4))]) + data['index'] = numpy.arange(nrows,dtype='i4') + data['x'] = numpy.random.random(nrows) + data['vstr'] = [str(i) for i in range(nrows)] + data['arr'] = numpy.arange(nrows*3*4,dtype='f4').reshape(nrows,3,4) + + # create a new table extension and write the data + fits.write(data) + + # can also be a list of ordinary arrays if you send the names + array_list=[xarray,yarray,namearray] + names=['x','y','name'] + fits.write(array_list, names=names) + + # similarly a dict of arrays + fits.write(dict_of_arrays) + fits.write(dict_of_arrays, names=names) # control name order + + # append more rows to the table. The fields in data2 should match columns + # in the table. Missing columns will be filled with zeros + fits[-1].append(data2) + + # insert a new column into a table + fits[-1].insert_column('newcol', data) + + # insert with a specific colnum + fits[-1].insert_column('newcol', data, colnum=2) + + # overwrite rows + fits[-1].write(data) + + # overwrite starting at a particular row. The table will grow if needed + fits[-1].write(data, firstrow=350) + + + # create an image + img=numpy.arange(2*3,dtype='i4').reshape(2,3) + + # write an image in a new HDU (if this is a new file, the primary HDU) + fits.write(img) + + # write an image with rice compression + fits.write(img, compress='rice') + + # overwrite the image + fits[ext].write(img2) + + # write into an existing image, starting at the location [300,400] + # the image will be expanded if needed + fits[ext].write(img3, start=[300,400]) + + # change the shape of the image on disk + fits[ext].reshape([250,100]) + + # add checksums for the data + fits[-1].write_checksum() + + # can later verify data integrity + fits[-1].verify_checksum() + + # you can also write a header at the same time. The header can be + # - a simple dict (no comments) + # - a list of dicts with 'name','value','comment' fields + # - a FITSHDR object + + hdict = {'somekey': 35, 'location': 'kitt peak'} + fits.write(data, header=hdict) + hlist = [{'name':'observer', 'value':'ES', 'comment':'who'}, + {'name':'location','value':'CTIO'}, + {'name':'photometric','value':True}] + fits.write(data, header=hlist) + hdr=FITSHDR(hlist) + fits.write(data, header=hdr) + + # you can add individual keys to an existing HDU + fits[1].write_key(name, value, comment="my comment") + + # Write multiple header keys to an existing HDU. Here records + # is the same as sent with header= above + fits[1].write_keys(records) + + # write special COMMENT fields + fits[1].write_comment("observer JS") + fits[1].write_comment("we had good weather") + + # write special HISTORY fields + fits[1].write_history("processed with software X") + fits[1].write_history("re-processed with software Y") + + fits.close() + + # using a context, the file is closed automatically after leaving the block + with FITS('path/to/file') as fits: + data = fits[ext].read() + + # you can check if a header exists using "in": + if 'blah' in fits: + data=fits['blah'].read() + if 2 in fits: + data=fits[2].read() + + # methods to get more information about an extension.
For extension 1: + fits[1].get_info() # lots of info about the extension + fits[1].has_data() # returns True if data is present in extension + fits[1].get_extname() + fits[1].get_extver() + fits[1].get_extnum() # return zero-offset extension number + fits[1].get_exttype() # 'BINARY_TBL' or 'ASCII_TBL' or 'IMAGE_HDU' + fits[1].get_offsets() # byte offsets (header_start, data_start, data_end) + fits[1].is_compressed() # for images. True if tile-compressed + fits[1].get_colnames() # for tables + fits[1].get_colname(colnum) # for tables find the name from column number + fits[1].get_nrows() # for tables + fits[1].get_rec_dtype() # for tables + fits[1].get_rec_column_descr() # for tables + fits[1].get_vstorage() # for tables, storage mechanism for variable + # length columns + + # public attributes you can feel free to change as needed + fits[1].lower # If True, lower case colnames on output + fits[1].upper # If True, upper case colnames on output + fits[1].case_sensitive # if True, names are matched case sensitive + ``` + Installation + ------------ + + The easiest way is using pip or conda. To get the latest release + + pip install fitsio + + # update fitsio (and everything else) + pip install fitsio --upgrade + + # if pip refuses to update to a newer version + pip install fitsio --upgrade --ignore-installed + + # if you only want to upgrade fitsio + pip install fitsio --no-deps --upgrade --ignore-installed + + # for conda, use conda-forge + conda install -c conda-forge fitsio + + You can also get the latest source tarball release from + + https://pypi.python.org/pypi/fitsio + + or the bleeding edge source from github or use git. To check out + the code for the first time + + git clone https://github.com/esheldon/fitsio.git + + Or at a later time to update to the latest + + cd fitsio + git pull + + For a source tarball, use tar xvfz to untar the file, enter the fitsio directory and type + + python setup.py install + + optionally with a prefix + + python setup.py install --prefix=/some/path + + Requirements + ------------ + + - python 2 or python 3 + - you need a c compiler and build tools like Make + - You need numerical python (numpy). + + Tests + ----- + The unit tests should all pass for full support. + + import fitsio + fitsio.test.test() + + Some tests may fail if certain libraries are not available, such + as bzip2. This failure only implies that bzipped files cannot + be read; other functionality is unaffected. + + TODO + ---- + + - HDU groups: does anyone use these? If so, open an issue! + + Notes on cfitsio bundling + ------------------------- + + We bundle partly because many deployed versions of cfitsio in the wild do not + have support for interesting features like tiled image compression. Bundling + a version that meets our needs is a safe alternative. + + Note on array ordering + ---------------------- + + Since numpy uses C order and FITS uses Fortran order, we have to write the TDIM + and image dimensions in reverse order, but write the data as is. Then we need + to also reverse the dims as read from the header when creating the numpy dtype, + but read as is.
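+ + For example (a sketch, not from the upstream docs; it assumes a file data.fits whose image HDU carries NAXIS1=2048 and NAXIS2=4096 in its header): + + ```python + import fitsio + + # FITS headers list the fast axis first (NAXIS1=2048, NAXIS2=4096), + # while numpy uses C order, so the dims come back reversed + img = fitsio.read('data.fits') + print(img.shape) # -> (4096, 2048) + ```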
+ + + + +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: License :: OSI Approved :: GNU General Public License (GPL) +Classifier: Topic :: Scientific/Engineering :: Astronomy +Classifier: Intended Audience :: Science/Research +Description-Content-Type: text/markdown; charset=UTF-8; variant=GFM diff --git a/README.md b/README.md new file mode 100644 index 0000000..248e117 --- /dev/null +++ b/README.md @@ -0,0 +1,439 @@ +A python library to read from and write to FITS files. + +[![Build Status (master)](https://travis-ci.org/esheldon/fitsio.svg?branch=master)](https://travis-ci.org/esheldon/fitsio) + +Do not use numpy 1.10.0 or 1.10.1 +---------------------------------- +There is a serious performance regression in numpy 1.10 that results +in fitsio running tens to hundreds of times slower. A fix may be +forthcoming in a later release. Please comment here if this +has already impacted your work https://github.com/numpy/numpy/issues/6467 + +Description +----------- + +This is a python extension written in c and python. Data are read into +numerical python arrays. + +A version of cfitsio is bundled with this package; there is no need to install +your own, nor will this conflict with a version you have installed. + + +Some Features +------------- + +- Read from and write to image, binary, and ascii table extensions. +- Read arbitrary subsets of table columns and rows without loading all the data + to memory. +- Read image subsets without reading the whole image. Write subsets to existing images. +- Write and read variable length table columns. +- Read images and tables using slice notation similar to numpy arrays. This is like a more + powerful memmap, since it is column-aware for tables. +- Append rows to an existing table. Delete row sets and row ranges. Resize tables, + or insert rows. +- Query the columns and rows in a table. +- Read and write header keywords. +- Read and write images in tile-compressed format (RICE,GZIP,PLIO,HCOMPRESS). +- Read/write gzip files directly. Read unix compress (.Z,.zip) and bzip2 (.bz2) files. +- TDIM information is used to return array columns in the correct shape. +- Write and read string table columns, including array columns of arbitrary + shape. +- Read and write complex, bool (logical), unsigned integer, signed bytes types. +- Write checksums into the header and verify them. +- Insert new columns into tables in-place. +- Iterate over rows in a table. Data are buffered for efficiency. +- python 3 support + + +Examples +-------- + +```python +import numpy +import fitsio +from fitsio import FITS,FITSHDR + +# Often you just want to quickly read or write data without bothering to +# create a FITS object. In that case, you can use the read and write +# convenience functions. + +# read all data from the first hdu with data +filename='data.fits' +data = fitsio.read(filename) + +# read a subset of rows and columns from a table +data = fitsio.read(filename, rows=[35,1001], columns=['x','y'], ext=2) + +# read the header, or both at once +h = fitsio.read_header(filename, extension) +data,h = fitsio.read(filename, ext=ext, header=True) + +# open the file, write a new binary table extension, and then write the +# data from "recarray" into the table. By default a new extension is +# added to the file. Use clobber=True to overwrite an existing file +# instead. To append rows to an existing table, see below.
+fitsio.write(filename, recarray) + +# write an image +fitsio.write(filename, image) + +# NOTE when reading row subsets, the data must still be read from disk. +# This is most efficient if the data are read in the order they appear in +# the file. For this reason, the rows are always returned in row-sorted +# order. + +# +# the FITS class gives you the ability to explore the data, and gives +# more control +# + +# open a FITS file for reading and explore +fits=fitsio.FITS('data.fits') + +# see what is in here; the FITS object prints itself +print(fits) + +file: data.fits +mode: READONLY +extnum hdutype hduname +0 IMAGE_HDU +1 BINARY_TBL mytable + +# at the python prompt, you could just type "fits" and it will automatically +# print itself. Same for ipython. +>>> fits +file: data.fits +... etc + +# explore the extensions, either by extension number or +# extension name if available +print(fits[0]) + +file: data.fits +extension: 0 +type: IMAGE_HDU +image info: + data type: f8 + dims: [4096,2048] + +print(fits['mytable']) # can also use fits[1] + +file: data.fits +extension: 1 +type: BINARY_TBL +extname: mytable +rows: 4328342 +column info: + i1scalar u1 + f f4 + fvec f4 array[2] + darr f8 array[3,2] + dvarr f8 varray[10] + s S5 + svec S6 array[3] + svar S0 vstring[8] + sarr S2 array[4,3] + +# See bottom for how to get more information for an extension + +# [-1] refers to the last HDU +print(fits[-1]) +... + +# if there are multiple HDUs with the same name, and an EXTVER +# is set, you can use it. Here extver=2 +# fits['mytable',2] + + +# read the image from extension zero +img = fits[0].read() +img = fits[0][:,:] + +# read a subset of the image without reading the whole image +img = fits[0][25:35, 45:55] + + +# read all rows and columns from a binary table extension +data = fits[1].read() +data = fits['mytable'].read() +data = fits[1][:] + +# read a subset of rows and columns. By default uses a case-insensitive +# match. The result retains the names with original case. If columns is a +# sequence, a recarray is returned +data = fits[1].read(rows=[1,5], columns=['index','x','y']) + +# Similar but using slice notation +# row subsets +data = fits[1][10:20] +data = fits[1][10:20:2] +data = fits[1][[1,5,18]] + +# all rows of column 'x' +data = fits[1]['x'][:] + +# Read a few columns at once. This is more efficient than separate reads for +# each column +data = fits[1]['x','y'][:] + +# General column and row subsets. As noted above, the data are returned +# in row sorted order for efficiency reasons. +columns=['index','x','y'] +rows=[1,5] +data = fits[1][columns][rows] + +# iterate over rows in a table HDU +# faster if we buffer some rows, let's buffer 1000 at a time +fits=fitsio.FITS(filename,iter_row_buffer=1000) +for row in fits[1]: + print(row) + +# iterate over HDUs in a FITS object +for hdu in fits: + data=hdu.read() + +# Note dvarr shows type varray[10] and svar shows type vstring[8]. These +# are variable length columns and the number specified is the maximum size. +# By default they are read into fixed-length fields in the output array. +# You can override this by constructing the FITS object with the vstorage +# keyword or specifying vstorage when reading. Sending vstorage='object' +# will store the data in variable size object fields to save memory; the +# default is vstorage='fixed'. Object fields can also be written out to a +# new FITS file as variable length to save disk space.
+ +fits = fitsio.FITS(filename,vstorage='object') +# OR +data = fits[1].read(vstorage='object') +print(data['dvarr'].dtype) + dtype('object') + + +# you can grab a FITS HDU object to simplify notation +hdu1 = fits[1] +data = hdu1['x','y'][35:50] + +# get rows that satisfy the input expression. See "Row Filtering +# Specification" in the cfitsio manual (note no temporary table is +# created in this case, contrary to the cfitsio docs) +w=fits[1].where("x > 0.25 && y < 35.0") +data = fits[1][w] + +# read the header +h = fits[0].read_header() +print(h['BITPIX']) + -64 + +fits.close() + + +# now write some data +fits = FITS('test.fits','rw') + + +# create a rec array. Note vstr +# is a variable length string +nrows=35 +data = numpy.zeros(nrows, dtype=[('index','i4'),('vstr','O'),('x','f8'), + ('arr','f4',(3,4))]) +data['index'] = numpy.arange(nrows,dtype='i4') +data['x'] = numpy.random.random(nrows) +data['vstr'] = [str(i) for i in range(nrows)] +data['arr'] = numpy.arange(nrows*3*4,dtype='f4').reshape(nrows,3,4) + +# create a new table extension and write the data +fits.write(data) + +# can also be a list of ordinary arrays if you send the names +array_list=[xarray,yarray,namearray] +names=['x','y','name'] +fits.write(array_list, names=names) + +# similarly a dict of arrays +fits.write(dict_of_arrays) +fits.write(dict_of_arrays, names=names) # control name order + +# append more rows to the table. The fields in data2 should match columns +# in the table. Missing columns will be filled with zeros +fits[-1].append(data2) + +# insert a new column into a table +fits[-1].insert_column('newcol', data) + +# insert with a specific colnum +fits[-1].insert_column('newcol', data, colnum=2) + +# overwrite rows +fits[-1].write(data) + +# overwrite starting at a particular row. The table will grow if needed +fits[-1].write(data, firstrow=350) + + +# create an image +img=numpy.arange(2*3,dtype='i4').reshape(2,3) + +# write an image in a new HDU (if this is a new file, the primary HDU) +fits.write(img) + +# write an image with rice compression +fits.write(img, compress='rice') + +# overwrite the image +fits[ext].write(img2) + +# write into an existing image, starting at the location [300,400] +# the image will be expanded if needed +fits[ext].write(img3, start=[300,400]) + +# change the shape of the image on disk +fits[ext].reshape([250,100]) + +# add checksums for the data +fits[-1].write_checksum() + +# can later verify data integrity +fits[-1].verify_checksum() + +# you can also write a header at the same time. The header can be +# - a simple dict (no comments) +# - a list of dicts with 'name','value','comment' fields +# - a FITSHDR object + +hdict = {'somekey': 35, 'location': 'kitt peak'} +fits.write(data, header=hdict) +hlist = [{'name':'observer', 'value':'ES', 'comment':'who'}, + {'name':'location','value':'CTIO'}, + {'name':'photometric','value':True}] +fits.write(data, header=hlist) +hdr=FITSHDR(hlist) +fits.write(data, header=hdr) + +# you can add individual keys to an existing HDU +fits[1].write_key(name, value, comment="my comment") + +# Write multiple header keys to an existing HDU.
Here records +# is the same as sent with header= above +fits[1].write_keys(records) + +# write special COMMENT fields +fits[1].write_comment("observer JS") +fits[1].write_comment("we had good weather") + +# write special HISTORY fields +fits[1].write_history("processed with software X") +fits[1].write_history("re-processed with software Y") + +fits.close() + +# using a context, the file is closed automatically after leaving the block +with FITS('path/to/file') as fits: + data = fits[ext].read() + + # you can check if a header exists using "in": + if 'blah' in fits: + data=fits['blah'].read() + if 2 in fits: + data=fits[2].read() + +# methods to get more information about an extension. For extension 1: +fits[1].get_info() # lots of info about the extension +fits[1].has_data() # returns True if data is present in extension +fits[1].get_extname() +fits[1].get_extver() +fits[1].get_extnum() # return zero-offset extension number +fits[1].get_exttype() # 'BINARY_TBL' or 'ASCII_TBL' or 'IMAGE_HDU' +fits[1].get_offsets() # byte offsets (header_start, data_start, data_end) +fits[1].is_compressed() # for images. True if tile-compressed +fits[1].get_colnames() # for tables +fits[1].get_colname(colnum) # for tables find the name from column number +fits[1].get_nrows() # for tables +fits[1].get_rec_dtype() # for tables +fits[1].get_rec_column_descr() # for tables +fits[1].get_vstorage() # for tables, storage mechanism for variable + # length columns + +# public attributes you can feel free to change as needed +fits[1].lower # If True, lower case colnames on output +fits[1].upper # If True, upper case colnames on output +fits[1].case_sensitive # if True, names are matched case sensitive +``` +Installation +------------ + +The easiest way is using pip or conda. To get the latest release + + pip install fitsio + + # update fitsio (and everything else) + pip install fitsio --upgrade + + # if pip refuses to update to a newer version + pip install fitsio --upgrade --ignore-installed + + # if you only want to upgrade fitsio + pip install fitsio --no-deps --upgrade --ignore-installed + + # for conda, use conda-forge + conda install -c conda-forge fitsio + +You can also get the latest source tarball release from + + https://pypi.python.org/pypi/fitsio + +or the bleeding edge source from github or use git. To check out +the code for the first time + + git clone https://github.com/esheldon/fitsio.git + +Or at a later time to update to the latest + + cd fitsio + git pull + +For a source tarball, use tar xvfz to untar the file, enter the fitsio directory and type + + python setup.py install + +optionally with a prefix + + python setup.py install --prefix=/some/path + +Requirements +------------ + + - python 2 or python 3 + - you need a c compiler and build tools like Make + - You need numerical python (numpy). + +Tests +----- +The unit tests should all pass for full support. + + import fitsio + fitsio.test.test() + +Some tests may fail if certain libraries are not available, such +as bzip2. This failure only implies that bzipped files cannot +be read; other functionality is unaffected. + +TODO +---- + +- HDU groups: does anyone use these? If so, open an issue! + +Notes on cfitsio bundling +------------------------- + +We bundle partly because many deployed versions of cfitsio in the wild do not +have support for interesting features like tiled image compression. Bundling +a version that meets our needs is a safe alternative.
+ +Note on array ordering +---------------------- + +Since numpy uses C order and FITS uses Fortran order, we have to write the TDIM +and image dimensions in reverse order, but write the data as is. Then we need +to also reverse the dims as read from the header when creating the numpy dtype, +but read as is. + + + diff --git a/fitsio.egg-info/PKG-INFO b/fitsio.egg-info/PKG-INFO new file mode 100644 index 0000000..91b2edf --- /dev/null +++ b/fitsio.egg-info/PKG-INFO @@ -0,0 +1,454 @@ +Metadata-Version: 2.1 +Name: fitsio +Version: 0.9.12 +Summary: A full featured python library to read from and write to FITS files. +Home-page: https://github.com/esheldon/fitsio +Author: Erin Scott Sheldon +Author-email: erin.sheldon@gmail.com +License: GPL +Description: A python library to read from and write to FITS files. + + [![Build Status (master)](https://travis-ci.org/esheldon/fitsio.svg?branch=master)](https://travis-ci.org/esheldon/fitsio) + + Do not use numpy 1.10.0 or 1.10.1 + ---------------------------------- + There is a serious performance regression in numpy 1.10 that results + in fitsio running tens to hundreds of times slower. A fix may be + forthcoming in a later release. Please comment here if this + has already impacted your work https://github.com/numpy/numpy/issues/6467 + + Description + ----------- + + This is a python extension written in c and python. Data are read into + numerical python arrays. + + A version of cfitsio is bundled with this package; there is no need to install + your own, nor will this conflict with a version you have installed. + + + Some Features + ------------- + + - Read from and write to image, binary, and ascii table extensions. + - Read arbitrary subsets of table columns and rows without loading all the data + to memory. + - Read image subsets without reading the whole image. Write subsets to existing images. + - Write and read variable length table columns. + - Read images and tables using slice notation similar to numpy arrays. This is like a more + powerful memmap, since it is column-aware for tables. + - Append rows to an existing table. Delete row sets and row ranges. Resize tables, + or insert rows. + - Query the columns and rows in a table. + - Read and write header keywords. + - Read and write images in tile-compressed format (RICE,GZIP,PLIO,HCOMPRESS). + - Read/write gzip files directly. Read unix compress (.Z,.zip) and bzip2 (.bz2) files. + - TDIM information is used to return array columns in the correct shape. + - Write and read string table columns, including array columns of arbitrary + shape. + - Read and write complex, bool (logical), unsigned integer, signed bytes types. + - Write checksums into the header and verify them. + - Insert new columns into tables in-place. + - Iterate over rows in a table. Data are buffered for efficiency. + - python 3 support + + + Examples + -------- + + ```python + import numpy + import fitsio + from fitsio import FITS,FITSHDR + + # Often you just want to quickly read or write data without bothering to + # create a FITS object. In that case, you can use the read and write + # convenience functions.
+ + # read all data from the first hdu with data + filename='data.fits' + data = fitsio.read(filename) + + # read a subset of rows and columns from a table + data = fitsio.read(filename, rows=[35,1001], columns=['x','y'], ext=2) + + # read the header, or both at once + h = fitsio.read_header(filename, extension) + data,h = fitsio.read(filename, ext=ext, header=True) + + # open the file, write a new binary table extension, and then write the + # data from "recarray" into the table. By default a new extension is + # added to the file. Use clobber=True to overwrite an existing file + # instead. To append rows to an existing table, see below. + fitsio.write(filename, recarray) + + # write an image + fitsio.write(filename, image) + + # NOTE when reading row subsets, the data must still be read from disk. + # This is most efficient if the data are read in the order they appear in + # the file. For this reason, the rows are always returned in row-sorted + # order. + + # + # the FITS class gives you the ability to explore the data, and gives + # more control + # + + # open a FITS file for reading and explore + fits=fitsio.FITS('data.fits') + + # see what is in here; the FITS object prints itself + print(fits) + + file: data.fits + mode: READONLY + extnum hdutype hduname + 0 IMAGE_HDU + 1 BINARY_TBL mytable + + # at the python prompt, you could just type "fits" and it will automatically + # print itself. Same for ipython. + >>> fits + file: data.fits + ... etc + + # explore the extensions, either by extension number or + # extension name if available + print(fits[0]) + + file: data.fits + extension: 0 + type: IMAGE_HDU + image info: + data type: f8 + dims: [4096,2048] + + print(fits['mytable']) # can also use fits[1] + + file: data.fits + extension: 1 + type: BINARY_TBL + extname: mytable + rows: 4328342 + column info: + i1scalar u1 + f f4 + fvec f4 array[2] + darr f8 array[3,2] + dvarr f8 varray[10] + s S5 + svec S6 array[3] + svar S0 vstring[8] + sarr S2 array[4,3] + + # See bottom for how to get more information for an extension + + # [-1] refers to the last HDU + print(fits[-1]) + ... + + # if there are multiple HDUs with the same name, and an EXTVER + # is set, you can use it. Here extver=2 + # fits['mytable',2] + + + # read the image from extension zero + img = fits[0].read() + img = fits[0][:,:] + + # read a subset of the image without reading the whole image + img = fits[0][25:35, 45:55] + + + # read all rows and columns from a binary table extension + data = fits[1].read() + data = fits['mytable'].read() + data = fits[1][:] + + # read a subset of rows and columns. By default uses a case-insensitive + # match. The result retains the names with original case. If columns is a + # sequence, a recarray is returned + data = fits[1].read(rows=[1,5], columns=['index','x','y']) + + # Similar but using slice notation + # row subsets + data = fits[1][10:20] + data = fits[1][10:20:2] + data = fits[1][[1,5,18]] + + # all rows of column 'x' + data = fits[1]['x'][:] + + # Read a few columns at once. This is more efficient than separate reads for + # each column + data = fits[1]['x','y'][:] + + # General column and row subsets. As noted above, the data are returned + # in row sorted order for efficiency reasons.
+ columns=['index','x','y'] + rows=[1,5] + data = fits[1][columns][rows] + + # iterate over rows in a table HDU + # faster if we buffer some rows, let's buffer 1000 at a time + fits=fitsio.FITS(filename,iter_row_buffer=1000) + for row in fits[1]: + print(row) + + # iterate over HDUs in a FITS object + for hdu in fits: + data=hdu.read() + + # Note dvarr shows type varray[10] and svar shows type vstring[8]. These + # are variable length columns and the number specified is the maximum size. + # By default they are read into fixed-length fields in the output array. + # You can override this by constructing the FITS object with the vstorage + # keyword or specifying vstorage when reading. Sending vstorage='object' + # will store the data in variable size object fields to save memory; the + # default is vstorage='fixed'. Object fields can also be written out to a + # new FITS file as variable length to save disk space. + + fits = fitsio.FITS(filename,vstorage='object') + # OR + data = fits[1].read(vstorage='object') + print(data['dvarr'].dtype) + dtype('object') + + + # you can grab a FITS HDU object to simplify notation + hdu1 = fits[1] + data = hdu1['x','y'][35:50] + + # get rows that satisfy the input expression. See "Row Filtering + # Specification" in the cfitsio manual (note no temporary table is + # created in this case, contrary to the cfitsio docs) + w=fits[1].where("x > 0.25 && y < 35.0") + data = fits[1][w] + + # read the header + h = fits[0].read_header() + print(h['BITPIX']) + -64 + + fits.close() + + + # now write some data + fits = FITS('test.fits','rw') + + + # create a rec array. Note vstr + # is a variable length string + nrows=35 + data = numpy.zeros(nrows, dtype=[('index','i4'),('vstr','O'),('x','f8'), + ('arr','f4',(3,4))]) + data['index'] = numpy.arange(nrows,dtype='i4') + data['x'] = numpy.random.random(nrows) + data['vstr'] = [str(i) for i in range(nrows)] + data['arr'] = numpy.arange(nrows*3*4,dtype='f4').reshape(nrows,3,4) + + # create a new table extension and write the data + fits.write(data) + + # can also be a list of ordinary arrays if you send the names + array_list=[xarray,yarray,namearray] + names=['x','y','name'] + fits.write(array_list, names=names) + + # similarly a dict of arrays + fits.write(dict_of_arrays) + fits.write(dict_of_arrays, names=names) # control name order + + # append more rows to the table. The fields in data2 should match columns + # in the table. Missing columns will be filled with zeros + fits[-1].append(data2) + + # insert a new column into a table + fits[-1].insert_column('newcol', data) + + # insert with a specific colnum + fits[-1].insert_column('newcol', data, colnum=2) + + # overwrite rows + fits[-1].write(data) + + # overwrite starting at a particular row. The table will grow if needed + fits[-1].write(data, firstrow=350) + + + # create an image + img=numpy.arange(2*3,dtype='i4').reshape(2,3) + + # write an image in a new HDU (if this is a new file, the primary HDU) + fits.write(img) + + # write an image with rice compression + fits.write(img, compress='rice') + + # overwrite the image + fits[ext].write(img2) + + # write into an existing image, starting at the location [300,400] + # the image will be expanded if needed + fits[ext].write(img3, start=[300,400]) + + # change the shape of the image on disk + fits[ext].reshape([250,100]) + + # add checksums for the data + fits[-1].write_checksum() + + # can later verify data integrity + fits[-1].verify_checksum() + + # you can also write a header at the same time.
The header can be + # - a simple dict (no comments) + # - a list of dicts with 'name','value','comment' fields + # - a FITSHDR object + + hdict = {'somekey': 35, 'location': 'kitt peak'} + fits.write(data, header=hdict) + hlist = [{'name':'observer', 'value':'ES', 'comment':'who'}, + {'name':'location','value':'CTIO'}, + {'name':'photometric','value':True}] + fits.write(data, header=hlist) + hdr=FITSHDR(hlist) + fits.write(data, header=hdr) + + # you can add individual keys to an existing HDU + fits[1].write_key(name, value, comment="my comment") + + # Write multiple header keys to an existing HDU. Here records + # is the same as sent with header= above + fits[1].write_keys(records) + + # write special COMMENT fields + fits[1].write_comment("observer JS") + fits[1].write_comment("we had good weather") + + # write special HISTORY fields + fits[1].write_history("processed with software X") + fits[1].write_history("re-processed with software Y") + + fits.close() + + # using a context, the file is closed automatically after leaving the block + with FITS('path/to/file') as fits: + data = fits[ext].read() + + # you can check if a header exists using "in": + if 'blah' in fits: + data=fits['blah'].read() + if 2 in fits: + data=fits[2].read() + + # methods to get more information about an extension. For extension 1: + fits[1].get_info() # lots of info about the extension + fits[1].has_data() # returns True if data is present in extension + fits[1].get_extname() + fits[1].get_extver() + fits[1].get_extnum() # return zero-offset extension number + fits[1].get_exttype() # 'BINARY_TBL' or 'ASCII_TBL' or 'IMAGE_HDU' + fits[1].get_offsets() # byte offsets (header_start, data_start, data_end) + fits[1].is_compressed() # for images. True if tile-compressed + fits[1].get_colnames() # for tables + fits[1].get_colname(colnum) # for tables find the name from column number + fits[1].get_nrows() # for tables + fits[1].get_rec_dtype() # for tables + fits[1].get_rec_column_descr() # for tables + fits[1].get_vstorage() # for tables, storage mechanism for variable + # length columns + + # public attributes you can feel free to change as needed + fits[1].lower # If True, lower case colnames on output + fits[1].upper # If True, upper case colnames on output + fits[1].case_sensitive # if True, names are matched case sensitive + ``` + Installation + ------------ + + The easiest way is using pip or conda. To get the latest release + + pip install fitsio + + # update fitsio (and everything else) + pip install fitsio --upgrade + + # if pip refuses to update to a newer version + pip install fitsio --upgrade --ignore-installed + + # if you only want to upgrade fitsio + pip install fitsio --no-deps --upgrade --ignore-installed + + # for conda, use conda-forge + conda install -c conda-forge fitsio + + You can also get the latest source tarball release from + + https://pypi.python.org/pypi/fitsio + + or the bleeding edge source from github or use git. To check out + the code for the first time + + git clone https://github.com/esheldon/fitsio.git + + Or at a later time to update to the latest + + cd fitsio + git pull + + For a source tarball, use tar xvfz to untar the file, enter the fitsio directory and type + + python setup.py install + + optionally with a prefix + + python setup.py install --prefix=/some/path + + Requirements + ------------ + + - python 2 or python 3 + - you need a c compiler and build tools like Make + - You need numerical python (numpy). + + Tests + ----- + The unit tests should all pass for full support.
+
+You can also get the latest source tarball release from
+
+    https://pypi.python.org/pypi/fitsio
+
+or the bleeding edge source from github using git. To check out
+the code for the first time
+
+    git clone https://github.com/esheldon/fitsio.git
+
+Or at a later time to update to the latest
+
+    cd fitsio
+    git pull
+
+If you are using the source tarball, untar it with tar xvfz, enter
+the fitsio directory and type
+
+    python setup.py install
+
+optionally with a prefix
+
+    python setup.py install --prefix=/some/path
+
+Requirements
+------------
+
+- python 2 or python 3
+- You need a C compiler and build tools such as make
+- You need numerical python (numpy).
+
+Tests
+-----
+The unit tests should all pass for full support.
+
+    import fitsio
+    fitsio.test.test()
+
+Some tests may fail if certain libraries are not available, such
+as bzip2. Such a failure only means that bzipped files cannot
+be read; other functionality is unaffected.
+
+TODO
+----
+
+- HDU groups: does anyone use these? If so open an issue!
+
+Notes on cfitsio bundling
+-------------------------
+
+We bundle partly because many deployed versions of cfitsio in the wild do not
+have support for interesting features like tiled image compression. Bundling
+a version that meets our needs is a safe alternative.
+
+Note on array ordering
+----------------------
+
+Since numpy uses C order while FITS uses Fortran order, we have to write the
+TDIM and image dimensions in reverse order, but write the data as is. We then
+also need to reverse the dims as read from the header when creating the numpy
+dtype, but read the data as is.
+
+
+
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: License :: OSI Approved :: GNU General Public License (GPL)
+Classifier: Topic :: Scientific/Engineering :: Astronomy
+Classifier: Intended Audience :: Science/Research
+Description-Content-Type: text/markdown; charset=UTF-8; variant=GFM
diff --git a/fitsio.egg-info/SOURCES.txt b/fitsio.egg-info/SOURCES.txt
new file mode 100644
index 0000000..47a2c17
--- /dev/null
+++ b/fitsio.egg-info/SOURCES.txt
@@ -0,0 +1,183 @@
+LICENSE.txt
+MANIFEST.in
+README.md
+setup.py
+cfitsio3430patch/CMakeLists.txt
+cfitsio3430patch/FindPthreads.cmake
+cfitsio3430patch/License.txt
+cfitsio3430patch/Makefile.in
+cfitsio3430patch/README
+cfitsio3430patch/README.MacOS
+cfitsio3430patch/README.win
+cfitsio3430patch/README_OLD.win
+cfitsio3430patch/buffers.c
+cfitsio3430patch/cfileio.c
+cfitsio3430patch/cfitsio.pc.in
+cfitsio3430patch/cfitsio_mac.sit.hqx
+cfitsio3430patch/cfortran.h
+cfitsio3430patch/checksum.c
+cfitsio3430patch/config.guess
+cfitsio3430patch/config.sub
+cfitsio3430patch/configure
+cfitsio3430patch/configure.in
+cfitsio3430patch/cookbook.c
+cfitsio3430patch/cookbook.f
+cfitsio3430patch/drvrfile.c
+cfitsio3430patch/drvrgsiftp.c
+cfitsio3430patch/drvrgsiftp.h
+cfitsio3430patch/drvrmem.c
+cfitsio3430patch/drvrnet.c
+cfitsio3430patch/drvrsmem.c
+cfitsio3430patch/drvrsmem.h
+cfitsio3430patch/editcol.c
+cfitsio3430patch/edithdu.c
+cfitsio3430patch/eval.l
+cfitsio3430patch/eval.y
+cfitsio3430patch/eval_defs.h
+cfitsio3430patch/eval_f.c
+cfitsio3430patch/eval_l.c
+cfitsio3430patch/eval_tab.h
+cfitsio3430patch/eval_y.c
+cfitsio3430patch/f77.inc
+cfitsio3430patch/f77_wrap.h
+cfitsio3430patch/f77_wrap1.c
+cfitsio3430patch/f77_wrap2.c
+cfitsio3430patch/f77_wrap3.c
+cfitsio3430patch/f77_wrap4.c
+cfitsio3430patch/fits_hcompress.c
+cfitsio3430patch/fits_hdecompress.c
+cfitsio3430patch/fitscopy.c
+cfitsio3430patch/fitscore.c
+cfitsio3430patch/fitsio.h
+cfitsio3430patch/fitsio2.h
+cfitsio3430patch/fpack.c
+cfitsio3430patch/fpack.h
+cfitsio3430patch/fpackutil.c
+cfitsio3430patch/funpack.c
+cfitsio3430patch/getcol.c
+cfitsio3430patch/getcolb.c
+cfitsio3430patch/getcold.c
+cfitsio3430patch/getcole.c
+cfitsio3430patch/getcoli.c
+cfitsio3430patch/getcolj.c
+cfitsio3430patch/getcolk.c
+cfitsio3430patch/getcoll.c
+cfitsio3430patch/getcols.c
+cfitsio3430patch/getcolsb.c
+cfitsio3430patch/getcolui.c
+cfitsio3430patch/getcoluj.c
+cfitsio3430patch/getcoluk.c
+cfitsio3430patch/getkey.c
+cfitsio3430patch/group.c
+cfitsio3430patch/group.h
+cfitsio3430patch/grparser.c
+cfitsio3430patch/grparser.h
+cfitsio3430patch/histo.c
+cfitsio3430patch/imcompress.c
+cfitsio3430patch/imcopy.c +cfitsio3430patch/install-sh +cfitsio3430patch/iraffits.c +cfitsio3430patch/iter_a.c +cfitsio3430patch/iter_a.f +cfitsio3430patch/iter_a.fit +cfitsio3430patch/iter_b.c +cfitsio3430patch/iter_b.f +cfitsio3430patch/iter_b.fit +cfitsio3430patch/iter_c.c +cfitsio3430patch/iter_c.f +cfitsio3430patch/iter_c.fit +cfitsio3430patch/iter_image.c +cfitsio3430patch/iter_var.c +cfitsio3430patch/longnam.h +cfitsio3430patch/makefile.bc +cfitsio3430patch/makefile.vcc +cfitsio3430patch/makepc.bat +cfitsio3430patch/modkey.c +cfitsio3430patch/pliocomp.c +cfitsio3430patch/putcol.c +cfitsio3430patch/putcolb.c +cfitsio3430patch/putcold.c +cfitsio3430patch/putcole.c +cfitsio3430patch/putcoli.c +cfitsio3430patch/putcolj.c +cfitsio3430patch/putcolk.c +cfitsio3430patch/putcoll.c +cfitsio3430patch/putcols.c +cfitsio3430patch/putcolsb.c +cfitsio3430patch/putcolu.c +cfitsio3430patch/putcolui.c +cfitsio3430patch/putcoluj.c +cfitsio3430patch/putcoluk.c +cfitsio3430patch/putkey.c +cfitsio3430patch/quantize.c +cfitsio3430patch/region.c +cfitsio3430patch/region.h +cfitsio3430patch/ricecomp.c +cfitsio3430patch/sample.tpl +cfitsio3430patch/scalnull.c +cfitsio3430patch/simplerng.c +cfitsio3430patch/simplerng.h +cfitsio3430patch/smem.c +cfitsio3430patch/speed.c +cfitsio3430patch/swapproc.c +cfitsio3430patch/testf77.f +cfitsio3430patch/testf77.out +cfitsio3430patch/testf77.std +cfitsio3430patch/testprog.c +cfitsio3430patch/testprog.out +cfitsio3430patch/testprog.std +cfitsio3430patch/testprog.tpt +cfitsio3430patch/vmsieee.c +cfitsio3430patch/wcssub.c +cfitsio3430patch/wcsutil.c +cfitsio3430patch/winDumpExts.mak +cfitsio3430patch/windumpexts.c +cfitsio3430patch/cfitsio.xcodeproj/project.pbxproj +cfitsio3430patch/docs/cfitsio.pdf +cfitsio3430patch/docs/cfitsio.ps +cfitsio3430patch/docs/cfitsio.tex +cfitsio3430patch/docs/cfitsio.toc +cfitsio3430patch/docs/cfortran.doc +cfitsio3430patch/docs/changes.txt +cfitsio3430patch/docs/fitsio.doc +cfitsio3430patch/docs/fitsio.pdf +cfitsio3430patch/docs/fitsio.ps +cfitsio3430patch/docs/fitsio.tex +cfitsio3430patch/docs/fitsio.toc +cfitsio3430patch/docs/fpackguide.pdf +cfitsio3430patch/docs/quick.pdf +cfitsio3430patch/docs/quick.ps +cfitsio3430patch/docs/quick.tex +cfitsio3430patch/docs/quick.toc +cfitsio3430patch/zlib/adler32.c +cfitsio3430patch/zlib/crc32.c +cfitsio3430patch/zlib/crc32.h +cfitsio3430patch/zlib/deflate.c +cfitsio3430patch/zlib/deflate.h +cfitsio3430patch/zlib/infback.c +cfitsio3430patch/zlib/inffast.c +cfitsio3430patch/zlib/inffast.h +cfitsio3430patch/zlib/inffixed.h +cfitsio3430patch/zlib/inflate.c +cfitsio3430patch/zlib/inflate.h +cfitsio3430patch/zlib/inftrees.c +cfitsio3430patch/zlib/inftrees.h +cfitsio3430patch/zlib/trees.c +cfitsio3430patch/zlib/trees.h +cfitsio3430patch/zlib/uncompr.c +cfitsio3430patch/zlib/zcompress.c +cfitsio3430patch/zlib/zconf.h +cfitsio3430patch/zlib/zlib.h +cfitsio3430patch/zlib/zuncompress.c +cfitsio3430patch/zlib/zutil.c +cfitsio3430patch/zlib/zutil.h +fitsio/__init__.py +fitsio/fitsio_pywrap.c +fitsio/fitslib.py +fitsio/test.py +fitsio/util.py +fitsio.egg-info/PKG-INFO +fitsio.egg-info/SOURCES.txt +fitsio.egg-info/dependency_links.txt +fitsio.egg-info/requires.txt +fitsio.egg-info/top_level.txt \ No newline at end of file diff --git a/fitsio.egg-info/dependency_links.txt b/fitsio.egg-info/dependency_links.txt new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/fitsio.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/fitsio.egg-info/requires.txt b/fitsio.egg-info/requires.txt new file mode 100644 
index 0000000..24ce15a
--- /dev/null
+++ b/fitsio.egg-info/requires.txt
@@ -0,0 +1 @@
+numpy
diff --git a/fitsio.egg-info/top_level.txt b/fitsio.egg-info/top_level.txt
new file mode 100644
index 0000000..78387cd
--- /dev/null
+++ b/fitsio.egg-info/top_level.txt
@@ -0,0 +1 @@
+fitsio
diff --git a/fitsio/__init__.py b/fitsio/__init__.py
new file mode 100644
index 0000000..74de019
--- /dev/null
+++ b/fitsio/__init__.py
@@ -0,0 +1,30 @@
+"""
+A python library to read and write data to FITS files using cfitsio.
+See the docs at https://github.com/esheldon/fitsio for example
+usage.
+"""
+
+__version__='0.9.12'
+
+from . import fitslib
+from . import util
+
+from .fitslib import FITS
+from .fitslib import FITSHDR
+from .fitslib import FITSRecord
+from .fitslib import FITSCard
+
+from .fitslib import read
+from .fitslib import read_header
+from .fitslib import read_scamp_head
+from .fitslib import write
+from .fitslib import READONLY
+from .fitslib import READWRITE
+
+from .fitslib import BINARY_TBL, ASCII_TBL, IMAGE_HDU
+
+from .fitslib import FITSRuntimeWarning
+
+from .util import cfitsio_version
+
+from . import test
diff --git a/fitsio/fitsio_pywrap.c b/fitsio/fitsio_pywrap.c
new file mode 100644
index 0000000..32f3576
--- /dev/null
+++ b/fitsio/fitsio_pywrap.c
@@ -0,0 +1,4466 @@
+/*
+ * fitsio_pywrap.c
+ *
+ * This is a CPython wrapper for the cfitsio library.
+
+   Copyright (C) 2011 Erin Sheldon, BNL. erin dot sheldon at gmail dot com
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2 of the License, or
+   (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+
+#include <Python.h>
+#include <string.h>
+#include "fitsio.h"
+#include "fitsio2.h"
+//#include "fitsio_pywrap_lists.h"
+#include <numpy/arrayobject.h>
+
+
+// this is not defined anywhere in cfitsio except in
+// the fits file structure
+#define CFITSIO_MAX_ARRAY_DIMS 99
+
+// not sure where this is defined in numpy...
+#define NUMPY_MAX_DIMS 32
+
+struct PyFITSObject {
+    PyObject_HEAD
+    fitsfile* fits;
+};
+
+
+// check unicode for python3, string for python2
+int is_python_string(const PyObject* obj)
+{
+#if PY_MAJOR_VERSION >= 3
+    return PyUnicode_Check(obj) || PyBytes_Check(obj);
+#else
+    return PyUnicode_Check(obj) || PyString_Check(obj);
+#endif
+}
+/*
+
+   get a string version of the object. New memory
+   is allocated and the receiver must clean it up.
+ +*/ + +// unicode is common to python 2 and 3 +static char* get_unicode_as_string(PyObject* obj) +{ + PyObject* tmp=NULL; + char* strdata=NULL; + tmp = PyObject_CallMethod(obj,"encode",NULL); + + strdata = strdup( PyBytes_AsString(tmp) ); + Py_XDECREF(tmp); + + return strdata; +} + +static char* get_object_as_string(PyObject* obj) +{ + PyObject* format=NULL; + PyObject* args=NULL; + char* strdata=NULL; + PyObject* tmpobj1=NULL; + + if (PyUnicode_Check(obj)) { + + strdata=get_unicode_as_string(obj); + + } else { + +#if PY_MAJOR_VERSION >= 3 + + if (PyBytes_Check(obj)) { + strdata = strdup( PyBytes_AsString(obj) ); + } else { + PyObject* tmpobj2=NULL; + format = Py_BuildValue("s","%s"); + // this is not a string object + args=PyTuple_New(1); + + PyTuple_SetItem(args,0,obj); + tmpobj2 = PyUnicode_Format(format, args); + tmpobj1 = PyObject_CallMethod(tmpobj2,"encode",NULL); + + Py_XDECREF(args); + Py_XDECREF(tmpobj2); + + strdata = strdup( PyBytes_AsString(tmpobj1) ); + Py_XDECREF(tmpobj1); + Py_XDECREF(format); + } + +#else + // convert to a string as needed + if (PyString_Check(obj)) { + strdata = strdup( PyString_AsString(obj) ); + } else { + format = Py_BuildValue("s","%s"); + args=PyTuple_New(1); + + PyTuple_SetItem(args,0,obj); + tmpobj1= PyString_Format(format, args); + + strdata = strdup( PyString_AsString(tmpobj1) ); + Py_XDECREF(args); + Py_XDECREF(tmpobj1); + Py_XDECREF(format); + } +#endif + } + + return strdata; +} + +static void +set_ioerr_string_from_status(int status) { + char status_str[FLEN_STATUS], errmsg[FLEN_ERRMSG]; + char message[1024]; + + int nleft=1024; + + if (status) { + fits_get_errstatus(status, status_str); /* get the error description */ + + sprintf(message, "FITSIO status = %d: %s\n", status, status_str); + + nleft -= strlen(status_str)+1; + + while ( nleft > 0 && fits_read_errmsg(errmsg) ) { /* get error stack messages */ + strncat(message, errmsg, nleft-1); + nleft -= strlen(errmsg)+1; + if (nleft >= 2) { + strncat(message, "\n", nleft-1); + } + nleft-=2; + } + PyErr_SetString(PyExc_IOError, message); + } + return; +} + +/* + string list helper functions +*/ + +struct stringlist { + size_t size; + char** data; +}; + +static struct stringlist* stringlist_new(void) { + struct stringlist* slist=NULL; + + slist = malloc(sizeof(struct stringlist)); + slist->size = 0; + slist->data=NULL; + return slist; +} +// push a copy of the string onto the string list +static void stringlist_push(struct stringlist* slist, const char* str) { + size_t newsize=0; + size_t i=0; + + newsize = slist->size+1; + slist->data = realloc(slist->data, sizeof(char*)*newsize); + slist->size += 1; + + i = slist->size-1; + + slist->data[i] = strdup(str); +} + +static void stringlist_push_size(struct stringlist* slist, size_t slen) { + size_t newsize=0; + size_t i=0; + + newsize = slist->size+1; + slist->data = realloc(slist->data, sizeof(char*)*newsize); + slist->size += 1; + + i = slist->size-1; + + slist->data[i] = calloc(slen+1,sizeof(char)); + //slist->data[i] = malloc(sizeof(char)*(slen+1)); + //memset(slist->data[i], 0, slen+1); +} +static struct stringlist* stringlist_delete(struct stringlist* slist) { + if (slist != NULL) { + size_t i=0; + if (slist->data != NULL) { + for (i=0; i < slist->size; i++) { + free(slist->data[i]); + } + } + free(slist->data); + free(slist); + } + return NULL; +} + + +/* +static void stringlist_print(struct stringlist* slist) { + size_t i=0; + if (slist == NULL) { + return; + } + for (i=0; isize; i++) { + printf(" slist[%ld]: %s\n", i, slist->data[i]); + } 
+} +*/ + + +static int stringlist_addfrom_listobj(struct stringlist* slist, + PyObject* listObj, + const char* listname) { + size_t size=0, i=0; + char* tmpstr=NULL; + + if (!PyList_Check(listObj)) { + PyErr_Format(PyExc_ValueError, "Expected a list for %s.", listname); + return 1; + } + size = PyList_Size(listObj); + + for (i=0; ifits, filename, &status)) { + set_ioerr_string_from_status(status); + return -1; + } + } else { + if (fits_open_file(&self->fits, filename, mode, &status)) { + set_ioerr_string_from_status(status); + return -1; + } + } + + return 0; +} + + +static PyObject * +PyFITSObject_repr(struct PyFITSObject* self) { + + if (self->fits != NULL) { + int status=0; + char filename[FLEN_FILENAME]; + char repr[2056]; + + if (fits_file_name(self->fits, filename, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + sprintf(repr, "fits file: %s", filename); + return Py_BuildValue("s",repr); + } else { + return Py_BuildValue("s","none"); + } +} + +static PyObject * +PyFITSObject_filename(struct PyFITSObject* self) { + + if (self->fits != NULL) { + int status=0; + char filename[FLEN_FILENAME]; + PyObject* fnameObj=NULL; + if (fits_file_name(self->fits, filename, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + fnameObj = Py_BuildValue("s",filename); + return fnameObj; + } else { + PyErr_SetString(PyExc_ValueError, "file is not open, cannot determine name"); + return NULL; + } +} + + + +static PyObject * +PyFITSObject_close(struct PyFITSObject* self) +{ + int status=0; + if (fits_close_file(self->fits, &status)) { + self->fits=NULL; + /* + set_ioerr_string_from_status(status); + return NULL; + */ + } + self->fits=NULL; + Py_RETURN_NONE; +} + + + +static void +PyFITSObject_dealloc(struct PyFITSObject* self) +{ + int status=0; + fits_close_file(self->fits, &status); +#if PY_MAJOR_VERSION >= 3 + // introduced in python 2.6 + Py_TYPE(self)->tp_free((PyObject*)self); +#else + // old way, removed in python 3 + self->ob_type->tp_free((PyObject*)self); +#endif +} + + +// this will need to be updated for array string columns. +// I'm using a tcolumn* here, could cause problems +static long get_groupsize(tcolumn* colptr) { + long gsize=0; + if (colptr->tdatatype == TSTRING) { + //gsize = colptr->twidth; + gsize = colptr->trepeat; + } else { + gsize = colptr->twidth*colptr->trepeat; + } + return gsize; +} +static npy_int64* get_int64_from_array(PyObject* arr, npy_intp* ncols) { + + npy_int64* colnums; + int npy_type=0, check=0; + + if (!PyArray_Check(arr)) { + PyErr_SetString(PyExc_TypeError, "int64 array must be an array."); + return NULL; + } + + npy_type = PyArray_TYPE(arr); + + // on some platforms, creating an 'i8' array gives it a longlong + // dtype. 
Just make sure it is 8 bytes + check= + (npy_type == NPY_INT64) + | + (npy_type==NPY_LONGLONG && sizeof(npy_longlong)==sizeof(npy_int64)); + if (!check) { + PyErr_Format(PyExc_TypeError, + "array must be an int64 array (%d), got %d.", + NPY_INT64,npy_type); + return NULL; + } + if (!PyArray_ISCONTIGUOUS(arr)) { + PyErr_SetString(PyExc_TypeError, "int64 array must be a contiguous."); + return NULL; + } + + colnums = PyArray_DATA(arr); + *ncols = PyArray_SIZE(arr); + + return colnums; +} + +// move hdu by name and possibly version, return the hdu number +static PyObject * +PyFITSObject_movnam_hdu(struct PyFITSObject* self, PyObject* args) { + int status=0; + int hdutype=ANY_HDU; // means we don't care if its image or table + char* extname=NULL; + int extver=0; // zero means it is ignored + int hdunum=0; + + if (self->fits == NULL) { + PyErr_SetString(PyExc_ValueError, "fits file is NULL"); + return NULL; + } + + if (!PyArg_ParseTuple(args, (char*)"isi", &hdutype, &extname, &extver)) { + return NULL; + } + + if (fits_movnam_hdu(self->fits, hdutype, extname, extver, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + fits_get_hdu_num(self->fits, &hdunum); + return PyLong_FromLong((long)hdunum); +} + + + +static PyObject * +PyFITSObject_movabs_hdu(struct PyFITSObject* self, PyObject* args) { + int hdunum=0, hdutype=0; + int status=0; + PyObject* hdutypeObj=NULL; + + if (self->fits == NULL) { + PyErr_SetString(PyExc_ValueError, "fits file is NULL"); + return NULL; + } + + if (!PyArg_ParseTuple(args, (char*)"i", &hdunum)) { + return NULL; + } + + if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + hdutypeObj = PyLong_FromLong((long)hdutype); + return hdutypeObj; +} + +// get info for the specified HDU +static PyObject * +PyFITSObject_get_hdu_info(struct PyFITSObject* self, PyObject* args) { + int hdunum=0, hdutype=0, ext=0; + int status=0, tstatus=0, is_compressed=0; + PyObject* dict=NULL; + + char extname[FLEN_VALUE]; + char hduname[FLEN_VALUE]; + int extver=0, hduver=0; + + long long header_start; + long long data_start; + long long data_end; + + if (self->fits == NULL) { + PyErr_SetString(PyExc_ValueError, "fits file is NULL"); + return NULL; + } + + if (!PyArg_ParseTuple(args, (char*)"i", &hdunum)) { + return NULL; + } + + if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + + + + + dict = PyDict_New(); + ext=hdunum-1; + + add_long_to_dict(dict, "hdunum", (long)hdunum); + add_long_to_dict(dict, "extnum", (long)ext); + add_long_to_dict(dict, "hdutype", (long)hdutype); + + + tstatus=0; + if (fits_read_key(self->fits, TSTRING, "EXTNAME", extname, NULL, &tstatus)==0) { + add_string_to_dict(dict, "extname", extname); + } else { + add_string_to_dict(dict, "extname", ""); + } + + tstatus=0; + if (fits_read_key(self->fits, TSTRING, "HDUNAME", hduname, NULL, &tstatus)==0) { + add_string_to_dict(dict, "hduname", hduname); + } else { + add_string_to_dict(dict, "hduname", ""); + } + + tstatus=0; + if (fits_read_key(self->fits, TINT, "EXTVER", &extver, NULL, &tstatus)==0) { + add_long_to_dict(dict, "extver", (long)extver); + } else { + add_long_to_dict(dict, "extver", (long)0); + } + + tstatus=0; + if (fits_read_key(self->fits, TINT, "HDUVER", &hduver, NULL, &tstatus)==0) { + add_long_to_dict(dict, "hduver", (long)hduver); + } else { + add_long_to_dict(dict, "hduver", (long)0); + } + + tstatus=0; + 
is_compressed=fits_is_compressed_image(self->fits, &tstatus); + add_long_to_dict(dict, "is_compressed_image", (long)is_compressed); + + + // get byte offsets + if (0==fits_get_hduaddrll(self->fits, &header_start, &data_start, &data_end, &tstatus)) { + add_long_long_to_dict(dict, "header_start", (long)header_start); + add_long_long_to_dict(dict, "data_start", (long)data_start); + add_long_long_to_dict(dict, "data_end", (long)data_end); + } else { + add_long_long_to_dict(dict, "header_start", -1); + add_long_long_to_dict(dict, "data_start", -1); + add_long_long_to_dict(dict, "data_end", -1); + } + + + int ndims=0; + int maxdim=CFITSIO_MAX_ARRAY_DIMS; + LONGLONG dims[CFITSIO_MAX_ARRAY_DIMS]; + if (hdutype == IMAGE_HDU) { + // move this into it's own func + int tstatus=0; + int bitpix=0; + int bitpix_equiv=0; + char comptype[20]; + PyObject* dimsObj=PyList_New(0); + int i=0; + + //if (fits_read_imghdrll(self->fits, maxdim, simple_p, &bitpix, &ndims, + // dims, pcount_p, gcount_p, extend_p, &status)) { + if (fits_get_img_paramll(self->fits, maxdim, &bitpix, &ndims, dims, &tstatus)) { + add_string_to_dict(dict,"error","could not determine image parameters"); + } else { + add_long_to_dict(dict,"ndims",(long)ndims); + add_long_to_dict(dict,"img_type",(long)bitpix); + + fits_get_img_equivtype(self->fits, &bitpix_equiv, &status); + add_long_to_dict(dict,"img_equiv_type",(long)bitpix_equiv); + + tstatus=0; + if (fits_read_key(self->fits, TSTRING, "ZCMPTYPE", + comptype, NULL, &tstatus)==0) { + add_string_to_dict(dict,"comptype",comptype); + } else { + add_none_to_dict(dict,"comptype"); + } + + for (i=0; ifits, &nrows, &tstatus); + fits_get_num_cols(self->fits, &ncols, &tstatus); + add_long_long_to_dict(dict,"nrows",(long long)nrows); + add_long_to_dict(dict,"ncols",(long)ncols); + + { + PyObject* d = NULL; + tcolumn* col=NULL; + struct stringlist* names=NULL; + struct stringlist* tforms=NULL; + names=stringlist_new(); + tforms=stringlist_new(); + + for (i=0; ifits, ncols, NULL, NULL, + names->data, tforms->data, + NULL, NULL, NULL, &tstatus); + + for (i=0; idata[i]); + add_string_to_dict(d,"tform",tforms->data[i]); + + fits_get_coltypell(self->fits, i+1, &type, &repeat, &width, &tstatus); + add_long_to_dict(d,"type",(long)type); + add_long_long_to_dict(d,"repeat",(long long)repeat); + add_long_long_to_dict(d,"width",(long long)width); + + fits_get_eqcoltypell(self->fits,i+1,&type,&repeat,&width, &tstatus); + add_long_to_dict(d,"eqtype",(long)type); + + tstatus=0; + if (fits_read_tdimll(self->fits, i+1, maxdim, &ndims, dims, + &tstatus)) { + add_none_to_dict(d,"tdim"); + } else { + PyObject* dimsObj=PyList_New(0); + for (j=0; jfits->Fptr->tableptr[i]; + add_double_to_dict(d,"tscale",col->tscale); + add_double_to_dict(d,"tzero",col->tzero); + + PyList_Append(colinfo, d); + Py_XDECREF(d); + } + names=stringlist_delete(names); + tforms=stringlist_delete(tforms); + + PyDict_SetItemString(dict, "colinfo", colinfo); + Py_XDECREF(colinfo); + } + } else { + int tstatus=0; + LONGLONG nrows=0; + int ncols=0; + PyObject* colinfo = PyList_New(0); + int i=0,j=0; + + fits_get_num_rowsll(self->fits, &nrows, &tstatus); + fits_get_num_cols(self->fits, &ncols, &tstatus); + add_long_long_to_dict(dict,"nrows",(long long)nrows); + add_long_to_dict(dict,"ncols",(long)ncols); + + { + tcolumn* col=NULL; + struct stringlist* names=NULL; + struct stringlist* tforms=NULL; + names=stringlist_new(); + tforms=stringlist_new(); + + for (i=0; ifits, ncols, NULL, NULL, + // tfields tbcol units + NULL, names->data, NULL, tforms->data, 
NULL, + // extname + NULL, &tstatus); + + + + for (i=0; idata[i]); + add_string_to_dict(d,"tform",tforms->data[i]); + + fits_get_coltypell(self->fits, i+1, &type, &repeat, &width, &tstatus); + add_long_to_dict(d,"type",(long)type); + add_long_long_to_dict(d,"repeat",(long long)repeat); + add_long_long_to_dict(d,"width",(long long)width); + + fits_get_eqcoltypell(self->fits, i+1, &type, &repeat, &width, &tstatus); + add_long_to_dict(d,"eqtype",(long)type); + + tstatus=0; + if (fits_read_tdimll(self->fits, i+1, maxdim, &ndims, dims, + &tstatus)) { + add_none_to_dict(dict,"tdim"); + } else { + PyObject* dimsObj=PyList_New(0); + for (j=0; jfits->Fptr->tableptr[i]; + add_double_to_dict(d,"tscale",col->tscale); + add_double_to_dict(d,"tzero",col->tzero); + + PyList_Append(colinfo, d); + Py_XDECREF(d); + } + names=stringlist_delete(names); + tforms=stringlist_delete(tforms); + + PyDict_SetItemString(dict, "colinfo", colinfo); + Py_XDECREF(colinfo); + } + + } + return dict; +} + + +// this is the parameter that goes in the type for fits_write_col +static int +npy_to_fits_table_type(int npy_dtype, int write_bitcols) { + + char mess[255]; + switch (npy_dtype) { + case NPY_BOOL: + if (write_bitcols) { + return TBIT; + } else { + return TLOGICAL; + } + case NPY_UINT8: + return TBYTE; + case NPY_INT8: + return TSBYTE; + case NPY_UINT16: + return TUSHORT; + case NPY_INT16: + return TSHORT; + case NPY_UINT32: + if (sizeof(unsigned int) == sizeof(npy_uint32)) { + return TUINT; + } else if (sizeof(unsigned long) == sizeof(npy_uint32)) { + return TULONG; + } else { + PyErr_SetString(PyExc_TypeError, "could not determine 4 byte unsigned integer type"); + return -9999; + } + case NPY_INT32: + if (sizeof(int) == sizeof(npy_int32)) { + return TINT; + } else if (sizeof(long) == sizeof(npy_int32)) { + return TLONG; + } else { + PyErr_SetString(PyExc_TypeError, "could not determine 4 byte integer type"); + return -9999; + } + + case NPY_INT64: + if (sizeof(long long) == sizeof(npy_int64)) { + return TLONGLONG; + } else if (sizeof(long) == sizeof(npy_int64)) { + return TLONG; + } else if (sizeof(int) == sizeof(npy_int64)) { + return TINT; + } else { + PyErr_SetString(PyExc_TypeError, "could not determine 8 byte integer type"); + return -9999; + } + + + case NPY_FLOAT32: + return TFLOAT; + case NPY_FLOAT64: + return TDOUBLE; + + case NPY_COMPLEX64: + return TCOMPLEX; + case NPY_COMPLEX128: + return TDBLCOMPLEX; + + case NPY_STRING: + return TSTRING; + + case NPY_UINT64: + PyErr_SetString(PyExc_TypeError, "Unsigned 8 byte integer images are not supported by the FITS standard"); + return -9999; + + default: + sprintf(mess,"Unsupported numpy table datatype %d", npy_dtype); + PyErr_SetString(PyExc_TypeError, mess); + return -9999; + } + + return 0; +} + + + +static int +npy_to_fits_image_types(int npy_dtype, int *fits_img_type, int *fits_datatype) { + + char mess[255]; + switch (npy_dtype) { + case NPY_UINT8: + *fits_img_type = BYTE_IMG; + *fits_datatype = TBYTE; + break; + case NPY_INT8: + *fits_img_type = SBYTE_IMG; + *fits_datatype = TSBYTE; + break; + case NPY_UINT16: + *fits_img_type = USHORT_IMG; + *fits_datatype = TUSHORT; + break; + case NPY_INT16: + *fits_img_type = SHORT_IMG; + *fits_datatype = TSHORT; + break; + + case NPY_UINT32: + //*fits_img_type = ULONG_IMG; + if (sizeof(unsigned short) == sizeof(npy_uint32)) { + *fits_img_type = USHORT_IMG; + *fits_datatype = TUSHORT; + } else if (sizeof(unsigned int) == sizeof(npy_uint32)) { + // there is no UINT_IMG, so use ULONG_IMG + *fits_img_type = ULONG_IMG; + 
*fits_datatype = TUINT; + } else if (sizeof(unsigned long) == sizeof(npy_uint32)) { + *fits_img_type = ULONG_IMG; + *fits_datatype = TULONG; + } else { + PyErr_SetString(PyExc_TypeError, "could not determine 4 byte unsigned integer type"); + *fits_datatype = -9999; + return 1; + } + break; + + case NPY_INT32: + //*fits_img_type = LONG_IMG; + if (sizeof(unsigned short) == sizeof(npy_uint32)) { + *fits_img_type = SHORT_IMG; + *fits_datatype = TINT; + } else if (sizeof(int) == sizeof(npy_int32)) { + // there is no UINT_IMG, so use ULONG_IMG + *fits_img_type = LONG_IMG; + *fits_datatype = TINT; + } else if (sizeof(long) == sizeof(npy_int32)) { + *fits_img_type = LONG_IMG; + *fits_datatype = TLONG; + } else { + PyErr_SetString(PyExc_TypeError, "could not determine 4 byte integer type"); + *fits_datatype = -9999; + return 1; + } + break; + + case NPY_INT64: + //*fits_img_type = LONGLONG_IMG; + if (sizeof(int) == sizeof(npy_int64)) { + // there is no UINT_IMG, so use ULONG_IMG + *fits_img_type = LONG_IMG; + *fits_datatype = TINT; + } else if (sizeof(long) == sizeof(npy_int64)) { + *fits_img_type = LONG_IMG; + *fits_datatype = TLONG; + } else if (sizeof(long long) == sizeof(npy_int64)) { + *fits_img_type = LONGLONG_IMG; + *fits_datatype = TLONGLONG; + } else { + PyErr_SetString(PyExc_TypeError, "could not determine 8 byte integer type"); + *fits_datatype = -9999; + return 1; + } + break; + + + case NPY_FLOAT32: + *fits_img_type = FLOAT_IMG; + *fits_datatype = TFLOAT; + break; + case NPY_FLOAT64: + *fits_img_type = DOUBLE_IMG; + *fits_datatype = TDOUBLE; + break; + + case NPY_UINT64: + PyErr_SetString(PyExc_TypeError, "Unsigned 8 byte integer images are not supported by the FITS standard"); + *fits_datatype = -9999; + return 1; + break; + + default: + sprintf(mess,"Unsupported numpy image datatype %d", npy_dtype); + PyErr_SetString(PyExc_TypeError, mess); + *fits_datatype = -9999; + return 1; + break; + } + + return 0; +} + + +/* + * this is really only for reading variable length columns since we should be + * able to just read the bytes for normal columns + */ +static int fits_to_npy_table_type(int fits_dtype, int* isvariable) { + + if (fits_dtype < 0) { + *isvariable=1; + } else { + *isvariable=0; + } + + switch (abs(fits_dtype)) { + case TBIT: + return NPY_INT8; + case TLOGICAL: // literal T or F stored as char + return NPY_INT8; + case TBYTE: + return NPY_UINT8; + case TSBYTE: + return NPY_INT8; + + case TUSHORT: + if (sizeof(unsigned short) == sizeof(npy_uint16)) { + return NPY_UINT16; + } else if (sizeof(unsigned short) == sizeof(npy_uint8)) { + return NPY_UINT8; + } else { + PyErr_SetString(PyExc_TypeError, "could not determine numpy type for fits TUSHORT"); + return -9999; + } + case TSHORT: + if (sizeof(short) == sizeof(npy_int16)) { + return NPY_INT16; + } else if (sizeof(short) == sizeof(npy_int8)) { + return NPY_INT8; + } else { + PyErr_SetString(PyExc_TypeError, "could not determine numpy type for fits TSHORT"); + return -9999; + } + + case TUINT: + if (sizeof(unsigned int) == sizeof(npy_uint32)) { + return NPY_UINT32; + } else if (sizeof(unsigned int) == sizeof(npy_uint64)) { + return NPY_UINT64; + } else if (sizeof(unsigned int) == sizeof(npy_uint16)) { + return NPY_UINT16; + } else { + PyErr_SetString(PyExc_TypeError, "could not determine numpy type for fits TUINT"); + return -9999; + } + case TINT: + if (sizeof(int) == sizeof(npy_int32)) { + return NPY_INT32; + } else if (sizeof(int) == sizeof(npy_int64)) { + return NPY_INT64; + } else if (sizeof(int) == sizeof(npy_int16)) { + 
return NPY_INT16; + } else { + PyErr_SetString(PyExc_TypeError, "could not determine numpy type for fits TINT"); + return -9999; + } + + case TULONG: + if (sizeof(unsigned long) == sizeof(npy_uint32)) { + return NPY_UINT32; + } else if (sizeof(unsigned long) == sizeof(npy_uint64)) { + return NPY_UINT64; + } else if (sizeof(unsigned long) == sizeof(npy_uint16)) { + return NPY_UINT16; + } else { + PyErr_SetString(PyExc_TypeError, "could not determine numpy type for fits TULONG"); + return -9999; + } + case TLONG: + if (sizeof(unsigned long) == sizeof(npy_int32)) { + return NPY_INT32; + } else if (sizeof(unsigned long) == sizeof(npy_int64)) { + return NPY_INT64; + } else if (sizeof(long) == sizeof(npy_int16)) { + return NPY_INT16; + } else { + PyErr_SetString(PyExc_TypeError, "could not determine numpy type for fits TLONG"); + return -9999; + } + + + case TLONGLONG: + if (sizeof(LONGLONG) == sizeof(npy_int64)) { + return NPY_INT64; + } else if (sizeof(LONGLONG) == sizeof(npy_int32)) { + return NPY_INT32; + } else if (sizeof(LONGLONG) == sizeof(npy_int16)) { + return NPY_INT16; + } else { + PyErr_SetString(PyExc_TypeError, "could not determine numpy type for fits TLONGLONG"); + return -9999; + } + + + + case TFLOAT: + return NPY_FLOAT32; + case TDOUBLE: + return NPY_FLOAT64; + + case TCOMPLEX: + return NPY_COMPLEX64; + case TDBLCOMPLEX: + return NPY_COMPLEX128; + + + case TSTRING: + return NPY_STRING; + + default: + PyErr_Format(PyExc_TypeError,"Unsupported FITS table datatype %d", fits_dtype); + return -9999; + } + + return 0; +} + + + +static int create_empty_hdu(struct PyFITSObject* self) +{ + int status=0; + int bitpix=SHORT_IMG; + int naxis=0; + long* naxes=NULL; + if (fits_create_img(self->fits, bitpix, naxis, naxes, &status)) { + set_ioerr_string_from_status(status); + return 1; + } + + return 0; +} + + +// follows fits convention that return value is true +// for failure +// +// exception strings are set internally +// +// length checking should happen in python +// +// note tile dims are written reverse order since +// python orders C and fits orders Fortran +static int set_compression(fitsfile *fits, + int comptype, + PyObject* tile_dims_obj, + int *status) { + + npy_int64 *tile_dims_py=NULL; + long *tile_dims_fits=NULL; + npy_intp ndims=0, i=0; + + // can be NOCOMPRESS (0) + if (fits_set_compression_type(fits, comptype, status)) { + set_ioerr_string_from_status(*status); + goto _set_compression_bail; + return 1; + } + + if (tile_dims_obj != Py_None) { + + tile_dims_py=get_int64_from_array(tile_dims_obj, &ndims); + if (tile_dims_py==NULL) { + *status=1; + } else { + tile_dims_fits = calloc(ndims,sizeof(long)); + if (!tile_dims_fits) { + PyErr_Format(PyExc_MemoryError, "failed to allocate %ld longs", + ndims); + goto _set_compression_bail; + } + + for (i=0; ind; +} + +/* + Create an image extension, possible writing data as well. + + We allow creating from dimensions rather than from the input image shape, + writing into the HDU later + + It is useful to create the extension first so we can write keywords into the + header before adding data. This avoids moving the data if the header grows + too large. + + However, on distributed file systems it can be more efficient to write + the data at this time due to slowness with updating the file in place. 
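+
+  An illustrative order of operations (a sketch, not part of the
+  original source; the names refer to the wrappers defined in this file):
+
+      create_image_hdu(...)    with dims only, no data yet
+      write_string_key(...)    etc: fill the header while it is small
+      write_image(...)         write the pixels last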
+ + */ + +static PyObject * +PyFITSObject_create_image_hdu(struct PyFITSObject* self, PyObject* args, PyObject* kwds) { + int ndims=0; + long *dims=NULL; + int image_datatype=0; // fits type for image, AKA bitpix + int datatype=0; // type for the data we entered + + int comptype=0; // same as NOCOMPRESS in newer cfitsio + PyObject* tile_dims_obj=NULL; + + PyObject* array, *dims_obj; + int npy_dtype=0, nkeys=0, write_data=0; + int i=0; + int status=0; + + char* extname=NULL; + int extver=0; + + if (self->fits == NULL) { + PyErr_SetString(PyExc_ValueError, "fits file is NULL"); + return NULL; + } + + static char *kwlist[] = + {"array","nkeys","dims","comptype","tile_dims","extname", "extver", NULL}; + if (!PyArg_ParseTupleAndKeywords(args, kwds, "Oi|OiOsi", kwlist, + &array, &nkeys, &dims_obj, &comptype, &tile_dims_obj, + &extname, &extver)) { + goto create_image_hdu_cleanup; + } + + + if (array == Py_None) { + if (create_empty_hdu(self)) { + return NULL; + } + } else { + if (!PyArray_Check(array)) { + PyErr_SetString(PyExc_TypeError, "input must be an array."); + goto create_image_hdu_cleanup; + } + + npy_dtype = PyArray_TYPE(array); + if (npy_to_fits_image_types(npy_dtype, &image_datatype, &datatype)) { + goto create_image_hdu_cleanup; + } + + if (PyArray_Check(dims_obj)) { + // get dims from input, which must be of type 'i8' + // this means we are not writing the array that was input, + // it is only used to determine the data type + npy_int64 *tptr=NULL, tmp=0; + ndims = PyArray_SIZE(dims_obj); + dims = calloc(ndims,sizeof(long)); + for (i=0; i 0) { + // exception strings are set internally + if (set_compression(self->fits, comptype, tile_dims_obj, &status)) { + goto create_image_hdu_cleanup; + } + } + + if (fits_create_img(self->fits, image_datatype, ndims, dims, &status)) { + set_ioerr_string_from_status(status); + goto create_image_hdu_cleanup; + } + + + } + if (extname != NULL) { + if (strlen(extname) > 0) { + + // comments are NULL + if (fits_update_key_str(self->fits, "EXTNAME", extname, NULL, &status)) { + set_ioerr_string_from_status(status); + goto create_image_hdu_cleanup; + } + if (extver > 0) { + if (fits_update_key_lng(self->fits, "EXTVER", (LONGLONG) extver, NULL, &status)) { + set_ioerr_string_from_status(status); + goto create_image_hdu_cleanup; + } + } + } + } + + if (nkeys > 0) { + if (fits_set_hdrsize(self->fits, nkeys, &status) ) { + set_ioerr_string_from_status(status); + goto create_image_hdu_cleanup; + } + } + + if (write_data) { + int firstpixel=1; + LONGLONG nelements = 0; + void* data=NULL; + nelements = PyArray_SIZE(array); + data = PyArray_DATA(array); + if (fits_write_img(self->fits, datatype, firstpixel, nelements, data, &status)) { + set_ioerr_string_from_status(status); + goto create_image_hdu_cleanup; + } + } + + // this does a full close and reopen + if (fits_flush_file(self->fits, &status)) { + set_ioerr_string_from_status(status); + goto create_image_hdu_cleanup; + } + + +create_image_hdu_cleanup: + + if (status != 0) { + return NULL; + } + + free(dims); dims=NULL; + Py_RETURN_NONE; +} + + +// reshape the image to specified dims +// the input array must be of type int64 +static PyObject * +PyFITSObject_reshape_image(struct PyFITSObject* self, PyObject* args) { + + int status=0; + int hdunum=0, hdutype=0; + PyObject* dims_obj=NULL; + LONGLONG dims[CFITSIO_MAX_ARRAY_DIMS]={0}; + LONGLONG dims_orig[CFITSIO_MAX_ARRAY_DIMS]={0}; + int ndims=0, ndims_orig=0; + npy_int64 dim=0; + npy_intp i=0; + int bitpix=0, maxdim=CFITSIO_MAX_ARRAY_DIMS; + + if (self->fits 
== NULL) { + PyErr_SetString(PyExc_ValueError, "fits file is NULL"); + return NULL; + } + + if (!PyArg_ParseTuple(args, (char*)"iO", &hdunum, &dims_obj)) { + return NULL; + } + + if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + // existing image params, just to get bitpix + if (fits_get_img_paramll(self->fits, maxdim, &bitpix, &ndims_orig, dims_orig, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + ndims = PyArray_SIZE(dims_obj); + for (i=0; ifits, bitpix, ndims, dims, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + Py_RETURN_NONE; +} + +// write the image to an existing HDU created using create_image_hdu +// dims are not checked +static PyObject * +PyFITSObject_write_image(struct PyFITSObject* self, PyObject* args) { + int hdunum=0; + int hdutype=0; + LONGLONG nelements=1; + PY_LONG_LONG firstpixel_py=0; + LONGLONG firstpixel=0; + int image_datatype=0; // fits type for image, AKA bitpix + int datatype=0; // type for the data we entered + + PyObject* array; + void* data=NULL; + int npy_dtype=0; + int status=0; + + if (self->fits == NULL) { + PyErr_SetString(PyExc_ValueError, "fits file is NULL"); + return NULL; + } + + if (!PyArg_ParseTuple(args, (char*)"iOL", &hdunum, &array, &firstpixel_py)) { + return NULL; + } + + if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + if (!PyArray_Check(array)) { + PyErr_SetString(PyExc_TypeError, "input must be an array."); + return NULL; + } + + npy_dtype = PyArray_TYPE(array); + if (npy_to_fits_image_types(npy_dtype, &image_datatype, &datatype)) { + return NULL; + } + + + data = PyArray_DATA(array); + nelements = PyArray_SIZE(array); + firstpixel = (LONGLONG) firstpixel_py; + if (fits_write_img(self->fits, datatype, firstpixel, nelements, data, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + // this is a full file close and reopen + if (fits_flush_file(self->fits, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + Py_RETURN_NONE; +} + + +/* + * Write tdims from the list. The list must be the expected length. + * Entries must be strings or None; if None the tdim is not written. 
+ * + * The keys are written as TDIM{colnum} + */ +static int +add_tdims_from_listobj(fitsfile* fits, PyObject* tdimObj, int ncols) { + int status=0; + size_t size=0, i=0; + char keyname[20]; + int colnum=0; + PyObject* tmp=NULL; + char* tdim=NULL; + + if (tdimObj == NULL || tdimObj == Py_None) { + // it is ok for it to be empty + return 0; + } + + if (!PyList_Check(tdimObj)) { + PyErr_SetString(PyExc_ValueError, "Expected a list for tdims"); + return 1; + } + + size = PyList_Size(tdimObj); + if (size != ncols) { + PyErr_Format(PyExc_ValueError, "Expected %d elements in tdims list, got %ld", ncols, size); + return 1; + } + + for (i=0; i 0) { + extname_use = extname; + } + } + nfields = ttyp->size; + if ( fits_create_tbl(self->fits, table_type, nrows, nfields, + ttyp->data, tform->data, tunit->data, extname_use, &status) ) { + set_ioerr_string_from_status(status); + goto create_table_cleanup; + } + + if (add_tdims_from_listobj(self->fits, tdimObj, nfields)) { + status=99; + goto create_table_cleanup; + } + + if (extname_use != NULL) { + if (extver > 0) { + + if (fits_update_key_lng(self->fits, "EXTVER", (LONGLONG) extver, NULL, &status)) { + set_ioerr_string_from_status(status); + goto create_table_cleanup; + } + } + } + + if (nkeys > 0) { + if (fits_set_hdrsize(self->fits, nkeys, &status) ) { + set_ioerr_string_from_status(status); + goto create_table_cleanup; + } + } + + // this does a full close and reopen + if (fits_flush_file(self->fits, &status)) { + set_ioerr_string_from_status(status); + goto create_table_cleanup; + } + +create_table_cleanup: + ttyp = stringlist_delete(ttyp); + tform = stringlist_delete(tform); + tunit = stringlist_delete(tunit); + //tdim = stringlist_delete(tdim); + + + if (status != 0) { + return NULL; + } + Py_RETURN_NONE; +} + + + + +// create a new table structure. No physical rows are added yet. 
+static PyObject * +PyFITSObject_insert_col(struct PyFITSObject* self, PyObject* args, PyObject* kwds) { + int status=0; + int hdunum=0; + int colnum=0; + + int hdutype=0; + + static char *kwlist[] = {"hdunum","colnum","ttyp","tform","tdim", NULL}; + // these are all strings + char* ttype=NULL; // field name + char* tform=NULL; // format + PyObject* tdimObj=NULL; // optional, a list of len 1 + + if (!PyArg_ParseTupleAndKeywords(args, kwds, "iiss|O", kwlist, + &hdunum, &colnum, &ttype, &tform, &tdimObj)) { + return NULL; + } + + if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + if (fits_insert_col(self->fits, colnum, ttype, tform, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + // OK if dims are not sent + if (tdimObj != NULL && tdimObj != Py_None) { + PyObject* tmp=NULL; + char* tdim=NULL; + char keyname[20]; + + sprintf(keyname, "TDIM%d", colnum); + tmp = PyList_GetItem(tdimObj, 0); + + tdim = get_object_as_string(tmp); + fits_write_key(self->fits, TSTRING, keyname, tdim, NULL, &status); + free(tdim); + + if (status) { + set_ioerr_string_from_status(status); + return NULL; + } + } + + // this does a full close and reopen + if (fits_flush_file(self->fits, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + Py_RETURN_NONE; +} + + + + +// No error checking performed here +static +int write_string_column( + fitsfile *fits, /* I - FITS file pointer */ + int colnum, /* I - number of column to write (1 = 1st col) */ + LONGLONG firstrow, /* I - first row to write (1 = 1st row) */ + LONGLONG firstelem, /* I - first vector element to write (1 = 1st) */ + LONGLONG nelem, /* I - number of strings to write */ + char *data, + int *status) { /* IO - error status */ + + LONGLONG i=0; + LONGLONG twidth=0; + // need to create a char** representation of the data, just point back + // into the data array at string width offsets. the fits_write_col_str + // takes care of skipping between fields. 
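+    //
+    // for example (an illustrative note, not in the original source):
+    // with twidth=3 the pointers are strdata[i] = cdata + 3*i, so each
+    // pointer marks where one fixed-width field starts in the flat buffer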
+ char* cdata=NULL; + char** strdata=NULL; + + // using struct def here, could cause problems + twidth = fits->Fptr->tableptr[colnum-1].twidth; + + strdata = malloc(nelem*sizeof(char*)); + if (strdata == NULL) { + PyErr_SetString(PyExc_MemoryError, "could not allocate temporary string pointers"); + *status = 99; + return 1; + } + cdata = (char* ) data; + for (i=0; ifits == NULL) { + PyErr_SetString(PyExc_ValueError, "fits file is NULL"); + return NULL; + } + + if (!PyArg_ParseTupleAndKeywords(args, kwds, "iiOLi", + kwlist, &hdunum, &colnum, &array, &firstrow_py, &write_bitcols)) { + return NULL; + } + firstrow = (LONGLONG) firstrow_py; + + if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + + if (!PyArray_Check(array)) { + PyErr_SetString(PyExc_ValueError,"only arrays can be written to columns"); + return NULL; + } + + npy_dtype = PyArray_TYPE(array); + fits_dtype = npy_to_fits_table_type(npy_dtype, write_bitcols); + if (fits_dtype == -9999) { + return NULL; + } + + data = PyArray_DATA(array); + nelem = PyArray_SIZE(array); + + if (fits_dtype == TSTRING) { + + // this is my wrapper for strings + if (write_string_column(self->fits, colnum, firstrow, firstelem, nelem, data, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + } else if (fits_dtype == TBIT) { + if (fits_write_col_bit(self->fits, colnum, firstrow, firstelem, nelem, data, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + } else { + if( fits_write_col(self->fits, fits_dtype, colnum, firstrow, firstelem, nelem, data, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + } + + // this is a full file close and reopen + if (fits_flush_file(self->fits, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + + Py_RETURN_NONE; +} + + +static PyObject * +PyFITSObject_write_columns(struct PyFITSObject* self, PyObject* args, PyObject* kwds) { + int status=0; + int hdunum=0; + int hdutype=0; + int write_bitcols=0; + //void **data_ptrs=NULL; + PyObject* colnum_list=NULL; + PyObject* array_list=NULL; + PyObject *tmp_array=NULL, *tmp_obj=NULL; + + Py_ssize_t ncols=0; + + void* data=NULL; + PY_LONG_LONG firstrow_py=0; + LONGLONG firstrow=1, thisrow=0; + LONGLONG firstelem=1; + LONGLONG nelem=0; + LONGLONG *nperrow=NULL; + int npy_dtype=0; + int *fits_dtypes=NULL; + int *is_string=NULL, *colnums=NULL; + void **array_ptrs=NULL; + + npy_intp ndim=0, *dims=NULL; + Py_ssize_t irow=0, icol=0, j=0;; + + static char *kwlist[] = {"hdunum","colnums","arraylist","firstrow","write_bitcols", NULL}; + + if (self->fits == NULL) { + PyErr_SetString(PyExc_ValueError, "fits file is NULL"); + return NULL; + } + + if (!PyArg_ParseTupleAndKeywords(args, kwds, "iOOLi", + kwlist, &hdunum, &colnum_list, &array_list, &firstrow_py, &write_bitcols)) { + return NULL; + } + firstrow = (LONGLONG) firstrow_py; + + if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + + if (!PyList_Check(colnum_list)) { + PyErr_SetString(PyExc_ValueError,"colnums must be a list"); + return NULL; + } + if (!PyList_Check(array_list)) { + PyErr_SetString(PyExc_ValueError,"colnums must be a list"); + return NULL; + } + ncols = PyList_Size(colnum_list); + if (ncols == 0) { + goto _fitsio_pywrap_write_columns_bail; + } + if (ncols != PyList_Size(array_list)) { + PyErr_Format(PyExc_ValueError,"colnum and array lists not same size: %ld/%ld", + ncols, PyList_Size(array_list)); + 
} + + // from here on we'll have some temporary arrays we have to free + is_string = calloc(ncols, sizeof(int)); + colnums = calloc(ncols, sizeof(int)); + array_ptrs = calloc(ncols, sizeof(void*)); + nperrow = calloc(ncols, sizeof(LONGLONG)); + fits_dtypes = calloc(ncols, sizeof(int)); + for (icol=0; icol= 3 + colnums[icol] = 1+(int) PyLong_AsLong(tmp_obj); +#else + colnums[icol] = 1+(int) PyInt_AsLong(tmp_obj); +#endif + array_ptrs[icol] = tmp_array; + + nperrow[icol] = 1; + for (j=1; jfits, + colnums[icol], + thisrow, + firstelem, + nperrow[icol], + (char*)data, + &status)) { + set_ioerr_string_from_status(status); + goto _fitsio_pywrap_write_columns_bail; + } + /* + char *strdata=NULL; + strdata = (char* ) data; + + if( fits_write_col_str(self->fits, + colnums[icol], + thisrow, + firstelem, + nperrow[icol], + &strdata, + &status)) { + set_ioerr_string_from_status(status); + goto _fitsio_pywrap_write_columns_bail; + } + */ + + } else if (fits_dtypes[icol] == TBIT) { + if (fits_write_col_bit(self->fits, + colnums[icol], + thisrow, + firstelem, + nperrow[icol], + data, + &status)) { + set_ioerr_string_from_status(status); + goto _fitsio_pywrap_write_columns_bail; + } + } else { + //fprintf(stderr,"row: %ld col: %d\n", (long)thisrow, colnums[icol]); + if( fits_write_col(self->fits, + fits_dtypes[icol], + colnums[icol], + thisrow, + firstelem, + nperrow[icol], + data, + &status)) { + set_ioerr_string_from_status(status); + goto _fitsio_pywrap_write_columns_bail; + } + } + } + } + /* + nelem = PyArray_SIZE(array); + + if (fits_dtype == TSTRING) { + + // this is my wrapper for strings + if (write_string_column(self->fits, colnum, firstrow, firstelem, nelem, data, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + } else { + if( fits_write_col(self->fits, fits_dtype, colnum, firstrow, firstelem, nelem, data, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + } + + // this is a full file close and reopen + if (fits_flush_file(self->fits, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + */ + +_fitsio_pywrap_write_columns_bail: + free(is_string); is_string=NULL; + free(colnums); colnums=NULL; + free(array_ptrs); array_ptrs=NULL; + free(nperrow); nperrow=NULL; + free(fits_dtypes); fits_dtypes=NULL; + if (status != 0) { + return NULL; + } + Py_RETURN_NONE; +} + + + + + + + + +// No error checking performed here +static +int write_var_string_column( + fitsfile *fits, /* I - FITS file pointer */ + int colnum, /* I - number of column to write (1 = 1st col) */ + LONGLONG firstrow, /* I - first row to write (1 = 1st row) */ + PyObject* array, + int *status) { /* IO - error status */ + + LONGLONG firstelem=1; // ignored + LONGLONG nelem=1; // ignored + npy_intp nrows=0; + npy_intp i=0; + char* ptr=NULL; + int res=0; + + PyObject* el=NULL; + char* strdata=NULL; + char* strarr[1]; + + + nrows = PyArray_SIZE(array); + for (i=0; i 0) { + goto write_var_string_column_cleanup; + } + } + +write_var_string_column_cleanup: + + if (*status > 0) { + return 1; + } + + return 0; +} + +/* + * No error checking performed here + */ +static +int write_var_num_column( + fitsfile *fits, /* I - FITS file pointer */ + int colnum, /* I - number of column to write (1 = 1st col) */ + LONGLONG firstrow, /* I - first row to write (1 = 1st row) */ + int fits_dtype, + PyObject* array, + int *status) { /* IO - error status */ + + LONGLONG firstelem=1; + npy_intp nelem=0; + npy_intp nrows=0; + npy_intp i=0; + PyObject* el=NULL; + PyObject* el_array=NULL; + void* 
data=NULL; + void* ptr=NULL; + + int npy_dtype=0, isvariable=0; + + int mindepth=1, maxdepth=0; + PyObject* context=NULL; + int requirements = + NPY_C_CONTIGUOUS + | NPY_ALIGNED + | NPY_NOTSWAPPED + | NPY_ELEMENTSTRIDES; + + int res=0; + + npy_dtype = fits_to_npy_table_type(fits_dtype, &isvariable); + + nrows = PyArray_SIZE(array); + for (i=0; i 0) { + set_ioerr_string_from_status(*status); + return 1; + } + } + + return 0; +} + + + + +/* + * write a variable length column, starting at firstrow. On the python side, + * the firstrow kwd should default to 1. You can append rows using firstrow = + * nrows+1 + * + * The input array should be of type NPY_OBJECT, and the elements + * should be either all strings or numpy arrays of the same type + */ + +static PyObject * +PyFITSObject_write_var_column(struct PyFITSObject* self, PyObject* args, PyObject* kwds) { + int status=0; + int hdunum=0; + int hdutype=0; + int colnum=0; + PyObject* array=NULL; + + PY_LONG_LONG firstrow_py=0; + LONGLONG firstrow=1; + int npy_dtype=0; + int fits_dtype=0; + + static char *kwlist[] = {"hdunum","colnum","array","firstrow", NULL}; + + if (self->fits == NULL) { + PyErr_SetString(PyExc_ValueError, "fits file is NULL"); + return NULL; + } + + if (!PyArg_ParseTupleAndKeywords(args, kwds, "iiOL", + kwlist, &hdunum, &colnum, &array, &firstrow_py)) { + return NULL; + } + firstrow = (LONGLONG) firstrow_py; + + if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + + if (!PyArray_Check(array)) { + PyErr_SetString(PyExc_ValueError,"only arrays can be written to columns"); + return NULL; + } + + npy_dtype = PyArray_TYPE(array); + if (npy_dtype != NPY_OBJECT) { + PyErr_SetString(PyExc_TypeError,"only object arrays can be written to variable length columns"); + return NULL; + } + + // determine the fits dtype for this column. 
We will use this to get data + // from the array for writing + if (fits_get_eqcoltypell(self->fits, colnum, &fits_dtype, NULL, NULL, &status) > 0) { + set_ioerr_string_from_status(status); + return NULL; + } + + if (fits_dtype == -TSTRING) { + if (write_var_string_column(self->fits, colnum, firstrow, array, &status)) { + if (status != 0) { + set_ioerr_string_from_status(status); + } + return NULL; + } + } else { + if (write_var_num_column(self->fits, colnum, firstrow, fits_dtype, array, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + } + + // this is a full file close and reopen + if (fits_flush_file(self->fits, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + + Py_RETURN_NONE; +} + + + + +// let python do the conversions +static PyObject * +PyFITSObject_write_string_key(struct PyFITSObject* self, PyObject* args) { + int status=0; + int hdunum=0; + int hdutype=0; + + char* keyname=NULL; + char* value=NULL; + char* comment=NULL; + char* comment_in=NULL; + + if (!PyArg_ParseTuple(args, (char*)"isss", &hdunum, &keyname, &value, &comment_in)) { + return NULL; + } + + if (self->fits == NULL) { + PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL"); + return NULL; + } + if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + if (strlen(comment_in) > 0) { + comment=comment_in; + } + + if (fits_update_key_str(self->fits, keyname, value, comment, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + // this does not close and reopen + if (fits_flush_buffer(self->fits, 0, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + Py_RETURN_NONE; +} + +static PyObject * +PyFITSObject_write_double_key(struct PyFITSObject* self, PyObject* args) { + int status=0; + int hdunum=0; + int hdutype=0; + + int decimals=-15; + + char* keyname=NULL; + double value=0; + char* comment=NULL; + char* comment_in=NULL; + + if (!PyArg_ParseTuple(args, (char*)"isds", &hdunum, &keyname, &value, &comment_in)) { + return NULL; + } + + if (self->fits == NULL) { + PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL"); + return NULL; + } + if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + if (strlen(comment_in) > 0) { + comment=comment_in; + } + + if (fits_update_key_dbl(self->fits, keyname, value, decimals, comment, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + // this does not close and reopen + if (fits_flush_buffer(self->fits, 0, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + + Py_RETURN_NONE; +} + +static PyObject * +PyFITSObject_write_long_key(struct PyFITSObject* self, PyObject* args) { + int status=0; + int hdunum=0; + int hdutype=0; + + char* keyname=NULL; + long value=0; + char* comment=NULL; + char* comment_in=NULL; + + if (!PyArg_ParseTuple(args, (char*)"isls", &hdunum, &keyname, &value, &comment_in)) { + return NULL; + } + + if (self->fits == NULL) { + PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL"); + return NULL; + } + if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + if (strlen(comment_in) > 0) { + comment=comment_in; + } + + if (fits_update_key_lng(self->fits, keyname, (LONGLONG) value, comment, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + // this does not close and reopen + if 
(fits_flush_buffer(self->fits, 0, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + Py_RETURN_NONE; +} + +static PyObject * +PyFITSObject_write_logical_key(struct PyFITSObject* self, PyObject* args) { + int status=0; + int hdunum=0; + int hdutype=0; + + char* keyname=NULL; + int value=0; + char* comment=NULL; + char* comment_in=NULL; + + if (!PyArg_ParseTuple(args, (char*)"isis", &hdunum, &keyname, &value, &comment_in)) { + return NULL; + } + + if (self->fits == NULL) { + PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL"); + return NULL; + } + if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + if (strlen(comment_in) > 0) { + comment=comment_in; + } + + if (fits_update_key_log(self->fits, keyname, value, comment, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + // this does not close and reopen + if (fits_flush_buffer(self->fits, 0, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + Py_RETURN_NONE; +} + +// let python do the conversions +static PyObject * +PyFITSObject_write_comment(struct PyFITSObject* self, PyObject* args) { + int status=0; + int hdunum=0; + int hdutype=0; + + char* comment=NULL; + + if (!PyArg_ParseTuple(args, (char*)"is", &hdunum, &comment)) { + return NULL; + } + + if (self->fits == NULL) { + PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL"); + return NULL; + } + if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + if (fits_write_comment(self->fits, comment, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + // this does not close and reopen + if (fits_flush_buffer(self->fits, 0, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + Py_RETURN_NONE; +} + +// let python do the conversions +static PyObject * +PyFITSObject_write_history(struct PyFITSObject* self, PyObject* args) { + int status=0; + int hdunum=0; + int hdutype=0; + + char* history=NULL; + + if (!PyArg_ParseTuple(args, (char*)"is", &hdunum, &history)) { + return NULL; + } + + if (self->fits == NULL) { + PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL"); + return NULL; + } + if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + if (fits_write_history(self->fits, history, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + // this does not close and reopen + if (fits_flush_buffer(self->fits, 0, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + Py_RETURN_NONE; +} + +// ADW: Adapted from ffpcom and ffphis in putkey.c +int fits_write_continue( fitsfile *fptr, /* I - FITS file pointer */ + const char *cont, /* I - continue string */ + int *status) /* IO - error status */ +/* + Write 1 or more CONTINUE keywords. If the history string is too + long to fit on a single keyword (72 chars) then it will automatically + be continued on multiple CONTINUE keywords. 
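+
+  For example (illustrative): a 100 character input produces two cards,
+  the first carrying characters 0-71 and the second characters 72-99,
+  each written as a CONTINUE card via ffprec.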
+*/ +{ + char card[FLEN_CARD]; + int len, ii; + + if (*status > 0) /* inherit input status value if > 0 */ + return(*status); + + len = strlen(cont); + ii = 0; + + for (; len > 0; len -= 72) + { + strcpy(card, "CONTINUE"); + strncat(card, &cont[ii], 72); + ffprec(fptr, card, status); + ii += 72; + } + + return(*status); +} + +// let python do the conversions +static PyObject * +PyFITSObject_write_continue(struct PyFITSObject* self, PyObject* args) { + int status=0; + int hdunum=0; + int hdutype=0; + + char* value=NULL; + + if (!PyArg_ParseTuple(args, (char*)"is", &hdunum, &value)) { + return NULL; + } + + if (self->fits == NULL) { + PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL"); + return NULL; + } + if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + if (fits_write_continue(self->fits, value, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + // this does not close and reopen + if (fits_flush_buffer(self->fits, 0, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + Py_RETURN_NONE; +} + + +static PyObject * +PyFITSObject_write_undefined_key(struct PyFITSObject* self, PyObject* args) { + int status=0; + int hdunum=0; + int hdutype=0; + + char* keyname=NULL; + int value=0; + char* comment=NULL; + char* comment_in=NULL; + + if (!PyArg_ParseTuple(args, (char*)"iss", &hdunum, &keyname, &comment_in)) { + return NULL; + } + + if (self->fits == NULL) { + PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL"); + return NULL; + } + if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + if (strlen(comment_in) > 0) { + comment=comment_in; + } + + if (fits_update_key_null(self->fits, keyname, comment, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + // this does not close and reopen + if (fits_flush_buffer(self->fits, 0, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + Py_RETURN_NONE; +} + + + +/* + insert a set of rows +*/ + +static PyObject * +PyFITSObject_insert_rows(struct PyFITSObject* self, PyObject* args, PyObject* kwds) { + int status=0; + int hdunum=0; + + int hdutype=0; + PY_LONG_LONG firstrow_py=0, nrows_py=0; + LONGLONG firstrow=0, nrows=0; + + if (self->fits == NULL) { + PyErr_SetString(PyExc_ValueError, "fits file is NULL"); + return NULL; + } + + if (!PyArg_ParseTuple(args, (char*)"iLL", + &hdunum, &firstrow_py, &nrows_py)) { + return NULL; + } + + firstrow = (LONGLONG) firstrow_py; + nrows = (LONGLONG) nrows_py; + + if (nrows <= 0) { + // nothing to do, just return + Py_RETURN_NONE; + } + + if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + if (fits_insert_rows(self->fits, firstrow, nrows, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + // this does a full close and reopen + if (fits_flush_file(self->fits, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + Py_RETURN_NONE; +} + + +/* + + delete a range of rows + + input stop is like a python slice, so exclusive, but 1-offset + rather than 0-offset +*/ + +static PyObject * +PyFITSObject_delete_row_range(struct PyFITSObject* self, PyObject* args, PyObject* kwds) { + int status=0; + int hdunum=0; + + int hdutype=0; + PY_LONG_LONG slice_start_py=0, slice_stop_py=0; + LONGLONG slice_start=0, slice_stop=0, nrows=0; + + if (self->fits == NULL) { + 
PyErr_SetString(PyExc_ValueError, "fits file is NULL"); + return NULL; + } + + if (!PyArg_ParseTuple(args, (char*)"iLL", + &hdunum, &slice_start_py, &slice_stop_py)) { + return NULL; + } + + slice_start = (LONGLONG) slice_start_py; + slice_stop = (LONGLONG) slice_stop_py; + nrows = slice_stop - slice_start; + + if (nrows <= 0) { + // nothing to do, just return + Py_RETURN_NONE; + } + + if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + if (fits_delete_rows(self->fits, slice_start, nrows, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + // this does a full close and reopen + if (fits_flush_file(self->fits, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + Py_RETURN_NONE; +} + +/* + + delete a specific set of rows, 1-offset + + no type checking is applied to the rows +*/ + +static PyObject * +PyFITSObject_delete_rows(struct PyFITSObject* self, PyObject* args, PyObject* kwds) { + int status=0; + int hdunum=0; + + int hdutype=0; + PyObject *rows_array=NULL; + LONGLONG *rows=NULL, nrows=0; + + if (self->fits == NULL) { + PyErr_SetString(PyExc_ValueError, "fits file is NULL"); + return NULL; + } + + if (!PyArg_ParseTuple(args, (char*)"iO", + &hdunum, &rows_array)) { + return NULL; + } + + rows = (LONGLONG *) PyArray_DATA(rows_array); + nrows = PyArray_SIZE(rows_array); + if (nrows <= 0) { + Py_RETURN_NONE; + } + + if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + if (fits_delete_rowlistll(self->fits, rows, nrows, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + // this does a full close and reopen + if (fits_flush_file(self->fits, &status)) { + set_ioerr_string_from_status(status); + return NULL; + } + + Py_RETURN_NONE; +} + + + + +/* + * read a single, entire column from an ascii table into the input array. This + * version uses the standard read column instead of our by-bytes version. + * + * A number of assumptions are made, such as that columns are scalar, which + * is true for ascii. 
+ */
+
+static int read_ascii_column_all(fitsfile* fits, int colnum, PyObject* array, int* status) {
+
+    int npy_dtype=0;
+    int fits_dtype=0;
+
+    npy_intp nelem=0;
+    LONGLONG firstelem=1;
+    LONGLONG firstrow=1;
+    int* anynul=NULL;
+    void* nulval=0;
+    char* nulstr=" ";
+    void* data=NULL;
+    char* cdata=NULL;
+
+    npy_dtype = PyArray_TYPE(array);
+    fits_dtype = npy_to_fits_table_type(npy_dtype,0);
+
+    nelem = PyArray_SIZE(array);
+
+    if (fits_dtype == TSTRING) {
+        npy_intp i=0;
+        LONGLONG rownum=0;
+
+        for (i=0; i<nelem; i++) {
+            rownum = i+1;
+            cdata = (char*) PyArray_GETPTR1(array, i);
+            if (fits_read_col_str(fits,colnum,rownum,firstelem,1,nulstr,&cdata,anynul,status) > 0) {
+                return 1;
+            }
+        }
+
+        /*
+
+        LONGLONG twidth=0;
+        char** strdata=NULL;
+
+        cdata = (char*) PyArray_DATA(array);
+
+        strdata=malloc(nelem*sizeof(char*));
+        if (NULL==strdata) {
+            PyErr_SetString(PyExc_MemoryError, "could not allocate temporary string pointers");
+            *status = 99;
+            return 1;
+
+        }
+
+
+        twidth=fits->Fptr->tableptr[colnum-1].twidth;
+        for (i=0; i<nelem; i++) {
+            strdata[i] = &cdata[i*twidth];
+        }
+
+        if (fits_read_col_str(fits,colnum,firstrow,firstelem,nelem,nulstr,strdata,anynul,status) > 0) {
+            free(strdata);
+            return 1;
+        }
+
+        free(strdata);
+        */
+
+    } else {
+        data=PyArray_DATA(array);
+        if (fits_read_col(fits,fits_dtype,colnum,firstrow,firstelem,nelem,nulval,data,anynul,status) > 0) {
+            return 1;
+        }
+    }
+
+    return 0;
+
+}
+static int read_ascii_column_byrow(
+    fitsfile* fits, int colnum, PyObject* array, PyObject* rowsObj, int* status) {
+
+    int npy_dtype=0;
+    int fits_dtype=0;
+
+    npy_intp nelem=0;
+    LONGLONG firstelem=1;
+    LONGLONG rownum=0;
+    npy_intp nrows=-1;
+
+    int* anynul=NULL;
+    void* nulval=0;
+    char* nulstr=" ";
+    void* data=NULL;
+    char* cdata=NULL;
+
+    int dorows=0;
+
+    npy_intp i=0;
+
+    npy_dtype = PyArray_TYPE(array);
+    fits_dtype = npy_to_fits_table_type(npy_dtype,0);
+
+    nelem = PyArray_SIZE(array);
+
+
+    if (rowsObj != Py_None) {
+        dorows=1;
+        nrows = PyArray_SIZE(rowsObj);
+        if (nrows != nelem) {
+            PyErr_Format(PyExc_ValueError,
+                         "input array[%ld] and rows[%ld] have different size", nelem,nrows);
+            return 1;
+        }
+    }
+
+    for (i=0; i<nelem; i++) {
+        // rows are 1-offset
+        if (dorows) {
+            rownum = 1 + (LONGLONG) *(npy_int64*) PyArray_GETPTR1(rowsObj, i);
+        } else {
+            rownum = 1 + (LONGLONG) i;
+        }
+        data = PyArray_GETPTR1(array, i);
+
+        if (fits_dtype == TSTRING) {
+            cdata = (char*) data;
+            if (fits_read_col_str(fits,colnum,rownum,firstelem,1,nulstr,&cdata,anynul,status) > 0) {
+                return 1;
+            }
+        } else {
+            if (fits_read_col(fits,fits_dtype,colnum,rownum,firstelem,1,nulval,data,anynul,status) > 0) {
+                return 1;
+            }
+        }
+    }
+
+    return 0;
+}
+
+
+static int read_ascii_column(fitsfile* fits, int colnum, PyObject* array, PyObject* rowsObj, int* status) {
+
+    int ret=0;
+    if (rowsObj != Py_None || !PyArray_ISCONTIGUOUS(array)) {
+        ret = read_ascii_column_byrow(fits, colnum, array, rowsObj, status);
+    } else {
+        ret = read_ascii_column_all(fits, colnum, array, status);
+    }
+
+    return ret;
+}
+
+
+
+
+
+// read a subset of rows for the input column
+// the row array is assumed to be unique and sorted.
+static int read_binary_column(
+        fitsfile* fits,
+        int colnum,
+        npy_intp nrows,
+        npy_int64* rows,
+        void* data,
+        npy_intp stride,
+        int* status) {
+
+    FITSfile* hdu=NULL;
+    tcolumn* colptr=NULL;
+    LONGLONG file_pos=0, irow=0;
+    npy_int64 row=0;
+
+    LONGLONG repeat=0;
+    LONGLONG width=0;
+
+    int rows_sent=0;
+
+    // use char for pointer arith. It's actually ok to use void as char but
+    // this is just in case.
+    char* ptr=NULL;
+
+    // using struct defs here, could cause problems
+    hdu = fits->Fptr;
+    colptr = hdu->tableptr + (colnum-1);
+
+    repeat = colptr->trepeat;
+    width = colptr->tdatatype == TSTRING ? 1 : colptr->twidth;
+
+    rows_sent = nrows == hdu->numrows ? 0 : 1;
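+
+    // No type conversion happens in this path: we seek to the byte offset of
+    // each requested cell (ffmbyt) and copy repeat*width raw bytes
+    // (ffgbytoff), so the input array must already match the on-disk column
+    // layout.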
+
+    ptr = (char*) data;
+    for (irow=0; irow<nrows; irow++) {
+        if (rows_sent) {
+            row = rows[irow];
+        } else {
+            row = irow;
+        }
+        file_pos = hdu->datastart + row*hdu->rowlength + colptr->tbcol;
+        ffmbyt(fits, file_pos, REPORT_EOF, status);
+        if (ffgbytoff(fits, width, repeat, 0, (void*)ptr, status)) {
+            return 1;
+        }
+        ptr += stride;
+    }
+
+    return 0;
+}
+
+
+
+
+/*
+ * read from a column into an input array
+ */
+static PyObject *
+PyFITSObject_read_column(struct PyFITSObject* self, PyObject* args) {
+    int hdunum=0;
+    int hdutype=0;
+    int colnum=0;
+
+    FITSfile* hdu=NULL;
+    int status=0;
+
+    PyObject* array=NULL;
+
+    PyObject* rowsObj;
+
+    if (!PyArg_ParseTuple(args, (char*)"iiOO", &hdunum, &colnum, &array, &rowsObj)) {
+        return NULL;
+    }
+
+    if (self->fits == NULL) {
+        PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL");
+        return NULL;
+    }
+    if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    // using struct defs here, could cause problems
+    hdu = self->fits->Fptr;
+    if (hdutype == IMAGE_HDU) {
+        PyErr_SetString(PyExc_RuntimeError, "Cannot yet read columns from an IMAGE_HDU");
+        return NULL;
+    }
+    if (colnum < 1 || colnum > hdu->tfield) {
+        PyErr_SetString(PyExc_RuntimeError, "requested column is out of bounds");
+        return NULL;
+    }
+
+
+    if (hdutype == ASCII_TBL) {
+        if (read_ascii_column(self->fits, colnum, array, rowsObj, &status)) {
+            set_ioerr_string_from_status(status);
+            return NULL;
+        }
+    } else {
+        void* data=PyArray_DATA(array);
+        npy_intp nrows=0;
+        npy_int64* rows=NULL;
+        npy_intp stride=PyArray_STRIDE(array,0);
+        if (rowsObj == Py_None) {
+            nrows = hdu->numrows;
+        } else {
+            rows = get_int64_from_array(rowsObj, &nrows);
+        }
+
+        if (read_binary_column(self->fits, colnum, nrows, rows, data, stride, &status)) {
+            set_ioerr_string_from_status(status);
+            return NULL;
+        }
+    }
+    Py_RETURN_NONE;
+}
+
+
+
+
+/*
+ * Free all the elements in the python list as well as the list itself
+ */
+static void free_all_python_list(PyObject* list) {
+    if (PyList_Check(list)) {
+        Py_ssize_t i=0;
+        for (i=0; i<PyList_Size(list); i++) {
+            Py_XDECREF(PyList_GetItem(list, i));
+        }
+    }
+    Py_XDECREF(list);
+}
+
+static PyObject*
+read_var_string(fitsfile* fits, int colnum, LONGLONG row, LONGLONG nchar, int* status) {
+    LONGLONG firstelem=1;
+    char* nulstr=" ";
+    int* anynul=NULL;
+    char* str=NULL;
+    PyObject* stringObj=NULL;
+
+    str = calloc(nchar+1, sizeof(char));
+    if (str == NULL) {
+        PyErr_Format(PyExc_MemoryError,
+                     "Could not allocate string of size %lld", nchar);
+        return NULL;
+    }
+
+    if (fits_read_col_str(fits,colnum,row,firstelem,1,nulstr,&str,anynul,status) > 0) {
+        goto read_var_string_cleanup;
+    }
+#if PY_MAJOR_VERSION >= 3
+    // bytes
+    stringObj = Py_BuildValue("y",str);
+#else
+    stringObj = Py_BuildValue("s",str);
+#endif
+    if (NULL == stringObj) {
+        PyErr_Format(PyExc_MemoryError,
+                     "Could not allocate py string of size %lld", nchar);
+        goto read_var_string_cleanup;
+    }
+
+read_var_string_cleanup:
+    free(str);
+
+    return stringObj;
+}
+static PyObject*
+read_var_nums(fitsfile* fits, int colnum, LONGLONG row, LONGLONG nelem,
+              int fits_dtype, int npy_dtype, int* status) {
+    LONGLONG firstelem=1;
+    PyObject* arrayObj=NULL;
+    void* nulval=0;
+    int* anynul=NULL;
+    npy_intp dims[1];
+    int fortran=0;
+    void* data=NULL;
+
+
+    dims[0] = nelem;
+    arrayObj=PyArray_ZEROS(1, dims, npy_dtype, fortran);
+    if (arrayObj==NULL) {
+        PyErr_Format(PyExc_MemoryError,
+                     "Could not allocate array type %d size %lld",npy_dtype,nelem);
+        return NULL;
+    }
+    data = PyArray_DATA(arrayObj);
+    if (fits_read_col(fits,abs(fits_dtype),colnum,row,firstelem,nelem,nulval,data,anynul,status) > 0) {
+        Py_XDECREF(arrayObj);
+        return NULL;
+    }
+
+    return arrayObj;
+}
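+
+// Variable length cells are read one row at a time: fits_read_descriptll
+// yields the (repeat, offset) descriptor for a row, then the cell is read as
+// a string (read_var_string) or a numpy array (read_var_nums) and appended
+// to a python list, since different rows can hold different element counts.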
+/*
+ * read a variable length column as a list of arrays
+ * what about strings?
+ */
+static PyObject *
+PyFITSObject_read_var_column_as_list(struct PyFITSObject* self, PyObject* args) {
+    int hdunum=0;
+    int colnum=0;
+    PyObject* rowsObj=NULL;
+
+    int hdutype=0;
+    int ncols=0;
+    const npy_int64* rows=NULL;
+    LONGLONG nrows=0;
+    int get_all_rows=0;
+
+    int status=0, tstatus=0;
+
+    int fits_dtype=0;
+    int npy_dtype=0;
+    int isvariable=0;
+    LONGLONG repeat=0;
+    LONGLONG width=0;
+    LONGLONG offset=0;
+    LONGLONG i=0;
+    LONGLONG row=0;
+
+    PyObject* listObj=NULL;
+    PyObject* tempObj=NULL;
+
+    if (!PyArg_ParseTuple(args, (char*)"iiO", &hdunum, &colnum, &rowsObj)) {
+        return NULL;
+    }
+
+    if (self->fits == NULL) {
+        PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL");
+        return NULL;
+    }
+    if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    if (hdutype == IMAGE_HDU) {
+        PyErr_SetString(PyExc_RuntimeError, "Cannot yet read columns from an IMAGE_HDU");
+        return NULL;
+    }
+    // using struct defs here, could cause problems
+    fits_get_num_cols(self->fits, &ncols, &status);
+    if (colnum < 1 || colnum > ncols) {
+        PyErr_SetString(PyExc_RuntimeError, "requested column is out of bounds");
+        return NULL;
+    }
+
+    if (fits_get_coltypell(self->fits, colnum, &fits_dtype, &repeat, &width, &status) > 0) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    npy_dtype = fits_to_npy_table_type(fits_dtype, &isvariable);
+    if (npy_dtype < 0) {
+        return NULL;
+    }
+    if (!isvariable) {
+        PyErr_Format(PyExc_TypeError,"Column %d not a variable length %d", colnum, fits_dtype);
+        return NULL;
+    }
+
+    if (rowsObj == Py_None) {
+        fits_get_num_rowsll(self->fits, &nrows, &tstatus);
+        get_all_rows=1;
+    } else {
+        npy_intp tnrows=0;
+        rows = (const npy_int64*) get_int64_from_array(rowsObj, &tnrows);
+        nrows=(LONGLONG) tnrows;
+        get_all_rows=0;
+    }
+
+    listObj = PyList_New(0);
+
+    for (i=0; i<nrows; i++) {
+        if (get_all_rows) {
+            row = i+1;
+        } else {
+            row = (LONGLONG) rows[i] + 1;
+        }
+
+        if (fits_read_descriptll(self->fits, colnum, row, &repeat, &offset, &status) > 0) {
+            goto read_var_column_cleanup;
+        }
+
+        if (fits_dtype == -TSTRING) {
+            tempObj = read_var_string(self->fits,colnum,row,repeat,&status);
+        } else {
+            tempObj = read_var_nums(self->fits,colnum,row,repeat,
+                                    fits_dtype,npy_dtype,&status);
+        }
+        if (tempObj == NULL) {
+            tstatus=1;
+            goto read_var_column_cleanup;
+        }
+        PyList_Append(listObj, tempObj);
+        Py_XDECREF(tempObj);
+    }
+
+
+read_var_column_cleanup:
+
+    if (status != 0 || tstatus != 0) {
+        Py_XDECREF(tempObj);
+        free_all_python_list(listObj);
+        if (status != 0) {
+            set_ioerr_string_from_status(status);
+        }
+        return NULL;
+    }
+
+    return listObj;
+}
+
+
+// read specified columns and rows
+static int read_binary_rec_columns(
+        fitsfile* fits,
+        npy_intp ncols, npy_int64* colnums,
+        npy_intp nrows, npy_int64* rows,
+        void* data, int* status) {
+    FITSfile* hdu=NULL;
+    tcolumn* colptr=NULL;
+    LONGLONG file_pos=0;
+    npy_intp col=0;
+    npy_int64 colnum=0;
+
+    int rows_sent=0;
+    npy_intp irow=0;
+    npy_int64 row=0;
+
+    // use char for pointer arith. It's actually ok to use void as char but
+    // this is just in case.
+    char* ptr;
+
+    LONGLONG gsize=0; // number of bytes in column
+    LONGLONG repeat=0;
+    LONGLONG width=0;
+
+    // using struct defs here, could cause problems
+    hdu = fits->Fptr;
+
+    rows_sent = nrows == hdu->numrows ? 0 : 1;
+
+    ptr = (char*) data;
+    for (irow=0; irow<nrows; irow++) {
+        if (rows_sent) {
+            row = rows[irow];
+        } else {
+            row = irow;
+        }
+
+        for (col=0; col < ncols; col++) {
+
+            colnum = colnums[col];
+            colptr = hdu->tableptr + (colnum-1);
+
+            repeat = colptr->trepeat;
+            width = colptr->tdatatype == TSTRING ? 1 : colptr->twidth;
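+
+            // gsize is the number of bytes this column occupies in one row;
+            // TSTRING columns use width=1, so repeat counts single characters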
+            gsize = repeat*width;
+
+            file_pos = hdu->datastart + row*hdu->rowlength + colptr->tbcol;
+
+            if (colptr->tdatatype == TBIT) {
+                if (fits_read_col_bit(fits, colnum, row+1, 1, repeat, (char*)ptr, status)) {
+                    return 1;
+                }
+            } else {
+                // can just do one status check, since status are inherited.
+                ffmbyt(fits, file_pos, REPORT_EOF, status);
+                if (ffgbytoff(fits, width, repeat, 0, (void*)ptr, status)) {
+                    return 1;
+                }
+            }
+            ptr += gsize;
+        }
+    }
+
+    return 0;
+}
+
+
+
+// python method for reading specified columns and rows
+static PyObject *
+PyFITSObject_read_columns_as_rec(struct PyFITSObject* self, PyObject* args) {
+    int hdunum=0;
+    int hdutype=0;
+    npy_intp ncols=0;
+    npy_int64* colnums=NULL;
+    FITSfile* hdu=NULL;
+
+    int status=0;
+
+    PyObject* columnsobj=NULL;
+    PyObject* array=NULL;
+    void* data=NULL;
+
+    PyObject* rowsobj=NULL;
+
+    if (!PyArg_ParseTuple(args, (char*)"iOOO", &hdunum, &columnsobj, &array, &rowsobj)) {
+        return NULL;
+    }
+
+    if (self->fits == NULL) {
+        PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL");
+        return NULL;
+    }
+    if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+        goto recread_columns_cleanup;
+    }
+
+    if (hdutype == IMAGE_HDU) {
+        PyErr_SetString(PyExc_RuntimeError, "Cannot read IMAGE_HDU into a recarray");
+        return NULL;
+    }
+
+    colnums = get_int64_from_array(columnsobj, &ncols);
+    if (colnums == NULL) {
+        return NULL;
+    }
+
+    hdu = self->fits->Fptr;
+    data = PyArray_DATA(array);
+    npy_intp nrows;
+    npy_int64* rows=NULL;
+    if (rowsobj == Py_None) {
+        nrows = hdu->numrows;
+    } else {
+        rows = get_int64_from_array(rowsobj, &nrows);
+    }
+    if (read_binary_rec_columns(self->fits, ncols, colnums, nrows, rows, data, &status)) {
+        goto recread_columns_cleanup;
+    }
+
+recread_columns_cleanup:
+
+    if (status != 0) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+    Py_RETURN_NONE;
+}
+
+
+
+/*
+ * read specified columns and rows
+ *
+ * Move by offset instead of just groupsize; this allows us to read into a
+ * recarray while skipping some fields, e.g. variable length array fields, to
+ * be read separately.
+ *
+ * If rows is NULL, then nrows are read consecutively.
+ */
+
+static int read_columns_as_rec_byoffset(
+        fitsfile* fits,
+        npy_intp ncols,
+        const npy_int64* colnums,        // columns to read from file
+        const npy_int64* field_offsets,  // offsets of corresponding fields within array
+        npy_intp nrows,
+        const npy_int64* rows,
+        char* data,
+        npy_intp recsize,
+        int* status) {
+
+    FITSfile* hdu=NULL;
+    tcolumn* colptr=NULL;
+    LONGLONG file_pos=0;
+    npy_intp col=0;
+    npy_int64 colnum=0;
+
+    char* ptr=NULL;
+
+    int get_all_rows=1;
+    npy_intp irow=0;
+    npy_int64 row=0;
+
+    long groupsize=0;  // number of bytes in column
+    long ngroups=1;    // number to read, one for row-by-row reading
+    long group_gap=0;  // gap between groups, zero since we aren't using it
+
+    if (rows != NULL) {
+        get_all_rows=0;
+    }
+
+    // using struct defs here, could cause problems
+    hdu = fits->Fptr;
+    for (irow=0; irow<nrows; irow++) {
+        if (get_all_rows) {
+            row = irow;
+        } else {
+            row = rows[irow];
+        }
+
+        for (col=0; col < ncols; col++) {
+
+            colnum = colnums[col];
+            colptr = hdu->tableptr + (colnum-1);
+
+            groupsize = get_groupsize(colptr);
+
+            ptr = data + irow*recsize + field_offsets[col];
+
+            file_pos = hdu->datastart + row*hdu->rowlength + colptr->tbcol;
+
+            // can just do one status check, since status are inherited.
+            ffmbyt(fits, file_pos, REPORT_EOF, status);
+            if (ffgbytoff(fits, groupsize, ngroups, group_gap, (void*) ptr, status)) {
+                return 1;
+            }
+        }
+    }
+
+    return 0;
+}
+
+
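+
+// In the byoffset scheme the destination of column col in row irow is
+// data + irow*recsize + field_offsets[col], so fields that are skipped
+// (e.g. variable length fields read separately) simply never appear in
+// colnums/field_offsets and are left untouched in the output array.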
+/* python method for reading specified columns and rows, moving by offset in
+ * the array to allow some fields not to be read.
+ *
+ * columnsObj is the columns in the fits file to read.
+ * offsetsObj is the offsets of the corresponding fields into the array.
+ */
+static PyObject *
+PyFITSObject_read_columns_as_rec_byoffset(struct PyFITSObject* self, PyObject* args) {
+    int status=0;
+    int hdunum=0;
+    int hdutype=0;
+
+    npy_intp ncols=0;
+    npy_intp noffsets=0;
+    npy_intp nrows=0;
+    const npy_int64* colnums=NULL;
+    const npy_int64* offsets=NULL;
+    const npy_int64* rows=NULL;
+
+    PyObject* columnsObj=NULL;
+    PyObject* offsetsObj=NULL;
+    PyObject* rowsObj=NULL;
+
+    PyObject* array=NULL;
+    void* data=NULL;
+    npy_intp recsize=0;
+
+    if (!PyArg_ParseTuple(args, (char*)"iOOOO", &hdunum, &columnsObj, &offsetsObj, &array, &rowsObj)) {
+        return NULL;
+    }
+
+    if (self->fits == NULL) {
+        PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL");
+        return NULL;
+    }
+    if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+        goto recread_columns_byoffset_cleanup;
+    }
+
+    if (hdutype == IMAGE_HDU) {
+        PyErr_SetString(PyExc_RuntimeError, "Cannot read IMAGE_HDU into a recarray");
+        return NULL;
+    }
+
+    colnums = (const npy_int64*) get_int64_from_array(columnsObj, &ncols);
+    if (colnums == NULL) {
+        return NULL;
+    }
+    offsets = (const npy_int64*) get_int64_from_array(offsetsObj, &noffsets);
+    if (offsets == NULL) {
+        return NULL;
+    }
+    if (noffsets != ncols) {
+        PyErr_Format(PyExc_ValueError,
+                     "%ld columns requested but got %ld offsets",
+                     ncols, noffsets);
+        return NULL;
+    }
+
+    if (rowsObj != Py_None) {
+        rows = (const npy_int64*) get_int64_from_array(rowsObj, &nrows);
+    } else {
+        nrows = PyArray_SIZE(array);
+    }
+
+    data = PyArray_DATA(array);
+    recsize = PyArray_ITEMSIZE(array);
+    if (read_columns_as_rec_byoffset(
+            self->fits,
+            ncols, colnums, offsets,
+            nrows,
+            rows,
+            (char*) data,
+            recsize,
+            &status) > 0) {
+        goto recread_columns_byoffset_cleanup;
+    }
+
+recread_columns_byoffset_cleanup:
+
+    if (status != 0) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+    Py_RETURN_NONE;
+}
+
+
+
+// read specified rows, all columns
+static int read_rec_bytes_byrow(
+        fitsfile* fits,
+        npy_intp nrows, npy_int64* rows,
+        void* data, int* status) {
+
+    FITSfile* hdu=NULL;
+
+    npy_intp irow=0;
+    LONGLONG firstrow=1;
+    LONGLONG firstchar=1;
+
+    // use char for pointer arith. It's actually ok to use void as char but
+    // this is just in case.
+    unsigned char* ptr;
+
+    // using struct defs here, could cause problems
+    hdu = fits->Fptr;
+    ptr = (unsigned char*) data;
+
+    for (irow=0; irow<nrows; irow++) {
+        firstrow = 1 + (LONGLONG) rows[irow];
+
+        if (fits_read_tblbytes(fits, firstrow, firstchar, hdu->rowlength, ptr, status)) {
+            return 1;
+        }
+
+        ptr += hdu->rowlength;
+    }
+
+    return 0;
+}
+// read specified rows, all columns
+/*
+static int read_rec_bytes_byrowold(
+        fitsfile* fits,
+        npy_intp nrows, npy_int64* rows,
+        void* data, int* status) {
+    FITSfile* hdu=NULL;
+    LONGLONG file_pos=0;
+
+    npy_intp irow=0;
+    npy_int64 row=0;
+
+    // use char for pointer arith. It's actually ok to use void as char but
+    // this is just in case.
+    char* ptr;
+
+    long ngroups=1; // number to read, one for row-by-row reading
+    long offset=0;  // gap between groups, not stride. zero since we aren't using it
+
+    // using struct defs here, could cause problems
+    hdu = fits->Fptr;
+    ptr = (char*) data;
+
+    for (irow=0; irow<nrows; irow++) {
+        row = rows[irow];
+        file_pos = hdu->datastart + row*hdu->rowlength;
+
+        // can just do one status check, since status are inherited.
+ ffmbyt(fits, file_pos, REPORT_EOF, status); + if (ffgbytoff(fits, hdu->rowlength, ngroups, offset, (void*) ptr, status)) { + return 1; + } + ptr += hdu->rowlength; + } + + return 0; +} +*/ + + +// python method to read all columns but subset of rows +static PyObject * +PyFITSObject_read_rows_as_rec(struct PyFITSObject* self, PyObject* args) { + int hdunum=0; + int hdutype=0; + + int status=0; + PyObject* array=NULL; + void* data=NULL; + + PyObject* rowsObj=NULL; + npy_intp nrows=0; + npy_int64* rows=NULL; + + if (!PyArg_ParseTuple(args, (char*)"iOO", &hdunum, &array, &rowsObj)) { + return NULL; + } + + if (self->fits == NULL) { + PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL"); + return NULL; + } + if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) { + goto recread_byrow_cleanup; + } + + if (hdutype == IMAGE_HDU) { + PyErr_SetString(PyExc_RuntimeError, "Cannot read IMAGE_HDU into a recarray"); + return NULL; + } + + data = PyArray_DATA(array); + + rows = get_int64_from_array(rowsObj, &nrows); + if (rows == NULL) { + return NULL; + } + + if (read_rec_bytes_byrow(self->fits, nrows, rows, data, &status)) { + goto recread_byrow_cleanup; + } + +recread_byrow_cleanup: + + if (status != 0) { + set_ioerr_string_from_status(status); + return NULL; + } + Py_RETURN_NONE; +} + + + + + /* Read the range of rows, 1-offset. It is assumed the data match the table + * perfectly. + */ + +static int read_rec_range(fitsfile* fits, LONGLONG firstrow, LONGLONG nrows, void* data, int* status) { + // can also use this for reading row ranges + LONGLONG firstchar=1; + LONGLONG nchars=0; + + nchars = (fits->Fptr)->rowlength*nrows; + + if (fits_read_tblbytes(fits, firstrow, firstchar, nchars, (unsigned char*) data, status)) { + return 1; + } + + return 0; +} + + + + +/* here rows are 1-offset, unlike when reading a specific subset of rows */ +static PyObject * +PyFITSObject_read_as_rec(struct PyFITSObject* self, PyObject* args) { + int hdunum=0; + int hdutype=0; + + int status=0; + PyObject* array=NULL; + void* data=NULL; + + PY_LONG_LONG firstrow=0; + PY_LONG_LONG lastrow=0; + PY_LONG_LONG nrows=0; + + if (!PyArg_ParseTuple(args, (char*)"iLLO", &hdunum, &firstrow, &lastrow, &array)) { + return NULL; + } + + if (self->fits == NULL) { + PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL"); + return NULL; + } + if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) { + goto recread_asrec_cleanup; + } + + if (hdutype == IMAGE_HDU) { + PyErr_SetString(PyExc_RuntimeError, "Cannot read IMAGE_HDU into a recarray"); + return NULL; + } + + data = PyArray_DATA(array); + + nrows=lastrow-firstrow+1; + if (read_rec_range(self->fits, (LONGLONG)firstrow, (LONGLONG)nrows, data, &status)) { + goto recread_asrec_cleanup; + } + +recread_asrec_cleanup: + + if (status != 0) { + set_ioerr_string_from_status(status); + return NULL; + } + Py_RETURN_NONE; +} + + +// read an n-dimensional "image" into the input array. Only minimal checking +// of the input array is done. 
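+// The python caller allocates a numpy array with the shape and dtype of the
+// on-disk image and this method fills it in place; a size mismatch raises an
+// error rather than reallocating.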
+// Note numpy allows a maximum of 32 dimensions
+static PyObject *
+PyFITSObject_read_image(struct PyFITSObject* self, PyObject* args) {
+    int hdunum=0;
+    int hdutype=0;
+    int status=0;
+    PyObject* array=NULL;
+    void* data=NULL;
+    int npy_dtype=0;
+    int dummy=0, fits_read_dtype=0;
+
+    int maxdim=NUMPY_MAX_DIMS;  // numpy maximum
+    int datatype=0;             // type info for axis
+    int naxis=0;                // number of axes
+    int i=0;
+    LONGLONG naxes[NUMPY_MAX_DIMS];  // size of each axis
+    LONGLONG firstpixels[NUMPY_MAX_DIMS];
+    LONGLONG size=0;
+    npy_intp arrsize=0;
+
+    int anynul=0;
+
+    if (!PyArg_ParseTuple(args, (char*)"iO", &hdunum, &array)) {
+        return NULL;
+    }
+
+    if (self->fits == NULL) {
+        PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL");
+        return NULL;
+    }
+    if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+        return NULL;
+    }
+
+    if (fits_get_img_paramll(self->fits, maxdim, &datatype, &naxis,
+                             naxes, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    // make sure dims match
+    size=0;
+    size = naxes[0];
+    for (i=1; i< naxis; i++) {
+        size *= naxes[i];
+    }
+    arrsize = PyArray_SIZE(array);
+    data = PyArray_DATA(array);
+
+    if (size != arrsize) {
+        PyErr_Format(PyExc_RuntimeError,
+                     "Input array size is %ld but on disk array size is %lld",
+                     arrsize, size);
+        return NULL;
+    }
+
+    npy_dtype = PyArray_TYPE(array);
+    npy_to_fits_image_types(npy_dtype, &dummy, &fits_read_dtype);
+
+    for (i=0; i<naxis; i++) {
+        firstpixels[i] = 1;
+    }
+    if (fits_read_pixll(self->fits, fits_read_dtype, firstpixels, size,
+                        0, data, &anynul, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+
+    Py_RETURN_NONE;
+}
+
+static PyObject *
+PyFITSObject_read_raw(struct PyFITSObject* self, PyObject* args) {
+    if (self->fits == NULL) {
+        PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL");
+        return NULL;
+    }
+    //fitsfile* fits = self->fits;
+    FITSfile* FITS = self->fits->Fptr;
+    int status = 0;
+    char* filedata;
+    LONGLONG sz;
+    LONGLONG io_pos;
+    PyObject *stringobj;
+
+    // Flush (close & reopen HDU) to make everything consistent
+    ffflus(self->fits, &status);
+    if (status) {
+        PyErr_Format(PyExc_RuntimeError,
+                     "Failed to flush FITS file data to disk; CFITSIO code %i",
+                     status);
+        return NULL;
+    }
+    // Allocate buffer for string
+    sz = FITS->filesize;
+    // Create python string object of requested size, uninitialized
+    stringobj = PyBytes_FromStringAndSize(NULL, sz);
+    if (!stringobj) {
+        PyErr_Format(PyExc_RuntimeError,
+                     "Failed to allocate python string object to hold FITS file data: %i bytes",
+                     (int)sz);
+        return NULL;
+    }
+    // Grab pointer to the memory buffer of the python string object
+    filedata = PyBytes_AsString(stringobj);
+    if (!filedata) {
+        Py_DECREF(stringobj);
+        return NULL;
+    }
+    // Remember old file position
+    io_pos = FITS->io_pos;
+    // Seek to beginning of file
+    if (ffseek(FITS, 0)) {
+        Py_DECREF(stringobj);
+        PyErr_Format(PyExc_RuntimeError,
+                     "Failed to seek to beginning of FITS file");
+        return NULL;
+    }
+    // Read into filedata
+    if (ffread(FITS, sz, filedata, &status)) {
+        Py_DECREF(stringobj);
+        PyErr_Format(PyExc_RuntimeError,
+                     "Failed to read file data into memory: CFITSIO code %i",
+                     status);
+        return NULL;
+    }
+    // Seek back to where we were
+    if (ffseek(FITS, io_pos)) {
+        Py_DECREF(stringobj);
+        PyErr_Format(PyExc_RuntimeError,
+                     "Failed to seek back to original FITS file position");
+        return NULL;
+    }
+    return stringobj;
+}
+
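+// Helper for read_image_slice below: convert the npy_int64 first/last/step
+// arrays sent from python into the long arrays that fits_read_subset
+// expects. The three calloc'd arrays are owned by the caller, which must
+// free them.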
+static int get_long_slices(PyObject* fpix_arr,
+                           PyObject* lpix_arr,
+                           PyObject* step_arr,
+                           long** fpix,
+                           long** lpix,
+                           long** step) {
+
+    int i=0;
+    npy_int64* ptr=NULL;
+    npy_intp fsize=0, lsize=0, ssize=0;
+
+    fsize=PyArray_SIZE(fpix_arr);
+    lsize=PyArray_SIZE(lpix_arr);
+    ssize=PyArray_SIZE(step_arr);
+
+    if (lsize != fsize || ssize != fsize) {
+        PyErr_SetString(PyExc_RuntimeError,
+                        "start/end/step must be same len");
+        return 1;
+    }
+
+    *fpix=calloc(fsize, sizeof(long));
+    *lpix=calloc(fsize, sizeof(long));
+    *step=calloc(fsize, sizeof(long));
+
+    for (i=0; i<fsize; i++) {
+        ptr=(npy_int64*) PyArray_GETPTR1(fpix_arr, i);
+        (*fpix)[i] = (long) *ptr;
+
+        ptr=(npy_int64*) PyArray_GETPTR1(lpix_arr, i);
+        (*lpix)[i] = (long) *ptr;
+
+        ptr=(npy_int64*) PyArray_GETPTR1(step_arr, i);
+        (*step)[i] = (long) *ptr;
+    }
+
+    return 0;
+}
+
+static PyObject *
+PyFITSObject_read_image_slice(struct PyFITSObject* self, PyObject* args) {
+    int status=0;
+    int hdunum=0;
+    int hdutype=0;
+
+    PyObject* fpix_arr=NULL;
+    PyObject* lpix_arr=NULL;
+    PyObject* step_arr=NULL;
+    long* fpix=NULL;
+    long* lpix=NULL;
+    long* step=NULL;
+
+    PyObject* array=NULL;
+    void* data=NULL;
+    int npy_dtype=0;
+    int dummy=0, fits_read_dtype=0;
+
+    int anynul=0;
+
+    if (!PyArg_ParseTuple(args, (char*)"iOOOO",
+                          &hdunum, &fpix_arr, &lpix_arr, &step_arr, &array)) {
+        return NULL;
+    }
+
+    if (self->fits == NULL) {
+        PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL");
+        return NULL;
+    }
+    if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+        return NULL;
+    }
+
+    if (get_long_slices(fpix_arr,lpix_arr,step_arr,
+                        &fpix,&lpix,&step)) {
+        return NULL;
+    }
+    data = PyArray_DATA(array);
+
+    npy_dtype = PyArray_TYPE(array);
+    npy_to_fits_image_types(npy_dtype, &dummy, &fits_read_dtype);
+
+    if (fits_read_subset(self->fits, fits_read_dtype, fpix, lpix, step,
+                         0, data, &anynul, &status)) {
+        set_ioerr_string_from_status(status);
+        goto read_image_slice_cleanup;
+    }
+
+read_image_slice_cleanup:
+    free(fpix);
+    free(lpix);
+    free(step);
+
+    if (status != 0) {
+        return NULL;
+    }
+
+    Py_RETURN_NONE;
+}
+
+
+
+
+// read the entire header as list of dicts with name,value,comment and full
+// card
+static PyObject *
+PyFITSObject_read_header(struct PyFITSObject* self, PyObject* args) {
+    int status=0;
+    int hdunum=0;
+    int hdutype=0;
+
+    char keyname[FLEN_KEYWORD];
+    char value[FLEN_VALUE];
+    char comment[FLEN_COMMENT];
+    char card[FLEN_CARD];
+
+    int nkeys=0, morekeys=0, i=0;
+
+    PyObject* list=NULL;
+    PyObject* dict=NULL;  // to hold the dict for each record
+
+
+    if (!PyArg_ParseTuple(args, (char*)"i", &hdunum)) {
+        return NULL;
+    }
+
+    if (self->fits == NULL) {
+        PyErr_SetString(PyExc_RuntimeError, "FITS file is NULL");
+        return NULL;
+    }
+    if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    if (fits_get_hdrspace(self->fits, &nkeys, &morekeys, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    list=PyList_New(nkeys);
+    for (i=0; i<nkeys; i++) {
+
+        if (fits_read_record(self->fits, i+1, card, &status)) {
+            // is this enough?
+            Py_XDECREF(list);
+            set_ioerr_string_from_status(status);
+            return NULL;
+        }
+
+        // this just returns the character string stored in the header; we
+        // can eval in python
+        if (fits_read_keyn(self->fits, i+1, keyname, value, comment, &status)) {
+            // is this enough?
+            Py_XDECREF(list);
+            set_ioerr_string_from_status(status);
+            return NULL;
+        }
+
+        dict = PyDict_New();
+        add_string_to_dict(dict,"card_string",card);
+        add_string_to_dict(dict,"name",keyname);
+        add_string_to_dict(dict,"value",value);
+        add_string_to_dict(dict,"comment",comment);
+
+        // PyList_SetItem and PyTuple_SetItem only exceptions, don't
+        // have to decref the object set
+        PyList_SetItem(list, i, dict);
+
+    }
+
+    return list;
+}
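+
+// Each header record is handed to python as a dict; for a single card the
+// python layer receives something like the following (values illustrative
+// only):
+//
+//     {'card_string': "EXTNAME = 'SCI     '           / extension name",
+//      'name': 'EXTNAME',
+//      'value': "'SCI     '",
+//      'comment': 'extension name'}
+//
+// Values are kept as raw header strings here and evaluated on the python
+// side.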
+
+static PyObject *
+PyFITSObject_write_checksum(struct PyFITSObject* self, PyObject* args) {
+    int status=0;
+    int hdunum=0;
+    int hdutype=0;
+
+    unsigned long datasum=0;
+    unsigned long hdusum=0;
+
+    PyObject* dict=NULL;
+
+    if (!PyArg_ParseTuple(args, (char*)"i", &hdunum)) {
+        return NULL;
+    }
+
+    if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    if (fits_write_chksum(self->fits, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+    if (fits_get_chksum(self->fits, &datasum, &hdusum, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    dict=PyDict_New();
+    add_long_long_to_dict(dict,"datasum",(long long)datasum);
+    add_long_long_to_dict(dict,"hdusum",(long long)hdusum);
+
+    return dict;
+}
+static PyObject *
+PyFITSObject_verify_checksum(struct PyFITSObject* self, PyObject* args) {
+    int status=0;
+    int hdunum=0;
+    int hdutype=0;
+
+    int dataok=0, hduok=0;
+
+    PyObject* dict=NULL;
+
+    if (!PyArg_ParseTuple(args, (char*)"i", &hdunum)) {
+        return NULL;
+    }
+
+    if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    if (fits_verify_chksum(self->fits, &dataok, &hduok, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    dict=PyDict_New();
+    add_long_to_dict(dict,"dataok",(long)dataok);
+    add_long_to_dict(dict,"hduok",(long)hduok);
+
+    return dict;
+}
+
+
+
+static PyObject *
+PyFITSObject_where(struct PyFITSObject* self, PyObject* args) {
+    int status=0;
+    int hdunum=0;
+    int hdutype=0;
+    char* expression=NULL;
+
+    LONGLONG nrows=0;
+
+    long firstrow=1;
+    long ngood=0;
+    char* row_status=NULL;
+
+
+    // Indices of rows for which expression is true
+    PyObject* indicesObj=NULL;
+    int ndim=1;
+    npy_intp dims[1];
+    npy_intp* data=NULL;
+    long i=0;
+
+
+    if (!PyArg_ParseTuple(args, (char*)"is", &hdunum, &expression)) {
+        return NULL;
+    }
+
+    if (fits_movabs_hdu(self->fits, hdunum, &hdutype, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    if (fits_get_num_rowsll(self->fits, &nrows, &status)) {
+        set_ioerr_string_from_status(status);
+        return NULL;
+    }
+
+    row_status = malloc(nrows*sizeof(char));
+    if (row_status==NULL) {
+        PyErr_SetString(PyExc_MemoryError, "Could not allocate row_status array");
+        return NULL;
+    }
+
+    if (fits_find_rows(self->fits, expression, firstrow, (long) nrows, &ngood, row_status, &status)) {
+        set_ioerr_string_from_status(status);
+        goto where_function_cleanup;
+    }
+
+    dims[0] = ngood;
+    indicesObj = PyArray_EMPTY(ndim, dims, NPY_INTP, 0);
+    if (indicesObj == NULL) {
+        PyErr_SetString(PyExc_MemoryError, "Could not allocate index array");
+        goto where_function_cleanup;
+    }
+
+    if (ngood > 0) {
+        data = PyArray_DATA(indicesObj);
+
+        for (i=0; i<nrows; i++) {
+            if (row_status[i]) {
+                *data = (npy_intp) i;
+                data++;
+            }
+        }
+    }
+
+where_function_cleanup:
+    free(row_status);
+
+    return indicesObj;
+}
+
+static PyTypeObject PyFITSType = {
+#if PY_MAJOR_VERSION >= 3
+    PyVarObject_HEAD_INIT(NULL, 0)
+#else
+    PyObject_HEAD_INIT(NULL)
+    0,                                   /*ob_size*/
+#endif
+    "_fitsio.FITS",                      /*tp_name*/
+    sizeof(struct PyFITSObject),         /*tp_basicsize*/
+    0,                                   /*tp_itemsize*/
+    (destructor)PyFITSObject_dealloc,    /*tp_dealloc*/
+    0,                                   /*tp_print*/
+    0,
/*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_compare*/ + //0, /*tp_repr*/ + (reprfunc)PyFITSObject_repr, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash */ + 0, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /*tp_flags*/ + "FITSIO Class", /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + PyFITSObject_methods, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + //0, /* tp_init */ + (initproc)PyFITSObject_init, /* tp_init */ + 0, /* tp_alloc */ + //PyFITSObject_new, /* tp_new */ + PyType_GenericNew, /* tp_new */ +}; + + +static PyMethodDef fitstype_methods[] = { + {"cfitsio_version", (PyCFunction)PyFITS_cfitsio_version, METH_NOARGS, "cfitsio_version\n\nReturn the cfitsio version."}, + {"parse_card", (PyCFunction)PyFITS_parse_card, METH_VARARGS, "parse_card\n\nparse the card to get the key name, value (as a string), data type and comment."}, + {"get_keytype", (PyCFunction)PyFITS_get_keytype, METH_VARARGS, "get_keytype\n\nparse the card to get the key type."}, + {"get_key_meta", (PyCFunction)PyFITS_get_key_meta, METH_VARARGS, "get_key_meta\n\nparse the card to get key metadata (keyclass,dtype)."}, + {NULL} /* Sentinel */ +}; + +#if PY_MAJOR_VERSION >= 3 + static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, + "_fitsio_wrap", /* m_name */ + "Defines the FITS class and some methods", /* m_doc */ + -1, /* m_size */ + fitstype_methods, /* m_methods */ + NULL, /* m_reload */ + NULL, /* m_traverse */ + NULL, /* m_clear */ + NULL, /* m_free */ + }; +#endif + + +#ifndef PyMODINIT_FUNC /* declarations for DLL import/export */ +#define PyMODINIT_FUNC void +#endif +PyMODINIT_FUNC +#if PY_MAJOR_VERSION >= 3 +PyInit__fitsio_wrap(void) +#else +init_fitsio_wrap(void) +#endif +{ + PyObject* m; + + PyFITSType.tp_new = PyType_GenericNew; + +#if PY_MAJOR_VERSION >= 3 + if (PyType_Ready(&PyFITSType) < 0) { + return NULL; + } + m = PyModule_Create(&moduledef); + if (m==NULL) { + return NULL; + } + +#else + if (PyType_Ready(&PyFITSType) < 0) { + return; + } + m = Py_InitModule3("_fitsio_wrap", fitstype_methods, "Define FITS type and methods."); + if (m==NULL) { + return; + } +#endif + + Py_INCREF(&PyFITSType); + PyModule_AddObject(m, "FITS", (PyObject *)&PyFITSType); + + import_array(); +#if PY_MAJOR_VERSION >= 3 + return m; +#endif +} diff --git a/fitsio/fitslib.py b/fitsio/fitslib.py new file mode 100644 index 0000000..45bf5d6 --- /dev/null +++ b/fitsio/fitslib.py @@ -0,0 +1,4966 @@ +""" +fitslib, part of the fitsio package. + +See the main docs at https://github.com/esheldon/fitsio + + Copyright (C) 2011 Erin Sheldon, BNL. erin dot sheldon at gmail dot com + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+"""
+from __future__ import with_statement, print_function
+import sys, os
+import numpy
+import copy
+import warnings
+
+from . import _fitsio_wrap
+from .util import FITSRuntimeWarning, cfitsio_version
+
+# for python3 compat
+try:
+    xrange=xrange
+except:
+    xrange=range
+
+from functools import reduce
+
+def read(filename, ext=None, extver=None, **keys):
+    """
+    Convenience function to read data from the specified FITS HDU
+
+    By default, all data are read. For tables, send columns= and rows= to
+    select subsets of the data. Table data are read into a recarray; use a
+    FITS object and read_column() to get a single column as an ordinary array.
+    For images, create a FITS object and use slice notation to read subsets.
+
+    Under the hood, a FITS object is constructed and data are read using
+    an associated FITSHDU object.
+
+    parameters
+    ----------
+    filename: string
+        A filename.
+    ext: number or string, optional
+        The extension. Either the numerical extension from zero
+        or a string extension name. If not sent, data is read from
+        the first HDU that has data.
+    extver: integer, optional
+        FITS allows multiple extensions to have the same name (extname). These
+        extensions can optionally specify an EXTVER version number in the
+        header. Send extver= to select a particular version. If extver is not
+        sent, the first one will be selected. If ext is an integer, the extver
+        is ignored.
+    columns: list or array, optional
+        An optional set of columns to read from table HDUs. Default is to
+        read all. Can be string or number.
+    rows: optional
+        An optional list of rows to read from table HDUs. Default is to
+        read all.
+    header: bool, optional
+        If True, read the FITS header and return a tuple (data,header)
+        Default is False.
+    case_sensitive: bool, optional
+        Match column names and extension names with case-sensitivity. Default
+        is False.
+    lower: bool, optional
+        If True, force all column names to lower case in output
+    upper: bool, optional
+        If True, force all column names to upper case in output
+    vstorage: string, optional
+        Set the default method to store variable length columns. Can be
+        'fixed' or 'object'. See docs on fitsio.FITS for details.
+    """
+
+    with FITS(filename, **keys) as fits:
+
+        header=keys.pop('header',False)
+
+        if ext is None:
+            for i in xrange(len(fits)):
+                if fits[i].has_data():
+                    ext=i
+                    break
+            if ext is None:
+                raise IOError("No extensions have data")
+
+        item=_make_item(ext, extver=extver)
+
+        data = fits[item].read(**keys)
+        if header:
+            h = fits[item].read_header()
+            return data, h
+        else:
+            return data
+
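+# Example usage of read(); a sketch, where 'data.fits' and the extension
+# numbers/columns are hypothetical:
+#
+#     import fitsio
+#     data = fitsio.read('data.fits')                  # first HDU with data
+#     data, hdr = fitsio.read('data.fits', ext=1, header=True)
+#     sub = fitsio.read('data.fits', ext=1, rows=[0, 5], columns=['x', 'y'])
+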
+
+def read_header(filename, ext=0, extver=None, case_sensitive=False, **keys):
+    """
+    Convenience function to read the header from the specified FITS HDU
+
+    The FITSHDR allows access to the values and comments by name and
+    number.
+
+    Under the hood, a FITS object is constructed and data are read using
+    an associated FITSHDU object.
+
+    parameters
+    ----------
+    filename: string
+        A filename.
+    ext: number or string, optional
+        The extension. Either the numerical extension from zero
+        or a string extension name. Default read primary header.
+    extver: integer, optional
+        FITS allows multiple extensions to have the same name (extname). These
+        extensions can optionally specify an EXTVER version number in the
+        header. Send extver= to select a particular version. If extver is not
+        sent, the first one will be selected. If ext is an integer, the extver
+        is ignored.
+    case_sensitive: bool, optional
+        Match extension names with case-sensitivity. Default is False.
+    """
+    item=_make_item(ext,extver=extver)
+    with FITS(filename, case_sensitive=case_sensitive) as fits:
+        return fits[item].read_header()
+
+def read_scamp_head(fname, header=None):
+    """
+    read a SCAMP .head file as a fits header FITSHDR object
+
+    parameters
+    ----------
+    fname: string
+        The path to the SCAMP .head file
+
+    header: FITSHDR, optional
+        Optionally combine the header with the input one. The input can
+        be any object convertible to a FITSHDR object
+
+    returns
+    -------
+    header: FITSHDR
+        A fits header object of type FITSHDR
+    """
+
+    with open(fname) as fobj:
+        lines=fobj.readlines()
+
+    lines=[l.strip() for l in lines if l[0:3] != 'END']
+
+    # if header is None an empty FITSHDR is created
+    hdr=FITSHDR(header)
+
+    for l in lines:
+        hdr.add_record(l)
+
+    return hdr
+
+def _make_item(ext, extver=None):
+    if extver is not None:
+        # an (ext, extver) tuple selects a specific extension version
+        item=(ext,extver)
+    else:
+        item=ext
+
+    return item
+
+def write(filename, data, extname=None, extver=None, units=None,
+          compress=None, table_type='binary', header=None,
+          clobber=False, **keys):
+    """
+    Convenience function to create a new HDU and write the data.
+
+    Under the hood, a FITS object is constructed. If you want to append rows
+    to an existing HDU, or modify data in an HDU, please construct a FITS
+    object.
+
+    parameters
+    ----------
+    filename: string
+        A filename.
+    data:
+        Either a normal n-dimensional array or a recarray. Images are written
+        to a new IMAGE_HDU and recarrays are written to BINARY_TBL or
+        ASCII_TBL hdus.
+    extname: string, optional
+        An optional name for the new header unit.
+    extver: integer, optional
+        FITS allows multiple extensions to have the same name (extname).
+        These extensions can optionally specify an EXTVER version number in
+        the header. Send extver= to set a particular version, which will
+        be represented in the header with keyname EXTVER. The extver must
+        be an integer > 0. If extver is not sent, the first one will be
+        selected. If ext is an integer, the extver is ignored.
+    compress: string, optional
+        A string representing the compression algorithm for images, default None.
+        Can be one of
+            'RICE'
+            'GZIP'
+            'GZIP_2'
+            'PLIO' (no unsigned or negative integers)
+            'HCOMPRESS'
+        (case-insensitive) See the cfitsio manual for details.
+
+    header: FITSHDR, list, dict, optional
+        A set of header keys to write. The keys are written before the data
+        is written to the table, preventing a resizing of the table area.
+
+        Can be one of these:
+            - FITSHDR object
+            - list of dictionaries containing 'name','value' and optionally
+              a 'comment' field; the order is preserved.
+            - a dictionary of keyword-value pairs; no comments are written
+              in this case, and the order is arbitrary.
+        Note required keywords such as NAXIS, XTENSION, etc are cleaned out.
+
+    clobber: bool, optional
+        If True, overwrite any existing file. Default is to append
+        a new extension on existing files.
+
+    ignore_empty: bool, optional
+        Default False. Unless set to True, only allow
+        empty HDUs in the zero extension.
+
+
+    table keywords
+    --------------
+    These keywords are only active when writing tables.
+
+    units: list
+        A list of strings representing units for each column.
+    table_type: string, optional
+        Either 'binary' or 'ascii', default 'binary'
+        Matching is case-insensitive
+    write_bitcols: bool, optional
+        Write boolean arrays in the FITS bitcols format, default False
+
+
+    """
+    with FITS(filename, 'rw', clobber=clobber, **keys) as fits:
+        fits.write(data,
+                   table_type=table_type,
+                   units=units,
+                   extname=extname,
+                   extver=extver,
+                   compress=compress,
+                   header=header,
+                   **keys)
+
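+# Example usage of write(); a sketch, where the file name and arrays are
+# hypothetical:
+#
+#     import numpy
+#     import fitsio
+#     img = numpy.zeros((64, 64), dtype='f4')
+#     fitsio.write('out.fits', img, clobber=True)      # new file, image HDU
+#     rec = numpy.zeros(10, dtype=[('x', 'f8'), ('y', 'i4')])
+#     fitsio.write('out.fits', rec, extname='CAT')     # appends a table HDU
+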
+
+ANY_HDU=-1
+
+READONLY=0
+READWRITE=1
+IMAGE_HDU=0
+ASCII_TBL=1
+BINARY_TBL=2
+
+NOCOMPRESS=0
+RICE_1 = 11
+GZIP_1 = 21
+GZIP_2 = 22
+PLIO_1 = 31
+HCOMPRESS_1 = 41
+
+class FITS(object):
+    """
+    A class to read and write FITS images and tables.
+
+    This class uses the cfitsio library for almost all relevant work.
+
+    parameters
+    ----------
+    filename: string
+        The filename to open.
+    mode: int/string, optional
+        The mode, either a string or integer.
+        For reading only
+            'r' or 0
+        For reading and writing
+            'rw' or 1
+        You can also use fitsio.READONLY and fitsio.READWRITE.
+
+        Default is 'r'
+    clobber: bool, optional
+        If the mode is READWRITE, and clobber=True, then remove any existing
+        file before opening.
+    case_sensitive: bool, optional
+        Match column names and extension names with case-sensitivity. Default
+        is False.
+    lower: bool, optional
+        If True, force all column names to lower case in output
+    upper: bool, optional
+        If True, force all column names to upper case in output
+    vstorage: string, optional
+        A string describing how, by default, to store variable length columns
+        in the output array. This can be overridden when reading by using the
+        vstorage keyword to the individual read methods. The options are
+
+            'fixed': Use a fixed length field in the array, with
+                dimensions equal to the max possible size for column.
+                Arrays are padded with zeros.
+            'object': Use an object for the field in the array.
+                Each element will then be an array of the right type,
+                but only using the memory needed to hold that element.
+
+        Default is 'fixed'. The rationale is that this is the option
+        of 'least surprise'
+    iter_row_buffer: integer
+        Number of rows to buffer when iterating over table HDUs.
+        Default is 1.
+    ignore_empty: bool, optional
+        Default False. Unless set to True, only allow
+        empty HDUs in the zero extension.
+
+    See the docs at https://github.com/esheldon/fitsio
+    """
+    def __init__(self, filename, mode='r', **keys):
+        self.keys=keys
+        filename = extract_filename(filename)
+        self._filename = filename
+
+        #self.mode=keys.get('mode','r')
+        self.mode=mode
+        self.case_sensitive=keys.get('case_sensitive',False)
+        self.ignore_empty=keys.get('ignore_empty', False)
+
+        self.verbose = keys.get('verbose',False)
+        clobber = keys.get('clobber',False)
+
+        if self.mode not in _int_modemap:
+            raise IOError("mode should be one of 'r','rw',"
+                          "READONLY,READWRITE")
+
+        self.charmode = _char_modemap[self.mode]
+        self.intmode = _int_modemap[self.mode]
+
+        # Will not test existence when reading, let cfitsio
+        # do the test and report an error. This allows opening
+        # urls etc.
+        create=0
+        if self.mode in [READWRITE,'rw']:
+            if clobber:
+                create=1
+                if os.path.exists(filename):
+                    os.remove(filename)
+            else:
+                if os.path.exists(filename):
+                    create=0
+                else:
+                    create=1
+
+        self._FITS = _fitsio_wrap.FITS(filename, self.intmode, create)
+
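+    # Typical usage (illustrative; 'data.fits' is hypothetical): open the
+    # file as a context manager so it is closed on exit, e.g.
+    #
+    #     with FITS('data.fits') as fits:
+    #         data = fits[1].read()
+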
+    def close(self):
+        """
+        Close the fits file and set relevant metadata to None
+        """
+        if hasattr(self,'_FITS'):
+            if self._FITS is not None:
+                self._FITS.close()
+                self._FITS=None
+        self._filename=None
+        self.mode=None
+        self.charmode=None
+        self.intmode=None
+        self.hdu_list=None
+        self.hdu_map=None
+
+    def movabs_ext(self, ext):
+        """
+        Move to the indicated zero-offset extension.
+
+        In general, it is not necessary to use this method explicitly.
+        """
+        return self._FITS.movabs_hdu(ext+1)
+
+    def movabs_hdu(self, hdunum):
+        """
+        Move to the indicated one-offset hdu number.
+
+        In general, it is not necessary to use this method explicitly.
+        """
+        return self._FITS.movabs_hdu(hdunum)
+
+    def movnam_ext(self, extname, hdutype=ANY_HDU, extver=0):
+        """
+        Move to the indicated extension by name
+
+        In general, it is not necessary to use this method explicitly.
+
+        returns the zero-offset extension number
+        """
+        extname=mks(extname)
+        hdu = self._FITS.movnam_hdu(hdutype, extname, extver)
+        return hdu-1
+
+    def movnam_hdu(self, extname, hdutype=ANY_HDU, extver=0):
+        """
+        Move to the indicated HDU by name
+
+        In general, it is not necessary to use this method explicitly.
+
+        returns the one-offset extension number
+        """
+        extname=mks(extname)
+        hdu = self._FITS.movnam_hdu(hdutype, extname, extver)
+        return hdu
+
+    def reopen(self):
+        """
+        close and reopen the fits file with the same mode
+        """
+        self._FITS.close()
+        del self._FITS
+        self._FITS = _fitsio_wrap.FITS(self._filename, self.intmode, 0)
+        self.update_hdu_list()
+
+    def write(self, data, units=None, extname=None, extver=None,
+              compress=None, tile_dims=None,
+              header=None,
+              names=None,
+              table_type='binary', write_bitcols=False, **keys):
+        """
+        Write the data to a new HDU.
+
+        This method is a wrapper. If this is an IMAGE_HDU, write_image is
+        called, otherwise write_table is called.
+
+        parameters
+        ----------
+        data: ndarray
+            An n-dimensional image or an array with fields.
+        extname: string, optional
+            An optional extension name.
+        extver: integer, optional
+            FITS allows multiple extensions to have the same name (extname).
+            These extensions can optionally specify an EXTVER version number in
+            the header. Send extver= to set a particular version, which will
+            be represented in the header with keyname EXTVER. The extver must
+            be an integer > 0. If extver is not sent, the first one will be
+            selected. If ext is an integer, the extver is ignored.
+        header: FITSHDR, list, dict, optional
+            A set of header keys to write. Can be one of these:
+                - FITSHDR object
+                - list of dictionaries containing 'name','value' and optionally
+                  a 'comment' field; the order is preserved.
+                - a dictionary of keyword-value pairs; no comments are written
+                  in this case, and the order is arbitrary.
+            Note required keywords such as NAXIS, XTENSION, etc are cleaned out.
+
+        Image-only keywords:
+            compress: string, optional
+                A string representing the compression algorithm for images, default None.
+                Can be one of
+                    'RICE'
+                    'GZIP'
+                    'GZIP_2'
+                    'PLIO' (no unsigned or negative integers)
+                    'HCOMPRESS'
+                (case-insensitive) See the cfitsio manual for details.
+
+        Table-only keywords:
+            units: list, optional:
+                A list of strings with units for each column.
+ table_type: string, optional + Either 'binary' or 'ascii', default 'binary' + Matching is case-insensitive + write_bitcols: bool, optional + Write boolean arrays in the FITS bitcols format, default False + + + restrictions + ------------ + The File must be opened READWRITE + """ + + isimage=False + if data is None: + isimage=True + elif isinstance(data,numpy.ndarray): + if data.dtype.fields == None: + isimage=True + + if isimage: + self.write_image(data, extname=extname, extver=extver, + compress=compress, tile_dims=tile_dims, + header=header) + else: + self.write_table(data, units=units, + extname=extname, extver=extver, header=header, + names=names, + table_type=table_type, + write_bitcols=write_bitcols) + + + + def write_image(self, img, extname=None, extver=None, + compress=None, tile_dims=None, header=None): + """ + Create a new image extension and write the data. + + parameters + ---------- + img: ndarray + An n-dimensional image. + extname: string, optional + An optional extension name. + extver: integer, optional + FITS allows multiple extensions to have the same name (extname). + These extensions can optionally specify an EXTVER version number in + the header. Send extver= to set a particular version, which will + be represented in the header with keyname EXTVER. The extver must + be an integer > 0. If extver is not sent, the first one will be + selected. If ext is an integer, the extver is ignored. + compress: string, optional + A string representing the compression algorithm for images, default None. + Can be one of + 'RICE' + 'GZIP' + 'GZIP_2' + 'PLIO' (no unsigned or negative integers) + 'HCOMPRESS' + (case-insensitive) See the cfitsio manual for details. + header: FITSHDR, list, dict, optional + A set of header keys to write. Can be one of these: + - FITSHDR object + - list of dictionaries containing 'name','value' and optionally + a 'comment' field; the order is preserved. + - a dictionary of keyword-value pairs; no comments are written + in this case, and the order is arbitrary. + Note required keywords such as NAXIS, XTENSION, etc are cleaed out. + + + restrictions + ------------ + The File must be opened READWRITE + """ + + self.create_image_hdu(img, + header=header, + extname=extname, extver=extver, + compress=compress, tile_dims=tile_dims) + + if header is not None: + self[-1].write_keys(header) + self[-1]._update_info() + + #if img is not None: + # self[-1].write(img) + + + def create_image_hdu(self, + img=None, + dims=None, + dtype=None, + extname=None, + extver=None, + compress=None, + tile_dims=None, + header=None): + """ + Create a new, empty image HDU and reload the hdu list. Either + create from an input image or from input dims and dtype + + fits.create_image_hdu(image, ...) + fits.create_image_hdu(dims=dims, dtype=dtype) + + If an image is sent, the data are also written. + + You can write data into the new extension using + fits[extension].write(image) + + Alternatively you can skip calling this function and instead just use + + fits.write(image) + or + fits.write_image(image) + + which will create the new image extension for you with the appropriate + structure, and write the data. + + parameters + ---------- + img: ndarray, optional + An image with which to determine the properties of the HDU. The + data will be written. + dims: sequence, optional + A sequence describing the dimensions of the image to be created + on disk. You must also send a dtype= + dtype: numpy data type + When sending dims= also send the data type. 
Can be of the + various numpy data type declaration styles, e.g. 'f8', + numpy.float64. + extname: string, optional + An optional extension name. + extver: integer, optional + FITS allows multiple extensions to have the same name (extname). + These extensions can optionally specify an EXTVER version number in + the header. Send extver= to set a particular version, which will + be represented in the header with keyname EXTVER. The extver must + be an integer > 0. If extver is not sent, the first one will be + selected. If ext is an integer, the extver is ignored. + compress: string, optional + A string representing the compression algorithm for images, default None. + Can be one of + 'RICE' + 'GZIP' + 'GZIP_2' + 'PLIO' (no unsigned or negative integers) + 'HCOMPRESS' + (case-insensitive) See the cfitsio manual for details. + + header: FITSHDR, list, dict, optional + This is only used to determine how many slots to reserve for + header keywords + + restrictions + ------------ + The File must be opened READWRITE + """ + + if (img is not None) or (img is None and dims is None): + from_image=True + elif dims is not None: + from_image=False + + if from_image: + img2send=img + if img is not None: + dims=img.shape + dtstr = img.dtype.descr[0][1][1:] + if img.size == 0: + raise ValueError("data must have at least 1 row") + + # data must be c-contiguous and native byte order + if not img.flags['C_CONTIGUOUS']: + # this always makes a copy + img2send = numpy.ascontiguousarray(img) + array_to_native(img2send, inplace=True) + else: + img2send = array_to_native(img, inplace=False) + + else: + self._ensure_empty_image_ok() + compress=None + tile_dims=None + + # we get dims from the input image + dims2send=None + else: + # img was None and dims was sent + if dtype is None: + raise ValueError("send dtype= with dims=") + + # this must work! 
+            dtype=numpy.dtype(dtype)
+            dtstr = dtype.descr[0][1][1:]
+            # use the example image to build the type in C
+            img2send=numpy.zeros(1, dtype=dtype)
+
+            # sending an array simplifies access
+            dims2send = numpy.array(dims,dtype='i8',ndmin=1)
+
+        if img2send is not None:
+            if img2send.dtype.fields is not None:
+                raise ValueError("got record data type, expected regular ndarray")
+
+        if extname is None:
+            # will be ignored
+            extname=""
+        else:
+            if not isstring(extname):
+                raise ValueError("extension name must be a string")
+            extname=mks(extname)
+
+
+        if extname is not None and extver is not None:
+            extver = check_extver(extver)
+
+        if extver is None:
+            # will be ignored
+            extver = 0
+
+
+        comptype = get_compress_type(compress)
+        tile_dims = get_tile_dims(tile_dims, dims)
+
+        if img2send is not None:
+            check_comptype_img(comptype, dtstr)
+
+        if header is not None:
+            nkeys=len(header)
+        else:
+            nkeys=0
+
+        self._FITS.create_image_hdu(img2send,
+                                    nkeys,
+                                    dims=dims2send,
+                                    comptype=comptype,
+                                    tile_dims=tile_dims,
+                                    extname=extname,
+                                    extver=extver)
+
+
+        # don't rebuild the whole list unless this is the first hdu
+        # to be created
+        self.update_hdu_list(rebuild=False)
+
+
+    def _ensure_empty_image_ok(self):
+        """
+        If ignore_empty was not set to True, we only allow an empty HDU for
+        the first HDU, and only if there is no data there already
+        """
+        if self.ignore_empty:
+            return
+
+        if len(self) > 1:
+            raise RuntimeError("Cannot write None image at extension %d" % len(self))
+        if 'ndims' in self[0]._info:
+            raise RuntimeError("Can only write None images to extension zero, "
+                               "which already exists")
+
+
+    def write_table(self, data, table_type='binary',
+                    names=None, formats=None, units=None,
+                    extname=None, extver=None, header=None,
+                    write_bitcols=False):
+        """
+        Create a new table extension and write the data.
+
+        The table definition is taken from the fields in the input array. If
+        you want to append new rows to the table, access the HDU directly and
+        use the write() function, e.g.
+
+            fits[extension].append(data)
+
+        parameters
+        ----------
+        data: recarray
+            A numpy array with fields. The table definition will be
+            determined from this array.
+        table_type: string, optional
+            Either 'binary' or 'ascii', default 'binary'
+            Matching is case-insensitive
+        extname: string, optional
+            An optional string for the extension name.
+        extver: integer, optional
+            FITS allows multiple extensions to have the same name (extname).
+            These extensions can optionally specify an EXTVER version number in
+            the header. Send extver= to set a particular version, which will
+            be represented in the header with keyname EXTVER. The extver must
+            be an integer > 0. If extver is not sent, the first one will be
+            selected. If ext is an integer, the extver is ignored.
+        units: list, optional
+            A list of strings with units for each column.
+        header: FITSHDR, list, dict, optional
+            A set of header keys to write. The keys are written before the data
+            is written to the table, preventing a resizing of the table area.
+
+            Can be one of these:
+                - FITSHDR object
+                - list of dictionaries containing 'name','value' and optionally
+                  a 'comment' field; the order is preserved.
+                - a dictionary of keyword-value pairs; no comments are written
+                  in this case, and the order is arbitrary.
+            Note required keywords such as NAXIS, XTENSION, etc. are cleaned out.
+ write_bitcols: boolean, optional + Write boolean arrays in the FITS bitcols format, default False + + restrictions + ------------ + The File must be opened READWRITE + """ + + """ + if data.dtype.fields == None: + raise ValueError("data must have fields") + if data.size == 0: + raise ValueError("data must have at least 1 row") + """ + + + self.create_table_hdu(data=data, + header=header, + names=names, + units=units, + extname=extname, + extver=extver, + table_type=table_type, + write_bitcols=write_bitcols) + + if header is not None: + self[-1].write_keys(header) + self[-1]._update_info() + + self[-1].write(data,names=names) + + def read_raw(self): + """ + Reads the raw FITS file contents, returning a Python string. + """ + return self._FITS.read_raw() + + def create_table_hdu(self, data=None, dtype=None, + header=None, + names=None, formats=None, + units=None, dims=None, extname=None, extver=None, + table_type='binary', write_bitcols=False): + """ + Create a new, empty table extension and reload the hdu list. + + There are three ways to do it: + 1) send a numpy dtype, from which the formats in the fits file will + be determined. + 2) Send an array in data= keyword. this is required if you have + object fields for writing to variable length columns. + 3) send the names,formats and dims yourself + + You can then write data into the new extension using + fits[extension].write(array) + If you want to write to a single column + fits[extension].write_column(array) + But be careful as the other columns will be left zeroed. + + Often you will instead just use write_table to do this all + atomically. + + fits.write_table(recarray) + + write_table will create the new table extension for you with the + appropriate fields. + + parameters + ---------- + dtype: numpy dtype or descriptor, optional + If you have an array with fields, you can just send arr.dtype. You + can also use a list of tuples, e.g. [('x','f8'),('index','i4')] or + a dictionary representation. + data: a numpy array with fields, optional + or a dictionary + + An array or dict from which to determine the table definition. You + must use this instead of sending a descriptor if you have object + array fields, as this is the only way to determine the type and max + size. + + names: list of strings, optional + The list of field names + formats: list of strings, optional + The TFORM format strings for each field. + dims: list of strings, optional + An optional list of dimension strings for each field. Should + match the repeat count for the formats fields. Be careful of + the order since FITS is more like fortran. See the descr2tabledef + function. + + table_type: string, optional + Either 'binary' or 'ascii', default 'binary' + Matching is case-insensitive + units: list of strings, optional + An optional list of unit strings for each field. + extname: string, optional + An optional extension name. + extver: integer, optional + FITS allows multiple extensions to have the same name (extname). + These extensions can optionally specify an EXTVER version number in + the header. Send extver= to set a particular version, which will + be represented in the header with keyname EXTVER. The extver must + be an integer > 0. If extver is not sent, the first one will be + selected. If ext is an integer, the extver is ignored. 
+        write_bitcols: bool, optional
+            Write boolean arrays in the FITS bitcols format, default False
+
+        header: FITSHDR, list, dict, optional
+            This is only used to determine how many slots to reserve for
+            header keywords
+
+
+        restrictions
+        ------------
+        The File must be opened READWRITE
+        """
+
+        # record this for the TableHDU object
+        self.keys['write_bitcols'] = write_bitcols
+
+        # convert the table type string ('binary' or 'ascii') to the
+        # corresponding integer code
+        table_type_int=_extract_table_type(table_type)
+
+        if data is not None:
+            if isinstance(data,numpy.ndarray):
+                names, formats, dims = array2tabledef(data, table_type=table_type,
+                                                      write_bitcols=write_bitcols)
+            elif isinstance(data, (list,dict)):
+                names, formats, dims = collection2tabledef(data, names=names,
+                                                           table_type=table_type,
+                                                           write_bitcols=write_bitcols)
+            else:
+                raise ValueError("data must be an ndarray with fields or a dict")
+        elif dtype is not None:
+            dtype=numpy.dtype(dtype)
+            names, formats, dims = descr2tabledef(dtype.descr,write_bitcols=write_bitcols)
+        else:
+            if names is None or formats is None:
+                raise ValueError("send either dtype=, data=, or names= and formats=")
+
+            if not isinstance(names,list) or not isinstance(formats,list):
+                raise ValueError("names and formats should be lists")
+            if len(names) != len(formats):
+                raise ValueError("names and formats must be same length")
+
+            if dims is not None:
+                if not isinstance(dims,list):
+                    raise ValueError("dims should be a list")
+                if len(dims) != len(names):
+                    raise ValueError("names and dims must be same length")
+
+        if units is not None:
+            if not isinstance(units,list):
+                raise ValueError("units should be a list")
+            if len(units) != len(names):
+                raise ValueError("names and units must be same length")
+
+        if extname is None:
+            # will be ignored
+            extname=""
+        else:
+            if not isstring(extname):
+                raise ValueError("extension name must be a string")
+            extname=mks(extname)
+
+
+
+        if extname is not None and extver is not None:
+            extver = check_extver(extver)
+        if extver is None:
+            # will be ignored
+            extver = 0
+        if extname is None:
+            # will be ignored
+            extname=""
+
+        if header is not None:
+            nkeys=len(header)
+        else:
+            nkeys=0
+
+        # note we can create extname in the c code for tables, but not images
+        self._FITS.create_table_hdu(table_type_int, nkeys,
+                                    names, formats, tunit=units, tdim=dims,
+                                    extname=extname, extver=extver)
+
+        # don't rebuild the whole list unless this is the first hdu
+        # to be created
+        self.update_hdu_list(rebuild=False)
+
+    def update_hdu_list(self, rebuild=True):
+        """
+        Force an update of the entire HDU list
+
+        Normally you don't need to call this method directly
+
+        If rebuild is True, or the hdu_list is not yet set, the list is
+        rebuilt from scratch
+        """
+
+        if not hasattr(self,'hdu_list'):
+            rebuild=True
+
+        if rebuild:
+            self.hdu_list = []
+            self.hdu_map={}
+
+            # we don't know how many hdus there are, so iterate
+            # until we can't open any more
+            ext_start=0
+        else:
+            # start from last
+            ext_start=len(self)
+
+        ext=ext_start
+        while True:
+            try:
+                self._append_hdu_info(ext)
+            except IOError:
+                break
+            except RuntimeError:
+                break
+
+            ext = ext + 1
+
+    def _append_hdu_info(self, ext):
+        """
+        internal routine
+
+        append info for the indicated extension
+        """
+
+        # raises IOError if not found
+        hdu_type=self._FITS.movabs_hdu(ext+1)
+
+        if hdu_type==IMAGE_HDU:
+            hdu=ImageHDU(self._FITS, ext, **self.keys)
+        elif hdu_type==BINARY_TBL:
+            hdu=TableHDU(self._FITS, ext, **self.keys)
+        elif hdu_type==ASCII_TBL:
+            hdu=AsciiTableHDU(self._FITS, ext, **self.keys)
+        else:
+            mess=("extension %s is of unknown type %s; "
+                  "this is probably a bug")
+            mess=mess % (ext,hdu_type)
+            raise IOError(mess)
+
+        self.hdu_list.append(hdu)
+        self.hdu_map[ext] = hdu
+
+        extname=hdu.get_extname()
+        if not self.case_sensitive:
+            extname=extname.lower()
+        if extname != '':
+            # this will guarantee we default to *first* version,
+            # if version is not requested, using __getitem__
+            if extname not in self.hdu_map:
+                self.hdu_map[extname] = hdu
+
+            ver=hdu.get_extver()
+            if ver > 0:
+                key='%s-%s' % (extname,ver)
+                self.hdu_map[key] = hdu
+
+
+    def __iter__(self):
+        """
+        begin iteration over HDUs
+        """
+        if not hasattr(self,'hdu_list'):
+            self.update_hdu_list()
+        self._iter_index=0
+        return self
+
+    def next(self):
+        """
+        Move to the next iteration
+        """
+        if self._iter_index == len(self.hdu_list):
+            raise StopIteration
+        hdu=self.hdu_list[self._iter_index]
+        self._iter_index += 1
+        return hdu
+    __next__=next
+
+    def __len__(self):
+        """
+        get the number of extensions
+        """
+        if not hasattr(self,'hdu_list'):
+            self.update_hdu_list()
+        return len(self.hdu_list)
+
+    def _extract_item(self,item):
+        """
+        utility function to extract an "item", meaning
+        an extension number or name, plus optional version.
+        """
+        ver=0
+        if isinstance(item,tuple):
+            ver_sent=True
+            nitem=len(item)
+            if nitem == 1:
+                ext=item[0]
+            elif nitem == 2:
+                ext,ver=item
+            else:
+                raise ValueError("expected an (ext,) or (ext,ver) tuple, "
+                                 "got %d elements" % nitem)
+        else:
+            ver_sent=False
+            ext=item
+        return ext,ver,ver_sent
+
+    def __getitem__(self, item):
+        """
+        Get an hdu by number, name, and possibly version
+        """
+        if not hasattr(self, 'hdu_list'):
+            self.update_hdu_list()
+
+        ext,ver,ver_sent = self._extract_item(item)
+
+        try:
+            # if it is an int
+            hdu = self.hdu_list[ext]
+        except:
+            # might be a string
+            ext=mks(ext)
+            if not self.case_sensitive:
+                mess='(case insensitive)'
+                ext=ext.lower()
+            else:
+                mess='(case sensitive)'
+
+            if ver > 0:
+                key = '%s-%s' % (ext,ver)
+                if key not in self.hdu_map:
+                    raise IOError("extension not found: %s, "
+                                  "version %s %s" % (ext,ver,mess))
+                hdu = self.hdu_map[key]
+            else:
+                if ext not in self.hdu_map:
+                    raise IOError("extension not found: %s %s" % (ext,mess))
+                hdu = self.hdu_map[ext]
+
+        return hdu
+
+    def __contains__(self, item):
+        """
+        tell whether the specified extension exists, possibly
+        with version sent as well
+        """
+        try:
+            hdu=self[item]
+            return True
+        except:
+            return False
+
+    def __repr__(self):
+        """
+        Text representation of some fits file metadata
+        """
+        spacing = ' '*2
+        if not hasattr(self, 'hdu_list'):
+            self.update_hdu_list()
+
+        rep = ['']
+        rep.append("%sfile: %s" % (spacing,self._filename))
+        rep.append("%smode: %s" % (spacing,_modeprint_map[self.intmode]))
+
+        rep.append('%sextnum %-15s %s' % (spacing,"hdutype","hduname[v]"))
+        for i,hdu in enumerate(self.hdu_list):
+            t = hdu._info['hdutype']
+            name = hdu.get_extname()
+            if name != '':
+                ver=hdu.get_extver()
+                if ver != 0:
+                    name = '%s[%s]' % (name,ver)
+
+            rep.append("%s%-6d %-15s %s" % (spacing, i, _hdu_type_map[t], name))
+
+        rep = '\n'.join(rep)
+        return rep
+
+    #def __del__(self):
+    #    self.close()
+    def __enter__(self):
+        return self
+    def __exit__(self, exception_type, exception_value, traceback):
+        self.close()
+
+
+
+class HDUBase(object):
+    """
+    A representation of a FITS HDU
+
+    construction parameters
+    -----------------------
+    fits: FITS object
+        An instance of a _fitsio_wrap.FITS object. This is the low-level
+        python object, not the FITS object defined above.
+    ext: integer
+        The extension number.
+    case_sensitive: bool, optional
+        Match column names and extension names with case-sensitivity.
Default + is False. + lower: bool, optional + If True, force all columns names to lower case in output + upper: bool, optional + If True, force all columns names to upper case in output + vstorage: string, optional + Set the default method to store variable length columns. Can be + 'fixed' or 'object'. See docs on fitsio.FITS for details. + iter_row_buffer: integer + Number of rows to buffer when iterating over table HDUs. + Default is 1. + """ + def __init__(self, fits, ext, **keys): + self._FITS = fits + self._ext = ext + + self._update_info() + self._filename = self._FITS.filename() + + def get_extnum(self): + """ + Get the extension number + """ + return self._ext + + def get_extname(self): + """ + Get the name for this extension, can be an empty string + """ + name = self._info['extname'] + if name.strip() == '': + name = self._info['hduname'] + return name.strip() + + def get_extver(self): + """ + Get the version for this extension. + + Used when a name is given to multiple extensions + """ + ver=self._info['extver'] + if ver == 0: + ver=self._info['hduver'] + return ver + + def get_exttype(self, num=False): + """ + Get the extension type + + By default the result is a string that mirrors + the enumerated type names in cfitsio + 'IMAGE_HDU', 'ASCII_TBL', 'BINARY_TBL' + which have numeric values + 0 1 2 + send num=True to get the numbers. The values + fitsio.IMAGE_HDU .ASCII_TBL, and .BINARY_TBL + are available for comparison + + parameters + ---------- + num: bool, optional + Return the numeric values. + """ + if num: + return self._info['hdutype'] + else: + name=_hdu_type_map[self._info['hdutype']] + return name + + def get_offsets(self): + """ + returns + ------- + a dictionary with these entries + + header_start: + byte offset from beginning of the file to the start + of the header + data_start: + byte offset from beginning of the file to the start + of the data section + data_end: + byte offset from beginning of the file to the end + of the data section + + Note these are also in the information dictionary, which + you can access with get_info() + """ + return dict( + header_start=self._info['header_start'], + data_start=self._info['data_start'], + data_end=self._info['data_end'], + ) + + def get_info(self): + """ + Get a copy of the internal dictionary holding extension information + """ + return copy.deepcopy(self._info) + + def get_filename(self): + """ + Get a copy of the filename for this fits file + """ + return copy.copy(self._filename) + + def write_checksum(self): + """ + Write the checksum into the header for this HDU. + + Computes the checksum for the HDU, both the data portion alone (DATASUM + keyword) and the checksum complement for the entire HDU (CHECKSUM). + + returns + ------- + A dict with keys 'datasum' and 'hdusum' + """ + return self._FITS.write_checksum(self._ext+1) + + def verify_checksum(self): + """ + Verify the checksum in the header for this HDU. 
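+
+        raises
+        ------
+        ValueError if either the data (DATASUM) or the HDU (CHECKSUM)
+        checksum fails to verify.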
+        """
+        res = self._FITS.verify_checksum(self._ext+1)
+        if res['dataok'] != 1:
+            raise ValueError("data checksum failed")
+        if res['hduok'] != 1:
+            raise ValueError("hdu checksum failed")
+
+    def write_comment(self, comment):
+        """
+        Write a comment into the header
+        """
+        self._FITS.write_comment(self._ext+1, str(comment))
+
+    def write_history(self, history):
+        """
+        Write history text into the header
+        """
+        self._FITS.write_history(self._ext+1, str(history))
+
+    def _write_continue(self, value):
+        """
+        Write a CONTINUE card into the header
+        """
+        self._FITS.write_continue(self._ext+1, str(value))
+
+    def write_key(self, name, value, comment=""):
+        """
+        Write the input value to the header
+
+        parameters
+        ----------
+        name: string
+            Name of keyword to write/update
+        value: scalar
+            Value to write; can be a string, float, or integer type,
+            including numpy scalar types.
+        comment: string, optional
+            An optional comment to write for this key
+
+        Notes
+        -----
+        Write COMMENT and HISTORY using the write_comment and write_history
+        methods
+        """
+
+        if value is None:
+            self._FITS.write_undefined_key(self._ext+1,
+                                           str(name),
+                                           str(comment))
+
+        elif isinstance(value,bool):
+            if value:
+                v=1
+            else:
+                v=0
+            self._FITS.write_logical_key(self._ext+1,
+                                         str(name),
+                                         v,
+                                         str(comment))
+        elif isinstance(value, _stypes):
+            self._FITS.write_string_key(self._ext+1,
+                                        str(name),
+                                        str(value),
+                                        str(comment))
+        elif isinstance(value, _ftypes):
+            self._FITS.write_double_key(self._ext+1,
+                                        str(name),
+                                        float(value),
+                                        str(comment))
+        elif isinstance(value, _itypes):
+            self._FITS.write_long_key(self._ext+1,
+                                      str(name),
+                                      int(value),
+                                      str(comment))
+        elif isinstance(value,(tuple,list)):
+            vl=[str(el) for el in value]
+            sval=','.join(vl)
+            self._FITS.write_string_key(self._ext+1,
+                                        str(name),
+                                        sval,
+                                        str(comment))
+        else:
+            sval=str(value)
+            mess=("warning, keyword '%s' has non-standard "
+                  "value type %s; "
+                  "converting to string: '%s'")
+            warnings.warn(mess % (name,type(value),sval), FITSRuntimeWarning)
+            self._FITS.write_string_key(self._ext+1,
+                                        str(name),
+                                        sval,
+                                        str(comment))
+
+    def write_keys(self, records_in, clean=True):
+        """
+        Write the keywords to the header.
+
+        parameters
+        ----------
+        records_in: FITSHDR or list or dict
+            Can be one of these:
+                - FITSHDR object
+                - list of dictionaries containing 'name','value' and optionally
+                  a 'comment' field; the order is preserved.
+                - a dictionary of keyword-value pairs; no comments are written
+                  in this case, and the order is arbitrary.
+        clean: boolean
+            If True, trim out the standard fits header keywords that are
+            created on HDU creation, such as EXTEND, SIMPLE, TTYPE, TFORM,
+            TDIM, XTENSION, BITPIX, NAXIS, etc.
+
+        Notes
+        -----
+        Input keys named COMMENT and HISTORY are written using the
+        write_comment and write_history methods.
+        """
+
+        if isinstance(records_in,FITSHDR):
+            hdr = records_in
+        else:
+            hdr = FITSHDR(records_in)
+
+        if clean:
+            is_table = isinstance(self, TableHDU)
+            hdr.clean(is_table=is_table)
+
+        for r in hdr.records():
+            name=r['name'].upper()
+            value=r['value']
+
+            if name=='COMMENT':
+                self.write_comment(value)
+            elif name=='HISTORY':
+                self.write_history(value)
+            elif name=='CONTINUE':
+                self._write_continue(value)
+            else:
+                comment=r.get('comment','')
+                self.write_key(name,value,comment=comment)
+
+
+    def read_header(self):
+        """
+        Read the header as a FITSHDR
+
+        The FITSHDR allows access to the values and comments by name and
+        number.
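+
+        For example, a sketch (the filename and keyword are arbitrary):
+
+            fits=fitsio.FITS(filename)
+            hdr = fits[0].read_header()
+            naxis = hdr['NAXIS']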
+        """
+        # note converting strings
+        return FITSHDR(self.read_header_list(), convert=True)
+
+    def read_header_list(self):
+        """
+        Read the header as a list of dictionaries.
+
+        You will usually use read_header instead, which just sends the output
+        of this function to the constructor of a FITSHDR, which allows access
+        to the values and comments by name and number.
+
+        Each dictionary is
+            'name': the keyword name
+            'value': the value field as a string
+            'comment': the comment field as a string.
+        """
+        return self._FITS.read_header(self._ext+1)
+
+
+    def _update_info(self):
+        """
+        Update metadata for this HDU
+        """
+        try:
+            self._FITS.movabs_hdu(self._ext+1)
+        except IOError:
+            raise RuntimeError("no such hdu")
+
+        self._info = self._FITS.get_hdu_info(self._ext+1)
+
+    def _get_repr_list(self):
+        """
+        Get some representation data common to all HDU types
+        """
+        spacing = ' '*2
+        text = ['']
+        text.append("%sfile: %s" % (spacing,self._filename))
+        text.append("%sextension: %d" % (spacing,self._info['hdunum']-1))
+        text.append("%stype: %s" % (spacing,_hdu_type_map[self._info['hdutype']]))
+
+        extname=self.get_extname()
+        if extname != "":
+            text.append("%sextname: %s" % (spacing,extname))
+        extver=self.get_extver()
+        if extver != 0:
+            text.append("%sextver: %s" % (spacing,extver))
+
+        return text, spacing
+
+class TableHDU(HDUBase):
+    """
+    A class representing a table HDU
+    """
+    def __init__(self, fits, ext, **keys):
+        super(TableHDU,self).__init__(fits, ext, **keys)
+
+        self.lower=keys.get('lower',False)
+        self.upper=keys.get('upper',False)
+        self.trim_strings=keys.get('trim_strings',False)
+
+        self._vstorage=keys.get('vstorage','fixed')
+        self.case_sensitive=keys.get('case_sensitive',False)
+        self._iter_row_buffer=keys.get('iter_row_buffer',1)
+        self.write_bitcols=keys.get('write_bitcols',False)
+
+        if self._info['hdutype'] == ASCII_TBL:
+            self._table_type_str='ascii'
+        else:
+            self._table_type_str='binary'
+
+    def get_nrows(self):
+        """
+        Get number of rows in the table.
+        """
+        nrows=self._info.get('nrows',None)
+        if nrows is None:
+            raise ValueError("nrows not in info table; this is a bug")
+        return nrows
+
+    def get_colnames(self):
+        """
+        Get a copy of the column names for a table HDU
+        """
+        return copy.copy(self._colnames)
+
+    def get_colname(self, colnum):
+        """
+        Get the name associated with the given column number
+
+        parameters
+        ----------
+        colnum: integer
+            The number for the column, zero offset
+        """
+        if colnum < 0 or colnum > (len(self._colnames)-1):
+            raise ValueError("colnum out of range [%d,%d]" % (0,len(self._colnames)-1))
+        return self._colnames[colnum]
+
+    def get_vstorage(self):
+        """
+        Get a string representing the storage method for variable length
+        columns
+        """
+        return copy.copy(self._vstorage)
+
+    def has_data(self):
+        """
+        Determine if this HDU has any data
+
+        Check that the row count is not zero
+        """
+        if self._info['nrows'] > 0:
+            return True
+        else:
+            return False
+
+    def where(self, expression):
+        """
+        Return the indices where the expression evaluates to true.
+
+        parameters
+        ----------
+        expression: string
+            A fits row selection expression. E.g.
+            "x > 3 && y < 5"
+        """
+
+        return self._FITS.where(self._ext+1, expression)
+
+    def write(self, data, **keys):
+        """
+        Write data into this HDU
+
+        parameters
+        ----------
+        data: ndarray or list of ndarray
+            A numerical python array. Should be an ordinary array for image
+            HDUs, should have fields for tables. To write an ordinary array to
+            a column in a table HDU, use write_column. If data already exists
+            in this HDU, it will be overwritten. See the append() method to
+            append new rows to a table HDU.
+        firstrow: integer, optional
+            At which row you should begin writing to tables. Be sure you know
+            what you are doing! For appending see the append() method.
+            Default 0.
+        columns: list, optional
+            If data is a list of arrays, you must send columns as a list
+            of names or column numbers
+
+            You can also send names=
+        names: list, optional
+            same as columns=
+        """
+
+        slow = keys.get('slow',False)
+
+        isrec=False
+        if isinstance(data,(list,dict)):
+            if isinstance(data,list):
+                data_list=data
+                columns_all = keys.get('columns',None)
+                if columns_all is None:
+                    columns_all=keys.get('names',None)
+                if columns_all is None:
+                    raise ValueError("you must send columns with a list of arrays")
+
+            else:
+                columns_all=list(data.keys())
+                data_list=[data[n] for n in columns_all]
+
+            colnums_all = [self._extract_colnum(c) for c in columns_all]
+            names = [self.get_colname(c) for c in colnums_all]
+
+            isobj=numpy.zeros(len(data_list),dtype=numpy.bool)
+            for i in xrange(len(data_list)):
+                isobj[i] = is_object(data_list[i])
+
+        else:
+            if data.dtype.fields is None:
+                raise ValueError("You are writing to a table, so I expected "
+                                 "an array with fields as input. If you want "
+                                 "to write a simple array, you should use "
+                                 "write_column to write to a single column, "
+                                 "or instead write to an image hdu")
+
+            if data.shape == ():
+                raise ValueError("cannot write data with shape ()")
+
+            isrec=True
+            names=data.dtype.names
+            # only write object types (variable-length columns) after
+            # writing the main table
+            isobj = fields_are_object(data)
+
+            data_list = []
+            colnums_all=[]
+            for i,name in enumerate(names):
+                colnum = self._extract_colnum(name)
+                data_list.append(data[name])
+                colnums_all.append(colnum)
+
+        if slow:
+            for i,name in enumerate(names):
+                if not isobj[i]:
+                    self.write_column(name, data_list[i], **keys)
+        else:
+
+            nonobj_colnums = []
+            nonobj_arrays = []
+            for i in xrange(len(data_list)):
+                if not isobj[i]:
+                    nonobj_colnums.append(colnums_all[i])
+                    if isrec:
+                        # this still leaves possibility of f-order sub-arrays..
+                        colref=array_to_native(data_list[i],inplace=False)
+                    else:
+                        colref=array_to_native_c(data_list[i],inplace=False)
+                    nonobj_arrays.append(colref)
+
+            if len(nonobj_arrays) > 0:
+                firstrow=keys.get('firstrow',0)
+                self._FITS.write_columns(self._ext+1, nonobj_colnums, nonobj_arrays,
+                                         firstrow=firstrow+1, write_bitcols=self.write_bitcols)
+
+        # writing the object arrays always occurs the same way
+        # need to make sure this works for array fields
+        for i,name in enumerate(names):
+            if isobj[i]:
+                self.write_var_column(name, data_list[i], **keys)
+
+        self._update_info()
+
+    def write_column(self, column, data, **keys):
+        """
+        Write data to a column in this HDU
+
+        This HDU must be a table HDU.
+
+        parameters
+        ----------
+        column: scalar string/integer
+            The column in which to write. Can be the name or number (0 offset)
+        data: ndarray
+            Numerical python array to write. This should match the
+            shape of the column. You are probably better using fits.write_table()
+            to be sure.
+        firstrow: integer, optional
+            At which row you should begin writing. Be sure you know what you
+            are doing! For appending see the append() method. Default 0.
+        """
+
+        firstrow=keys.get('firstrow',0)
+
+        colnum = self._extract_colnum(column)
+
+        # need it to be contiguous and native byte order. For now, make a
+        # copy, but we may be able to avoid this with some care.
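+        # note: array_to_native only byte-swaps when the input is not
+        # already in native order, so the inplace=False branch below
+        # returns the input unchanged in the common case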
+
+        if not data.flags['C_CONTIGUOUS']:
+            # this always makes a copy
+            data_send = numpy.ascontiguousarray(data)
+            # this is a copy, we can make sure it is native
+            # and modify in place if needed
+            array_to_native(data_send, inplace=True)
+        else:
+            # we can avoid the copy with a try-finally block and
+            # some logic
+            data_send = array_to_native(data, inplace=False)
+
+        self._FITS.write_column(self._ext+1, colnum+1, data_send,
+                                firstrow=firstrow+1, write_bitcols=self.write_bitcols)
+        del data_send
+        self._update_info()
+
+    def write_var_column(self, column, data, firstrow=0, **keys):
+        """
+        Write data to a variable-length column in this HDU
+
+        This HDU must be a table HDU.
+
+        parameters
+        ----------
+        column: scalar string/integer
+            The column in which to write. Can be the name or number (0 offset)
+        data: ndarray
+            Numerical python array to write. This must be an object array.
+        firstrow: integer, optional
+            At which row you should begin writing. Be sure you know what you
+            are doing! For appending see the append() method. Default 0.
+        """
+
+        if not is_object(data):
+            raise ValueError("Only object fields can be written to "
+                             "variable-length arrays")
+        colnum = self._extract_colnum(column)
+
+        self._FITS.write_var_column(self._ext+1, colnum+1, data,
+                                    firstrow=firstrow+1)
+        self._update_info()
+
+    def insert_column(self, name, data, colnum=None):
+        """
+        Insert a new column.
+
+        parameters
+        ----------
+        name: string
+            The column name
+        data:
+            The data to write into the new column.
+        colnum: int, optional
+            The column number for the new column, zero-offset. Default
+            is to add the new column after the existing ones.
+
+        Notes
+        -----
+        This method is used unmodified by ascii tables as well.
+        """
+
+        if name in self._colnames:
+            raise ValueError("column '%s' already exists" % name)
+
+        descr=data.dtype.descr
+        if len(descr) > 1:
+            raise ValueError("you can only insert a single column, "
+                             "requested: %s" % descr)
+
+        this_descr = descr[0]
+        this_descr = [name, this_descr[1]]
+        if len(data.shape) > 1:
+            this_descr += [data.shape[1:]]
+        this_descr = tuple(this_descr)
+
+        name, fmt, dims = npy2fits(this_descr,
+                                   table_type=self._table_type_str)
+        if dims is not None:
+            dims=[dims]
+
+        if colnum is None:
+            new_colnum = len(self._info['colinfo']) + 1
+        else:
+            new_colnum = colnum+1
+        self._FITS.insert_col(self._ext+1, new_colnum, name, fmt, tdim=dims)
+        self._update_info()
+
+        self.write_column(name, data)
+
+    def append(self, data, **keys):
+        """
+        Append new rows to a table HDU
+
+        parameters
+        ----------
+        data: ndarray or list of arrays
+
+            A numerical python array with fields (recarray) or a list of
+            arrays. Should have the same fields as the existing table. If only
+            a subset of the table columns are present, the other columns are
+            filled with zeros.
+
+        columns: list, optional
+            if a list of arrays is sent, also send columns= as a list
+            of names or column numbers
+        """
+
+        firstrow=self._info['nrows']
+
+        #if data.dtype.fields is None:
+        #    raise ValueError("got an ordinary array, can only append recarrays. "
+        #                     "using this method")
+
+        # make sure these columns exist
+        #for n in data.dtype.names:
+        #    colnum = self._extract_colnum(n)
+
+        keys['firstrow'] = firstrow
+        self.write(data, **keys)
+
+
+    def delete_rows(self, rows):
+        """
+        Delete rows from the table
+
+        parameters
+        ----------
+        rows: sequence or slice
+            The exact rows to delete as a sequence, or a slice.
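+            Rows are zero-offset; they are converted internally to the
+            1-offset convention used by the C routines.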
+ + examples + -------- + # delete a range of rows + with fitsio.FITS(fname,'rw') as fits: + fits['mytable'].delete_rows(slice(3,20)) + + # delete specific rows + with fitsio.FITS(fname,'rw') as fits: + rows2delete = [3,88,76] + fits['mytable'].delete_rows(rows2delete) + """ + + if rows is None: + return + + # extract and convert to 1-offset for C routine + if isinstance(rows, slice): + rows = self._process_slice(rows) + if rows.step is not None and rows.step != 1: + rows = numpy.arange( + rows.start+1, + rows.stop+1, + rows.step, + ) + else: + # rows must be 1-offset + rows = slice(rows.start+1, rows.stop+1) + else: + rows = self._extract_rows(rows) + # rows must be 1-offset + rows += 1 + + if isinstance(rows, slice): + self._FITS.delete_row_range(self._ext+1, rows.start, rows.stop) + else: + if rows.size == 0: + return + + self._FITS.delete_rows(self._ext+1, rows) + + self._update_info() + + def resize(self, nrows, front=False): + """ + Resize the table to the given size, removing or adding rows as + necessary. Note if expanding the table at the end, it is more + efficient to use the append function than resizing and then + writing. + + New added rows are zerod, except for 'i1', 'u2' and 'u4' data types + which get -128,32768,2147483648 respectively + + + parameters + ---------- + nrows: int + new size of table + front: bool, optional + If True, add or remove rows from the front. Default + is False + """ + + nrows_current = self.get_nrows() + if nrows == nrows_current: + return + + if nrows < nrows_current: + rowdiff = nrows_current - nrows + if front: + # delete from the front + start = 0 + stop = rowdiff + else: + # delete from the back + start = nrows + stop = nrows_current + + self.delete_rows(slice(start, stop)) + else: + rowdiff = nrows - nrows_current + if front: + firstrow = 0 # in this case zero is what we want, since the code inserts + else: + firstrow = nrows_current + self._FITS.insert_rows(self._ext+1, firstrow, rowdiff) + + self._update_info() + + + def read(self, **keys): + """ + read data from this HDU + + By default, all data are read. + + send columns= and rows= to select subsets of the data. + Table data are read into a recarray; use read_column() to get a single + column as an ordinary array. You can alternatively use slice notation + fits=fitsio.FITS(filename) + fits[ext][:] + fits[ext][2:5] + fits[ext][200:235:2] + fits[ext][rows] + fits[ext][cols][rows] + + parameters + ---------- + columns: optional + An optional set of columns to read from table HDUs. Default is to + read all. Can be string or number. If a sequence, a recarray + is always returned. If a scalar, an ordinary array is returned. + rows: optional + An optional list of rows to read from table HDUS. Default is to + read all. + vstorage: string, optional + Over-ride the default method to store variable length columns. Can + be 'fixed' or 'object'. See docs on fitsio.FITS for details. + """ + + columns = keys.get('columns',None) + rows = keys.get('rows',None) + + if columns is not None: + if 'columns' in keys: + del keys['columns'] + data = self.read_columns(columns, **keys) + elif rows is not None: + if 'rows' in keys: + del keys['rows'] + data = self.read_rows(rows, **keys) + else: + data = self._read_all(**keys) + + return data + + def _read_all(self, **keys): + """ + Read all data in the HDU. + + parameters + ---------- + vstorage: string, optional + Over-ride the default method to store variable length columns. Can + be 'fixed' or 'object'. See docs on fitsio.FITS for details. 
+ lower: bool, optional + If True, force all columns names to lower case in output. Will over + ride the lower= keyword from construction. + upper: bool, optional + If True, force all columns names to upper case in output. Will over + ride the lower= keyword from construction. + """ + + dtype, offsets, isvar = self.get_rec_dtype(**keys) + + w,=numpy.where(isvar == True) + has_tbit=self._check_tbit() + + if w.size > 0: + vstorage = keys.get('vstorage',self._vstorage) + colnums = self._extract_colnums() + rows=None + array = self._read_rec_with_var(colnums, rows, dtype, + offsets, isvar, vstorage) + elif has_tbit: + # drop down to read_columns since we can't stuff into a contiguous array + colnums=self._extract_colnums() + array = self.read_columns(colnums, **keys) + else: + + firstrow=1 + nrows = self._info['nrows'] + array = numpy.zeros(nrows, dtype=dtype) + + self._FITS.read_as_rec(self._ext+1, 1, nrows, array) + + for colnum,name in enumerate(array.dtype.names): + self._rescale_and_convert_field_inplace(array, + name, + self._info['colinfo'][colnum]['tscale'], + self._info['colinfo'][colnum]['tzero']) + lower=keys.get('lower',False) + upper=keys.get('upper',False) + if self.lower or lower: + _names_to_lower_if_recarray(array) + elif self.upper or upper: + _names_to_upper_if_recarray(array) + + self._maybe_trim_strings(array, **keys) + return array + + def read_column(self, col, **keys): + """ + Read the specified column + + Alternatively, you can use slice notation + fits=fitsio.FITS(filename) + fits[ext][colname][:] + fits[ext][colname][2:5] + fits[ext][colname][200:235:2] + fits[ext][colname][rows] + + Note, if reading multiple columns, it is more efficient to use + read(columns=) or slice notation with a list of column names. + + parameters + ---------- + col: string/int, required + The column name or number. + rows: optional + An optional set of row numbers to read. + vstorage: string, optional + Over-ride the default method to store variable length columns. Can + be 'fixed' or 'object'. See docs on fitsio.FITS for details. + """ + + res = self.read_columns([col], **keys) + colname = res.dtype.names[0] + data = res[colname] + + self._maybe_trim_strings(data, **keys) + return data + + def read_rows(self, rows, **keys): + """ + Read the specified rows. + + parameters + ---------- + rows: list,array + A list or array of row indices. + vstorage: string, optional + Over-ride the default method to store variable length columns. Can + be 'fixed' or 'object'. See docs on fitsio.FITS for details. + lower: bool, optional + If True, force all columns names to lower case in output. Will over + ride the lower= keyword from construction. + upper: bool, optional + If True, force all columns names to upper case in output. Will over + ride the lower= keyword from construction. + """ + if rows is None: + # we actually want all rows! 
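+            # (the full-table read path avoids per-row overhead)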
+ return self._read_all() + + if self._info['hdutype'] == ASCII_TBL: + keys['rows'] = rows + return self.read(**keys) + + rows = self._extract_rows(rows) + dtype, offsets, isvar = self.get_rec_dtype(**keys) + + w,=numpy.where(isvar == True) + if w.size > 0: + vstorage = keys.get('vstorage',self._vstorage) + colnums=self._extract_colnums() + return self._read_rec_with_var(colnums, rows, dtype, offsets, isvar, vstorage) + else: + array = numpy.zeros(rows.size, dtype=dtype) + self._FITS.read_rows_as_rec(self._ext+1, array, rows) + + for colnum,name in enumerate(array.dtype.names): + self._rescale_and_convert_field_inplace(array, + name, + self._info['colinfo'][colnum]['tscale'], + self._info['colinfo'][colnum]['tzero']) + + lower=keys.get('lower',False) + upper=keys.get('upper',False) + if self.lower or lower: + _names_to_lower_if_recarray(array) + elif self.upper or upper: + _names_to_upper_if_recarray(array) + + self._maybe_trim_strings(array, **keys) + + return array + + + def read_columns(self, columns, **keys): + """ + read a subset of columns from this binary table HDU + + By default, all rows are read. Send rows= to select subsets of the + data. Table data are read into a recarray for multiple columns, + plain array for a single column. + + parameters + ---------- + columns: list/array + An optional set of columns to read from table HDUs. Can be string + or number. If a sequence, a recarray is always returned. If a + scalar, an ordinary array is returned. + rows: list/array, optional + An optional list of rows to read from table HDUS. Default is to + read all. + vstorage: string, optional + Over-ride the default method to store variable length columns. Can + be 'fixed' or 'object'. See docs on fitsio.FITS for details. + lower: bool, optional + If True, force all columns names to lower case in output. Will over + ride the lower= keyword from construction. + upper: bool, optional + If True, force all columns names to upper case in output. Will over + ride the lower= keyword from construction. + """ + + if self._info['hdutype'] == ASCII_TBL: + keys['columns'] = columns + return self.read(**keys) + + rows = keys.get('rows',None) + + # if columns is None, returns all. 
Guaranteed to be unique and sorted + colnums = self._extract_colnums(columns) + if isinstance(colnums,int): + # scalar sent, don't read as a recarray + return self.read_column(columns, **keys) + + # if rows is None still returns None, and is correctly interpreted + # by the reader to mean all + rows = self._extract_rows(rows) + + # this is the full dtype for all columns + dtype, offsets, isvar = self.get_rec_dtype(colnums=colnums, **keys) + + w,=numpy.where(isvar == True) + if w.size > 0: + vstorage = keys.get('vstorage',self._vstorage) + array = self._read_rec_with_var(colnums, rows, dtype, offsets, isvar, vstorage) + else: + + if rows is None: + nrows = self._info['nrows'] + else: + nrows = rows.size + array = numpy.zeros(nrows, dtype=dtype) + + colnumsp = colnums[:].copy() + colnumsp[:] += 1 + self._FITS.read_columns_as_rec(self._ext+1, colnumsp, array, rows) + + for i in xrange(colnums.size): + colnum = int(colnums[i]) + name = array.dtype.names[i] + self._rescale_and_convert_field_inplace(array, + name, + self._info['colinfo'][colnum]['tscale'], + self._info['colinfo'][colnum]['tzero']) + + if (self._check_tbit(colnums=colnums)): + array = self._fix_tbit_dtype(array, colnums) + + lower=keys.get('lower',False) + upper=keys.get('upper',False) + if self.lower or lower: + _names_to_lower_if_recarray(array) + elif self.upper or upper: + _names_to_upper_if_recarray(array) + + self._maybe_trim_strings(array, **keys) + + return array + + def read_slice(self, firstrow, lastrow, step=1, **keys): + """ + Read the specified row slice from a table. + + Read all rows between firstrow and lastrow (non-inclusive, as per + python slice notation). Note you must use slice notation for + images, e.g. f[ext][20:30, 40:50] + + parameters + ---------- + firstrow: integer + The first row to read + lastrow: integer + The last row to read, non-inclusive. This follows the python list + slice convention that one does not include the last element. + step: integer, optional + Step between rows, default 1. e.g., if step is 2, skip every other row. + vstorage: string, optional + Over-ride the default method to store variable length columns. Can + be 'fixed' or 'object'. See docs on fitsio.FITS for details. + lower: bool, optional + If True, force all columns names to lower case in output. Will over + ride the lower= keyword from construction. + upper: bool, optional + If True, force all columns names to upper case in output. Will over + ride the lower= keyword from construction. + """ + + if self._info['hdutype'] == ASCII_TBL: + rows = numpy.arange(firstrow, lastrow, step, dtype='i8') + keys['rows'] = rows + return self.read_ascii(**keys) + + step=keys.get('step',1) + if self._info['hdutype'] == IMAGE_HDU: + raise ValueError("slices currently only supported for tables") + + maxrow = self._info['nrows'] + if firstrow < 0 or lastrow > maxrow: + raise ValueError("slice must specify a sub-range of [%d,%d]" % (0,maxrow)) + + dtype, offsets, isvar = self.get_rec_dtype(**keys) + + w,=numpy.where(isvar == True) + if w.size > 0: + vstorage = keys.get('vstorage',self._vstorage) + rows=numpy.arange(firstrow,lastrow,step,dtype='i8') + colnums=self._extract_colnums() + array = self._read_rec_with_var(colnums, rows, dtype, offsets, isvar, vstorage) + else: + if step != 1: + rows = numpy.arange(firstrow, lastrow, step, dtype='i8') + array = self.read(rows=rows) + else: + # no +1 because lastrow is non-inclusive + nrows=lastrow-firstrow + array = numpy.zeros(nrows, dtype=dtype) + + # only first needs to be +1. 
This is because the C code is inclusive
+                self._FITS.read_as_rec(self._ext+1, firstrow+1, lastrow, array)
+
+                for colnum,name in enumerate(array.dtype.names):
+                    self._rescale_and_convert_field_inplace(array,
+                                          name,
+                                          self._info['colinfo'][colnum]['tscale'],
+                                          self._info['colinfo'][colnum]['tzero'])
+
+        lower=keys.get('lower',False)
+        upper=keys.get('upper',False)
+        if self.lower or lower:
+            _names_to_lower_if_recarray(array)
+        elif self.upper or upper:
+            _names_to_upper_if_recarray(array)
+
+        self._maybe_trim_strings(array, **keys)
+
+
+        return array
+
+    def get_rec_dtype(self, **keys):
+        """
+        Get the dtype for the specified columns
+
+        parameters
+        ----------
+        colnums: integer array
+            The column numbers, 0 offset
+        vstorage: string, optional
+            See docs in read_columns
+        """
+        colnums=keys.get('colnums',None)
+        vstorage = keys.get('vstorage',self._vstorage)
+
+        if colnums is None:
+            colnums = self._extract_colnums()
+
+
+        descr = []
+        isvararray = numpy.zeros(len(colnums),dtype=numpy.bool)
+        for i,colnum in enumerate(colnums):
+            dt,isvar = self.get_rec_column_descr(colnum, vstorage)
+            descr.append(dt)
+            isvararray[i] = isvar
+        dtype=numpy.dtype(descr)
+
+        offsets = numpy.zeros(len(colnums),dtype='i8')
+        for i,n in enumerate(dtype.names):
+            offsets[i] = dtype.fields[n][1]
+        return dtype, offsets, isvararray
+
+    def _check_tbit(self, **keys):
+        """
+        Check if one of the columns is a TBIT column
+
+        parameters
+        ----------
+        colnums: integer array, optional
+        """
+        colnums=keys.get('colnums',None)
+
+        if colnums is None:
+            colnums = self._extract_colnums()
+
+        has_tbit=False
+        for i,colnum in enumerate(colnums):
+            npy_type,isvar,istbit = self._get_tbl_numpy_dtype(colnum)
+            if istbit:
+                has_tbit=True
+                break
+
+        return has_tbit
+
+    def _fix_tbit_dtype(self, array, colnums):
+        """
+        If necessary, patch up the TBIT to convert to bool array
+
+        parameters
+        ----------
+        array: record array
+        colnums: column numbers for lookup
+        """
+        descr = array.dtype.descr
+        for i,colnum in enumerate(colnums):
+            npy_type,isvar,istbit = self._get_tbl_numpy_dtype(colnum)
+            if istbit:
+                coldescr=list(descr[i])
+                coldescr[1]='?'
+                descr[i]=tuple(coldescr)
+
+        return array.view(descr)
+
+    def _get_simple_dtype_and_shape(self, colnum, rows=None):
+        """
+        When reading a single column, we want the basic data
+        type and the shape of the array.
+
+        for scalar columns, shape is just nrows, otherwise
+        it is (nrows, dim1, dim2)
+
+        Note if rows= is sent and only a single row is requested,
+        the shape will be (dim1,dim2)
+
+
+        """
+
+        # basic datatype
+        npy_type,isvar,istbit = self._get_tbl_numpy_dtype(colnum)
+        info = self._info['colinfo'][colnum]
+        name = info['name']
+
+        if rows is None:
+            nrows = self._info['nrows']
+        else:
+            nrows = rows.size
+
+        shape = None
+        tdim = info['tdim']
+
+        shape = tdim2shape(tdim, name, is_string=(npy_type[0] == 'S'))
+        if shape is not None:
+            if nrows > 1:
+                if not isinstance(shape,tuple):
+                    # vector
+                    shape = (nrows,shape)
+                else:
+                    # multi-dimensional
+                    shape = tuple( [nrows] + list(shape) )
+        else:
+            # scalar
+            shape = nrows
+        return npy_type, shape
+
+    def get_rec_column_descr(self, colnum, vstorage):
+        """
+        Get a descriptor entry for the specified column.
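+
+        The descriptor is a tuple of the form (name, type) or
+        (name, type, shape), suitable for building a numpy record dtype.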
+ + parameters + ---------- + colnum: integer + The column number, 0 offset + vstorage: string + See docs in read_columns + """ + npy_type,isvar,istbit = self._get_tbl_numpy_dtype(colnum) + name = self._info['colinfo'][colnum]['name'] + + if isvar: + if vstorage == 'object': + descr=(name,'O') + else: + tform = self._info['colinfo'][colnum]['tform'] + max_size = extract_vararray_max(tform) + + if max_size <= 0: + name=self._info['colinfo'][colnum]['name'] + mess='Will read as an object field' + if max_size < 0: + mess="Column '%s': No maximum size: '%s'. %s" + mess=mess % (name,tform,mess) + warnings.warn(mess, FITSRuntimeWarning) + else: + mess="Column '%s': Max size is zero: '%s'. %s" + mess=mess % (name,tform,mess) + warnings.warn(mess, FITSRuntimeWarning) + + # we are forced to read this as an object array + return self.get_rec_column_descr(colnum, 'object') + + if npy_type[0] == 'S': + # variable length string columns cannot + # themselves be arrays I don't think + npy_type = 'S%d' % max_size + descr=(name,npy_type) + else: + descr=(name,npy_type,max_size) + else: + tdim = self._info['colinfo'][colnum]['tdim'] + shape = tdim2shape(tdim, name, is_string=(npy_type[0] == 'S')) + if shape is not None: + descr=(name,npy_type,shape) + else: + descr=(name,npy_type) + return descr,isvar + + + def _read_rec_with_var(self, colnums, rows, dtype, offsets, isvar, vstorage): + """ + + Read columns from a table into a rec array, including variable length + columns. This is special because, for efficiency, it involves reading + from the main table as normal but skipping the columns in the array + that are variable. Then reading the variable length columns, with + accounting for strides appropriately. + + row and column numbers should be checked before calling this function + + """ + + colnumsp=colnums+1 + if rows is None: + nrows = self._info['nrows'] + else: + nrows = rows.size + array = numpy.zeros(nrows, dtype=dtype) + + # read from the main table first + wnotvar,=numpy.where(isvar == False) + if wnotvar.size > 0: + thesecol=colnumsp[wnotvar] # this will be contiguous (not true for slices) + theseoff=offsets[wnotvar] + self._FITS.read_columns_as_rec_byoffset(self._ext+1, + thesecol, + theseoff, + array, + rows) + for i in xrange(thesecol.size): + + name = array.dtype.names[wnotvar[i]] + colnum = thesecol[i]-1 + self._rescale_and_convert_field_inplace(array, + name, + self._info['colinfo'][colnum]['tscale'], + self._info['colinfo'][colnum]['tzero']) + + + # now read the variable length arrays we may be able to speed this up + # by storing directly instead of reading first into a list + wvar,=numpy.where(isvar == True) + if wvar.size > 0: + thesecol=colnumsp[wvar] # this will be contiguous (not true for slices) + for i in xrange(thesecol.size): + colnump = thesecol[i] + name = array.dtype.names[wvar[i]] + dlist = self._FITS.read_var_column_as_list(self._ext+1,colnump,rows) + if isinstance(dlist[0],str): + is_string=True + else: + is_string=False + + if array[name].dtype.descr[0][1][1] == 'O': + # storing in object array + # get references to each, no copy made + for irow,item in enumerate(dlist): + array[name][irow] = item + else: + for irow,item in enumerate(dlist): + if is_string: + array[name][irow]= item + else: + ncopy = len(item) + + if sys.version_info > (3,0,0): + ts = array[name].dtype.descr[0][1][1] + if ts != 'S': + array[name][irow][0:ncopy] = item[:] + else: + array[name][irow] = item + else: + array[name][irow][0:ncopy] = item[:] + + return array + + def _extract_rows(self, rows): + 
""" + Extract an array of rows from an input scalar or sequence + """ + if rows is not None: + rows = numpy.array(rows, ndmin=1, copy=False, dtype='i8') + # returns unique, sorted + rows = numpy.unique(rows) + + maxrow = self._info['nrows']-1 + if rows[0] < 0 or rows[-1] > maxrow: + raise ValueError("rows must be in [%d,%d]" % (0,maxrow)) + return rows + + def _process_slice(self, arg): + """ + process the input slice for use calling the C code + """ + start = arg.start + stop = arg.stop + step = arg.step + + nrows=self._info['nrows'] + if step is None: + step=1 + if start is None: + start = 0 + if stop is None: + stop = nrows + + if start < 0: + start = nrows + start + if start < 0: + raise IndexError("Index out of bounds") + + if stop < 0: + stop = nrows + start + 1 + + if stop < start: + # will return an empty struct + stop = start + + if stop > nrows: + stop=nrows + return slice(start, stop, step) + + def _slice2rows(self, start, stop, step=None): + """ + Convert a slice to an explicit array of rows + """ + nrows=self._info['nrows'] + if start is None: + start=0 + if stop is None: + stop=nrows + if step is None: + step=1 + + tstart = self._fix_range(start) + tstop = self._fix_range(stop) + if tstart == 0 and tstop == nrows: + # this is faster: if all fields are also requested, then a + # single fread will be done + return None + if stop < start: + raise ValueError("start is greater than stop in slice") + return numpy.arange(tstart, tstop, step, dtype='i8') + + def _fix_range(self, num, isslice=True): + """ + Ensure the input is within range. + + If el=True, then don't treat as a slice element + """ + + nrows = self._info['nrows'] + if isslice: + # include the end + if num < 0: + num=nrows + (1+num) + elif num > nrows: + num=nrows + else: + # single element + if num < 0: + num=nrows + num + elif num > (nrows-1): + num=nrows-1 + + return num + + def _rescale_and_convert_field_inplace(self, array, name, scale, zero): + """ + Apply fits scalings. Also, convert bool to proper + numpy boolean values + """ + self._rescale_array(array[name], scale, zero) + if array[name].dtype==numpy.bool: + array[name] = self._convert_bool_array(array[name]) + return array + + def _rescale_and_convert(self, array, scale, zero, name=None): + """ + Apply fits scalings. 
Also, convert bool to proper + numpy boolean values + """ + self._rescale_array(array, scale, zero) + if array.dtype==numpy.bool: + array = self._convert_bool_array(array) + + return array + + + def _rescale_array(self, array, scale, zero): + """ + Scale the input array + """ + if scale != 1.0: + sval=numpy.array(scale,dtype=array.dtype) + array *= sval + if zero != 0.0: + zval=numpy.array(zero,dtype=array.dtype) + array += zval + + def _maybe_trim_strings(self, array, **keys): + """ + if requested, trim trailing white space from + all string fields in the input array + """ + trim_strings = keys.get('trim_strings',False) + if self.trim_strings or trim_strings: + _trim_strings(array) + + def _convert_bool_array(self, array): + """ + cfitsio reads as characters 'T' and 'F' -- convert to real boolean + If input is a fits bool, convert to numpy boolean + """ + + output = (array.view(numpy.int8) == ord('T')).astype(numpy.bool) + return output + + def _get_tbl_numpy_dtype(self, colnum, include_endianness=True): + """ + Get numpy type for the input column + """ + table_type = self._info['hdutype'] + table_type_string = _hdu_type_map[table_type] + try: + ftype = self._info['colinfo'][colnum]['eqtype'] + if table_type == ASCII_TBL: + npy_type = _table_fits2npy_ascii[abs(ftype)] + else: + npy_type = _table_fits2npy[abs(ftype)] + except KeyError: + raise KeyError("unsupported %s fits data " + "type: %d" % (table_type_string, ftype)) + + istbit=False + if (ftype == 1): + istbit=True + + isvar=False + if ftype < 0: + isvar=True + if include_endianness: + # if binary we will read the big endian bytes directly, + # if ascii we read into native byte order + if table_type == ASCII_TBL: + addstr='' + else: + addstr='>' + if npy_type not in ['u1','i1','S']: + npy_type = addstr+npy_type + + if npy_type == 'S': + width = self._info['colinfo'][colnum]['width'] + npy_type = 'S%d' % width + return npy_type, isvar, istbit + + + def _process_args_as_rows_or_columns(self, arg, unpack=False): + """ + We must be able to interpret the args as as either a column name or + row number, or sequences thereof. Numpy arrays and slices are also + fine. + + Examples: + 'field' + 35 + [35,55,86] + ['f1',f2',...] + Can also be tuples or arrays. + """ + + isslice = False + isrows = False + result=arg + if isinstance(arg, (tuple,list,numpy.ndarray)): + # a sequence was entered + if isstring(arg[0]): + pass + else: + isrows=True + result = arg + elif isstring(arg): + # a single string was entered + pass + elif isinstance(arg, slice): + isrows=True + if unpack: + result = self._slice2rows(arg.start, arg.stop, arg.step) + else: + isslice=True + result = self._process_slice(arg) + else: + # a single object was entered. Probably should apply some more + # checking on this + isrows=True + + return result, isrows, isslice + + def _read_var_column(self, colnum, rows, vstorage): + """ + + first read as a list of arrays, then copy into either a fixed length + array or an array of objects, depending on vstorage. + + """ + + if sys.version_info > (3,0,0): + stype=bytes + else: + stype=str + + dlist = self._FITS.read_var_column_as_list(self._ext+1,colnum+1,rows) + + if vstorage == 'fixed': + + tform = self._info['colinfo'][colnum]['tform'] + max_size = extract_vararray_max(tform) + + if max_size <= 0: + name=self._info['colinfo'][colnum]['name'] + mess='Will read as an object field' + if max_size < 0: + mess="Column '%s': No maximum size: '%s'. 
%s" + mess=mess % (name,tform,mess) + warnings.warn(mess, FITSRuntimeWarning) + else: + mess="Column '%s': Max size is zero: '%s'. %s" + mess=mess % (name,tform,mess) + warnings.warn(mess, FITSRuntimeWarning) + + # we are forced to read this as an object array + return self._read_var_column(colnum, rows, 'object') + + if isinstance(dlist[0],stype): + descr = 'S%d' % max_size + array = numpy.fromiter(dlist, descr) + else: + descr=dlist[0].dtype.str + array = numpy.zeros( (len(dlist), max_size), dtype=descr) + + for irow,item in enumerate(dlist): + ncopy = len(item) + array[irow,0:ncopy] = item[:] + else: + array=numpy.zeros(len(dlist), dtype='O') + for irow,item in enumerate(dlist): + array[irow] = item + + return array + + def _extract_colnums(self, columns=None): + """ + Extract an array of columns from the input + """ + if columns is None: + return numpy.arange(self._ncol, dtype='i8') + + if not isinstance(columns,(tuple,list,numpy.ndarray)): + # is a scalar + return self._extract_colnum(columns) + + colnums = numpy.zeros(len(columns), dtype='i8') + for i in xrange(colnums.size): + colnums[i] = self._extract_colnum(columns[i]) + + # returns unique sorted + colnums = numpy.unique(colnums) + return colnums + + def _extract_colnum(self, col): + """ + Get the column number for the input column + """ + if isinteger(col): + colnum = col + + if (colnum < 0) or (colnum > (self._ncol-1)): + raise ValueError("column number should be in [0,%d]" % (0,self._ncol-1)) + else: + colstr=mks(col) + try: + if self.case_sensitive: + mess="column name '%s' not found (case sensitive)" % col + colnum = self._colnames.index(colstr) + else: + mess="column name '%s' not found (case insensitive)" % col + colnum = self._colnames_lower.index(colstr.lower()) + except ValueError: + raise ValueError(mess) + return int(colnum) + + def _update_info(self): + """ + Call parent method and make sure this is in fact a + table HDU. Set some convenience data. + """ + super(TableHDU,self)._update_info() + if self._info['hdutype'] == IMAGE_HDU: + mess="Extension %s is not a Table HDU" % self.ext + raise ValueError(mess) + if 'colinfo' in self._info: + self._colnames = [i['name'] for i in self._info['colinfo']] + self._colnames_lower = [i['name'].lower() for i in self._info['colinfo']] + self._ncol = len(self._colnames) + + def __getitem__(self, arg): + """ + Get data from a table using python [] notation. + + You can use [] to extract column and row subsets, or read everything. + The notation is essentially the same as numpy [] notation, except that + a sequence of column names may also be given. Examples reading from + "filename", extension "ext" + + fits=fitsio.FITS(filename) + fits[ext][:] + fits[ext][2:5] + fits[ext][200:235:2] + fits[ext][rows] + fits[ext][cols][rows] + + Note data are only read once the rows are specified. + + Note you can only read variable length arrays the default way, + using this function, so set it as you want on construction. 
+ + This function is used for ascii tables as well + """ + + res, isrows, isslice = \ + self._process_args_as_rows_or_columns(arg) + + if isrows: + # rows were entered: read all columns + if isslice: + array = self.read_slice(res.start, res.stop, res.step) + else: + # will also get here if slice is entered but this + # is an ascii table + array = self.read(rows=res) + else: + return TableColumnSubset(self, res) + + if self.lower: + _names_to_lower_if_recarray(array) + elif self.upper: + _names_to_upper_if_recarray(array) + + self._maybe_trim_strings(array) + + return array + + def __iter__(self): + """ + Get an iterator for a table + + e.g. + f=fitsio.FITS(fname) + hdu1 = f[1] + for row in hdu1: + ... + """ + + # always start with first row + self._iter_row=0 + + # for iterating we must assume the number of rows will not change + self._iter_nrows=self.get_nrows() + + self._buffer_iter_rows(0) + return self + + def next(self): + """ + get the next row when iterating + + e.g. + f=fitsio.FITS(fname) + hdu1 = f[1] + for row in hdu1: + ... + + By default read one row at a time. Send iter_row_buffer to get a more + efficient buffering. + """ + return self._get_next_buffered_row() + __next__=next + + def _get_next_buffered_row(self): + """ + Get the next row for iteration. + """ + if self._iter_row == self._iter_nrows: + raise StopIteration + + if self._row_buffer_index >= self._iter_row_buffer: + self._buffer_iter_rows(self._iter_row) + + data=self._row_buffer[self._row_buffer_index] + self._iter_row += 1 + self._row_buffer_index += 1 + return data + + def _buffer_iter_rows(self, start): + """ + Read in the buffer for iteration + """ + self._row_buffer = self[start:start+self._iter_row_buffer] + + # start back at the front of the buffer + self._row_buffer_index = 0 + + def __repr__(self): + """ + textual representation for some metadata + """ + text, spacing = self._get_repr_list() + + text.append('%srows: %d' % (spacing,self._info['nrows'])) + text.append('%scolumn info:' % spacing) + + cspacing = ' '*4 + nspace = 4 + nname = 15 + ntype = 6 + format = cspacing + "%-" + str(nname) + "s %" + str(ntype) + "s %s" + pformat = cspacing + "%-" + str(nname) + "s\n %" + str(nspace+nname+ntype) + "s %s" + + for colnum,c in enumerate(self._info['colinfo']): + if len(c['name']) > nname: + f = pformat + else: + f = format + + dt,isvar,istbit = self._get_tbl_numpy_dtype(colnum, include_endianness=False) + if isvar: + tform = self._info['colinfo'][colnum]['tform'] + if dt[0] == 'S': + dt = 'S0' + dimstr='vstring[%d]' % extract_vararray_max(tform) + else: + dimstr = 'varray[%s]' % extract_vararray_max(tform) + else: + if dt[0] == 'S': + is_string=True + else: + is_string=False + dimstr = _get_col_dimstr(c['tdim'],is_string=is_string) + + s = f % (c['name'],dt,dimstr) + text.append(s) + + text = '\n'.join(text) + return text + + +class AsciiTableHDU(TableHDU): + def read(self, **keys): + """ + read a data from an ascii table HDU + + By default, all rows are read. Send rows= to select subsets of the + data. Table data are read into a recarray for multiple columns, + plain array for a single column. + + parameters + ---------- + columns: list/array + An optional set of columns to read from table HDUs. Can be string + or number. If a sequence, a recarray is always returned. If a + scalar, an ordinary array is returned. + rows: list/array, optional + An optional list of rows to read from table HDUS. Default is to + read all. 
+ vstorage: string, optional + Over-ride the default method to store variable length columns. Can + be 'fixed' or 'object'. See docs on fitsio.FITS for details. + lower: bool, optional + If True, force all columns names to lower case in output. Will over + ride the lower= keyword from construction. + upper: bool, optional + If True, force all columns names to upper case in output. Will over + ride the lower= keyword from construction. + """ + + rows = keys.get('rows',None) + columns = keys.get('columns',None) + + # if columns is None, returns all. Guaranteed to be unique and sorted + colnums = self._extract_colnums(columns) + if isinstance(colnums,int): + # scalar sent, don't read as a recarray + return self.read_column(columns, **keys) + + rows = self._extract_rows(rows) + if rows is None: + nrows = self._info['nrows'] + else: + nrows = rows.size + + # if rows is None still returns None, and is correctly interpreted + # by the reader to mean all + rows = self._extract_rows(rows) + + # this is the full dtype for all columns + dtype, offsets, isvar = self.get_rec_dtype(colnums=colnums, **keys) + array = numpy.zeros(nrows, dtype=dtype) + + # note reading into existing data + wnotvar,=numpy.where(isvar == False) + if wnotvar.size > 0: + for i in wnotvar: + colnum = colnums[i] + name=array.dtype.names[i] + a=array[name].copy() + self._FITS.read_column(self._ext+1,colnum+1, a, rows) + array[name] = a + del a + + wvar,=numpy.where(isvar == True) + if wvar.size > 0: + for i in wvar: + colnum = colnums[i] + name = array.dtype.names[i] + dlist = self._FITS.read_var_column_as_list(self._ext+1,colnum+1,rows) + if isinstance(dlist[0],str): + is_string=True + else: + is_string=False + + if array[name].dtype.descr[0][1][1] == 'O': + # storing in object array + # get references to each, no copy made + for irow,item in enumerate(dlist): + array[name][irow] = item + else: + for irow,item in enumerate(dlist): + if is_string: + array[name][irow]= item + else: + ncopy = len(item) + array[name][irow][0:ncopy] = item[:] + + lower=keys.get('lower',False) + upper=keys.get('upper',False) + if self.lower or lower: + _names_to_lower_if_recarray(array) + elif self.upper or upper: + _names_to_upper_if_recarray(array) + + self._maybe_trim_strings(array, **keys) + + return array + read_ascii=read + +class ImageHDU(HDUBase): + def _update_info(self): + """ + Call parent method and make sure this is in fact a + image HDU. Set dims in C order + """ + super(ImageHDU,self)._update_info() + + if self._info['hdutype'] != IMAGE_HDU: + mess="Extension %s is not a Image HDU" % self.ext + raise ValueError(mess) + + # convert to c order + if 'dims' in self._info: + self._info['dims'] = list( reversed(self._info['dims']) ) + + def has_data(self): + """ + Determine if this HDU has any data + + For images, check that the dimensions are not zero. + + For tables, check that the row count is not zero + """ + ndims = self._info.get('ndims',0) + if ndims == 0: + return False + else: + return True + + def is_compressed(self): + """ + returns true of this extension is compressed + """ + return self._info['is_compressed_image']==1 + + def get_comptype(self): + """ + Get the compression type. + + None if the image is not compressed. + """ + return self._info['comptype'] + + def get_dims(self): + """ + get the shape of the image. 
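+        Note the dims are returned in C (row-major) order, reversed
+        from the on-disk FITS ordering.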
Returns () for empty + """ + if self._info['ndims'] != 0: + dims = self._info['dims'] + else: + dims = () + + return dims + + def reshape(self, dims): + """ + reshape an existing image to the requested dimensions + + parameters + ---------- + dims: sequence + Any sequence convertible to i8 + """ + + adims = numpy.array(dims, ndmin=1, dtype='i8') + self._FITS.reshape_image(self._ext+1, adims) + + def write(self, img, start=0, **keys): + """ + Write the image into this HDU + + If data already exist in this HDU, they will be overwritten. If the + image to write is larger than the image on disk, or if the start + position is such that the write would extend beyond the existing + dimensions, the on-disk image is expanded. + + parameters + ---------- + img: ndarray + A simple numpy ndarray + start: integer or sequence + Where to start writing data. Can be an integer offset + into the entire array, or a sequence determining where + in N-dimensional space to start. + """ + + dims=self.get_dims() + + if img.dtype.fields is not None: + raise ValueError("got recarray, expected regular ndarray") + if img.size == 0: + raise ValueError("data must have at least 1 row") + + # data must be c-contiguous and native byte order + if not img.flags['C_CONTIGUOUS']: + # this always makes a copy + img_send = numpy.ascontiguousarray(img) + array_to_native(img_send, inplace=True) + else: + img_send = array_to_native(img, inplace=False) + + if not numpy.isscalar(start): + # convert to scalar offset + # note we use the on-disk data type to get itemsize + + offset = _convert_full_start_to_offset(dims, start) + else: + offset = start + + # see if we need to resize the image + if self.has_data(): + self._expand_if_needed(dims, img.shape, start, offset) + + self._FITS.write_image(self._ext+1, img_send, offset+1) + self._update_info() + + + def read(self, **keys): + """ + Read the image. + + If the HDU is an IMAGE_HDU, read the corresponding image. Compression + and scaling are dealt with properly. + """ + if not self.has_data(): + return None + + dtype, shape = self._get_dtype_and_shape() + array = numpy.zeros(shape, dtype=dtype) + self._FITS.read_image(self._ext+1, array) + return array + + + def _get_dtype_and_shape(self): + """ + Get the numpy dtype and shape for image + """ + npy_dtype = self._get_image_numpy_dtype() + + if self._info['ndims'] != 0: + shape = self._info['dims'] + else: + raise IOError("no image present in HDU") + + return npy_dtype, shape + + def _get_image_numpy_dtype(self): + """ + Get the numpy dtype for the image + """ + try: + ftype = self._info['img_equiv_type'] + npy_type = _image_bitpix2npy[ftype] + except KeyError: + raise KeyError("unsupported fits data type: %d" % ftype) + + return npy_type + + def __getitem__(self, arg): + """ + Get data from an image using python [] slice notation. + + e.g., [2:25, 4:45]. + """ + return self._read_image_slice(arg) + + def _read_image_slice(self, arg): + """ + workhorse to read a slice + """ + if 'ndims' not in self._info: + raise ValueError("Attempt to slice empty extension") + + if isinstance(arg, slice): + # one-dimensional, e.g. 2:20 + return self._read_image_slice((arg,)) + + if not isinstance(arg, tuple): + raise ValueError("arguments must be slices, one for each " + "dimension, e.g. [2:5] or [2:5,8:25] etc.") + + # should be a tuple of slices, one for each dimension + # e.g. 
[2:3, 8:100] + nd = len(arg) + if nd != self._info['ndims']: + raise ValueError("Got slice dimensions %d, " + "expected %d" % (nd,self._info['ndims'])) + + + targ=arg + arg=[] + for a in targ: + if isinstance(a,slice): + arg.append(a) + elif isinstance(a,int): + arg.append( slice(a,a+1,1) ) + else: + raise ValueError("arguments must be slices, e.g. 2:12") + + dims=self._info['dims'] + arrdims = [] + first = [] + last = [] + steps = [] + + # check the args and reverse dimensions since + # fits is backwards from numpy + dim=0 + for slc in arg: + start = slc.start + stop = slc.stop + step = slc.step + + if start is None: + start=0 + if stop is None: + stop = dims[dim] + if step is None: + step=1 + if step < 1: + raise ValueError("slice steps must be >= 1") + + if start < 0: + start = dims[dim] + start + if start < 0: + raise IndexError("Index out of bounds") + + if stop < 0: + stop = dims[dim] + start + 1 + + # move to 1-offset + start = start + 1 + + if stop < start: + raise ValueError("python slices but include at least one " + "element, got %s" % slc) + if stop > dims[dim]: + stop = dims[dim] + + first.append(start) + last.append(stop) + steps.append(step) + arrdims.append(stop-start+1) + + dim += 1 + + first.reverse() + last.reverse() + steps.reverse() + first = numpy.array(first, dtype='i8') + last = numpy.array(last, dtype='i8') + steps = numpy.array(steps, dtype='i8') + + npy_dtype = self._get_image_numpy_dtype() + array = numpy.zeros(arrdims, dtype=npy_dtype) + self._FITS.read_image_slice(self._ext+1, first, last, steps, array) + return array + + def _expand_if_needed(self, dims, write_dims, start, offset): + """ + expand the on-disk image if the indended write will extend + beyond the existing dimensions + """ + from operator import mul + + if numpy.isscalar(start): + start_is_scalar=True + else: + start_is_scalar=False + + existing_size=reduce(mul, dims, 1) + required_size = offset + reduce(mul, write_dims, 1) + + if required_size > existing_size: + print(" required size:",required_size,"existing size:",existing_size) + # we need to expand the image + ndim=len(dims) + idim=len(write_dims) + + if start_is_scalar: + if start == 0: + start=[0]*ndim + else: + raise ValueError("When expanding " + "an existing image while writing, the start keyword " + "must have the same number of dimensions " + "as the image or be exactly 0, got %s " % start) + + if idim != ndim: + raise ValueError("When expanding " + "an existing image while writing, the input image " + "must have the same number of dimensions " + "as the original. " + "Got %d instead of %d" % (idim,ndim)) + new_dims = [] + for i in xrange(ndim): + required_dim = start[i] + write_dims[i] + + if required_dim < dims[i]: + # careful not to shrink the image! 
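+                    # the requested write region fits inside the
+                    # current extent, so keep the existing size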
+ dimsize=dims[i] + else: + dimsize=required_dim + + new_dims.append(dimsize) + + print(" reshaping image to:",new_dims) + self.reshape(new_dims) + + def __repr__(self): + """ + Representation for ImageHDU + """ + text, spacing = self._get_repr_list() + text.append("%simage info:" % spacing) + cspacing = ' '*4 + + # need this check for when we haven't written data yet + if 'ndims' in self._info: + if self._info['comptype'] is not None: + text.append("%scompression: %s" % (cspacing,self._info['comptype'])) + + if self._info['ndims'] != 0: + dimstr = [str(d) for d in self._info['dims']] + dimstr = ",".join(dimstr) + else: + dimstr='' + + dt = _image_bitpix2npy[self._info['img_equiv_type']] + text.append("%sdata type: %s" % (cspacing,dt)) + text.append("%sdims: [%s]" % (cspacing,dimstr)) + + text = '\n'.join(text) + return text + +def _get_col_dimstr(tdim, is_string=False): + """ + not for variable length + """ + dimstr='' + if tdim is None: + dimstr='array[bad TDIM]' + else: + if is_string: + if len(tdim) > 1: + dimstr = [str(d) for d in tdim[1:]] + else: + if len(tdim) > 1 or tdim[0] > 1: + dimstr = [str(d) for d in tdim] + if dimstr != '': + dimstr = ','.join(dimstr) + dimstr = 'array[%s]' % dimstr + + return dimstr + +class TableColumnSubset(object): + """ + + A class representing a subset of the the columns on disk. When called + with .read() or [ rows ] the data are read from disk. + + Useful because subsets can be passed around to functions, or chained + with a row selection. + + This class is returned when using [ ] notation to specify fields in a + TableHDU class + + fits = fitsio.FITS(fname) + colsub = fits[ext][field_list] + + returns a TableColumnSubset object. To read rows: + + data = fits[ext][field_list][row_list] + + colsub = fits[ext][field_list] + data = colsub[row_list] + data = colsub.read(rows=row_list) + + to read all, use .read() with no args or [:] + """ + + def __init__(self, fitshdu, columns): + """ + Input is the SFile instance and a list of column names. + """ + + self.columns = columns + if isstring(columns) or isinteger(columns): + # this is to check if it exists + self.colnums = [fitshdu._extract_colnum(columns)] + + self.is_scalar=True + self.columns_list = [columns] + else: + # this is to check if it exists + self.colnums = fitshdu._extract_colnums(columns) + + self.is_scalar=False + self.columns_list = columns + + self.fitshdu = fitshdu + + def read(self, **keys): + """ + Read the data from disk and return as a numpy array + """ + + if self.is_scalar: + data = self.fitshdu.read_column(self.columns, **keys) + else: + c=keys.get('columns',None) + if c is None: + keys['columns'] = self.columns + data = self.fitshdu.read(**keys) + + return data + + def __getitem__(self, arg): + """ + If columns are sent, then the columns will just get reset and + we'll return a new object + + If rows are sent, they are read and the result returned. + """ + + # we have to unpack the rows if we are reading a subset + # of the columns because our slice operator only works + # on whole rows. We could allow rows= keyword to + # be a slice... + + res, isrows, isslice = \ + self.fitshdu._process_args_as_rows_or_columns(arg, unpack=True) + if isrows: + # rows was entered: read all current column subset + return self.read(rows=res) + + # columns was entered. 
Return a subset objects + return TableColumnSubset(self.fitshdu, columns=res) + + + def __repr__(self): + """ + Representation for TableColumnSubset + """ + spacing = ' '*2 + cspacing = ' '*4 + + hdu = self.fitshdu + info = self.fitshdu._info + colinfo = info['colinfo'] + + text = [] + text.append("%sfile: %s" % (spacing,hdu._filename)) + text.append("%sextension: %d" % (spacing,info['hdunum']-1)) + text.append("%stype: %s" % (spacing,_hdu_type_map[info['hdutype']])) + text.append('%srows: %d' % (spacing,info['nrows'])) + text.append("%scolumn subset:" % spacing) + + cspacing = ' '*4 + nspace = 4 + nname = 15 + ntype = 6 + format = cspacing + "%-" + str(nname) + "s %" + str(ntype) + "s %s" + pformat = cspacing + "%-" + str(nname) + "s\n %" + str(nspace+nname+ntype) + "s %s" + + for colnum in self.colnums: + cinfo = colinfo[colnum] + + if len(cinfo['name']) > nname: + f = pformat + else: + f = format + + dt,isvar,istbit = hdu._get_tbl_numpy_dtype(colnum, include_endianness=False) + if isvar: + tform = cinfo['tform'] + if dt[0] == 'S': + dt = 'S0' + dimstr='vstring[%d]' % extract_vararray_max(tform) + else: + dimstr = 'varray[%s]' % extract_vararray_max(tform) + else: + dimstr = _get_col_dimstr(cinfo['tdim']) + + s = f % (cinfo['name'],dt,dimstr) + text.append(s) + + + s = "\n".join(text) + return s + + + + + + + + + + + +def extract_vararray_max(tform): + """ + Extract number from PX(number) + """ + + first=tform.find('(') + last=tform.rfind(')') + + if first == -1 or last == -1: + # no max length specified + return -1 + + maxnum=int(tform[first+1:last]) + return maxnum + +def check_extver(extver): + if extver is None: + return 0 + extver=int(extver) + if extver <= 0: + raise ValueError("extver must be > 0") + return extver + +def extract_filename(filename): + filename = mks(filename) + filename=filename.strip() + if filename[0] == "!": + filename=filename[1:] + filename = os.path.expandvars(filename) + filename = os.path.expanduser(filename) + return filename + +def tdim2shape(tdim, name, is_string=False): + shape=None + if tdim is None: + raise ValueError("field '%s' has malformed TDIM" % name) + + if len(tdim) > 1 or tdim[0] > 1: + if is_string: + shape = list( reversed(tdim[1:]) ) + else: + shape = list( reversed(tdim) ) + + if len(shape) == 1: + shape = shape[0] + else: + shape = tuple(shape) + + return shape + +def array2tabledef(data, table_type='binary', write_bitcols=False): + """ + Similar to descr2tabledef but if there are object columns a type + and max length will be extracted and used for the tabledef + """ + is_ascii = (table_type=='ascii') + + if data.dtype.fields is None: + raise ValueError("data must have fields") + names=[] + names_nocase={} + formats=[] + dims=[] + + descr=data.dtype.descr + for d in descr: + # these have the form ' 1: + send_dt=list(dt) + [this_data.shape[1:]] + _, form, dim = npy2fits(send_dt,table_type=table_type,write_bitcols=write_bitcols) + + formats.append(form) + dims.append(dim) + + return names, formats, dims + + +def descr2tabledef(descr, table_type='binary', write_bitcols=False): + """ + Create a FITS table def from the input numpy descriptor. + + parameters + ---------- + descr: list + A numpy recarray type descriptor array.dtype.descr + + returns + ------- + names, formats, dims: tuple of lists + These are the ttyp, tform and tdim header entries + for each field. 
dim entries may be None + """ + names=[] + formats=[] + dims=[] + + for d in descr: + + """ + npy_dtype = d[1][1:] + if is_ascii and npy_dtype in ['u1','i1']: + raise ValueError("1-byte integers are not supported for ascii tables") + """ + + name, form, dim = npy2fits(d,table_type=table_type,write_bitcols=write_bitcols) + + if name == '': + raise ValueError("field name is an empty string") + + """ + if is_ascii: + if dim is not None: + raise ValueError("array columns are not supported for ascii tables") + """ + + names.append(name) + formats.append(form) + dims.append(dim) + + return names, formats, dims + +def npy_obj2fits(data, name=None): + # this will be a variable length column 1Pt(len) where t is the + # type and len is max length. Each element must be convertible to + # the same type as the first + + if sys.version_info > (3,0,0): + stype=bytes + else: + stype=str + + if name is None: + d = data.dtype.descr + first=data[0] + else: + d = data[name].dtype.descr + first = data[name][0] + + # note numpy._string is an instance of str in python2, bytes + # in python3 + if isinstance(first, stype): + fits_dtype = _table_npy2fits_form['S'] + else: + arr0 = numpy.array(first,copy=False) + dtype0 = arr0.dtype + npy_dtype = dtype0.descr[0][1][1:] + if npy_dtype[0] == 'S': + raise ValueError("Field '%s' is an arrays of strings, this is " + "not allowed in variable length columns" % name) + if npy_dtype not in _table_npy2fits_form: + raise ValueError("Field '%s' has unsupported type '%s'" % (name,npy_dtype)) + fits_dtype = _table_npy2fits_form[npy_dtype] + + # Q uses 64-bit addressing, should try at some point but the cfitsio manual + # says it is experimental + #form = '1Q%s' % fits_dtype + form = '1P%s' % fits_dtype + dim=None + + return form, dim + + + +def npy2fits(d, table_type='binary', write_bitcols=False): + """ + d is the full element from the descr + """ + npy_dtype = d[1][1:] + if npy_dtype[0] == 'S': + name, form, dim = npy_string2fits(d,table_type=table_type) + else: + name, form, dim = npy_num2fits(d, table_type=table_type, write_bitcols=write_bitcols) + + return name, form, dim + +def npy_num2fits(d, table_type='binary', write_bitcols=False): + """ + d is the full element from the descr + + For vector,array columns the form is the total counts + followed by the code. + + For array columns with dimension greater than 1, the dim is set to + (dim1, dim2, ...) + So it is treated like an extra dimension + + """ + + dim = None + + name = d[0] + + npy_dtype = d[1][1:] + if npy_dtype[0] == 'S': + raise ValueError("got S type: use npy_string2fits") + + if npy_dtype not in _table_npy2fits_form: + raise ValueError("unsupported type '%s'" % npy_dtype) + + if table_type=='binary': + form = _table_npy2fits_form[npy_dtype] + else: + form = _table_npy2fits_form_ascii[npy_dtype] + + # now the dimensions + if len(d) > 2: + if table_type == 'ascii': + raise ValueError("Ascii table columns must be scalar, got %s" % str(d)) + + if write_bitcols and npy_dtype=='b1': + # multi-dimensional boolean + form = 'X' + + # Note, depending on numpy version, even 1-d can be a tuple + if isinstance(d[2], tuple): + count=reduce(lambda x, y: x*y, d[2]) + form = '%d%s' % (count,form) + + if len(d[2]) > 1: + # this is multi-dimensional array column. 
the form + # should be total elements followed by A + dim = list(reversed(d[2])) + dim = [str(e) for e in dim] + dim = '(' + ','.join(dim)+')' + else: + # this is a vector (1d array) column + count = d[2] + form = '%d%s' % (count,form) + + return name, form, dim + + +def npy_string2fits(d,table_type='binary'): + """ + d is the full element from the descr + + form for strings is the total number of bytes followed by A. Thus + for vector or array columns it is the size of the string times the + total number of elements in the array. + + Then the dim is set to + (sizeofeachstring, dim1, dim2, ...) + So it is treated like an extra dimension + + """ + + dim = None + + name = d[0] + + npy_dtype = d[1][1:] + if npy_dtype[0] != 'S': + raise ValueError("expected S type") + + # get the size of each string + string_size_str = npy_dtype[1:] + string_size = int(string_size_str) + + # now the dimensions + if len(d) == 2: + if table_type == 'ascii': + form = 'A'+string_size_str + else: + form = string_size_str+'A' + else: + if table_type == 'ascii': + raise ValueError("Ascii table columns must be scalar, got %s" % str(d)) + if isinstance(d[2], tuple): + # this is an array column. the form + # should be total elements followed by A + #count = 1 + #count = [count*el for el in d[2]] + count=reduce(lambda x, y: x*y, d[2]) + count = string_size*count + form = '%dA' % count + + # will have to do tests to see if this is the right order + dim = list(reversed(d[2])) + #dim = d[2] + dim = [string_size_str] + [str(e) for e in dim] + dim = '(' + ','.join(dim)+')' + else: + # this is a vector (1d array) column + count = string_size*d[2] + form = '%dA' % count + + # will have to do tests to see if this is the right order + dim = [string_size_str, str(d[2])] + dim = '(' + ','.join(dim)+')' + + return name, form, dim + +class FITSHDR(object): + """ + A class representing a FITS header. + + parameters + ---------- + record_list: optional + A list of dicts, or dict, or another FITSHDR + - list of dictionaries containing 'name','value' and optionally + a 'comment' field; the order is preserved. + - a dictionary of keyword-value pairs; no comments are written + in this case, and the order is arbitrary. + - another FITSHDR object; the order is preserved. + convert: bool, optional + If True, convert strings. E.g. '3' gets + converted to 3 and "'hello'" gets converted + to 'hello' and 'T'/'F' to True/False. Default + is False. + + If the input is a card string, convert is implied True + + examples: + + hdr=FITSHDR() + + # set a simple value + hdr['blah'] = 35 + + # set from a dict to include a comment. 
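+    # (the 'comment' field is optional)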
+ rec={'name':'fromdict', 'value':3, 'comment':'my comment'} + hdr.add_record(rec) + + # can do the same with a full FITSRecord + rec=FITSRecord( {'name':'temp', 'value':35, 'comment':'temp in C'} ) + hdr.add_record(rec) + + # in the above, the record is replaced if one with the same name + # exists, except for COMMENT and HISTORY, which can exist as + # duplicates + + # print the header + print(hdr) + + # print a single record + print(hdr['fromdict']) + + + # can also set from a card + hdr.add_record('test = 77') + # using a FITSRecord object (internally uses FITSCard) + card=FITSRecord('test = 77') + hdr.add_record(card) + + # can also construct with a record list + recs=[{'name':'test', 'value':35, 'comment':'a comment'}, + {'name':'blah', 'value':'some string'}] + hdr=FITSHDR(recs) + + # if you have no comments, you can construct with a simple dict + recs={'day':'saturday', + 'telescope':'blanco'} + hdr=FITSHDR(recs) + + """ + def __init__(self, record_list=None, convert=False): + + self._record_list = [] + self._record_map = {} + self._index_map={} + + if isinstance(record_list,FITSHDR): + for r in record_list.records(): + self.add_record(r, convert=convert) + elif isinstance(record_list, dict): + for k in record_list: + r = {'name':k, 'value':record_list[k]} + self.add_record(r, convert=convert) + elif isinstance(record_list, list): + for r in record_list: + self.add_record(r, convert=convert) + elif record_list is not None: + raise ValueError("expected a dict or list of dicts or FITSHDR") + + + def add_record(self, record_in, convert=False): + """ + Add a new record. Strip quotes from around strings. + + This will over-write if the key already exists, except + for COMMENT and HISTORY fields + + parameters + ----------- + record: + The record, either a dict or a header card string + or a FITSRecord or FITSCard + convert: bool, optional + If True, convert strings. E.g. '3' gets + converted to 3 and "'hello'" gets converted + to 'hello' and 'T'/'F' to True/False. Default + is False. + + If the input is a card string, convert is implied True + """ + record = FITSRecord(record_in, convert=convert) + + # only append when this name already exists if it is + # a comment or history field, otherwise simply over-write + key=record['name'].upper() + + key_exists = key in self._record_map + + if not key_exists or key in ('COMMENT','HISTORY','CONTINUE'): + # append new record + self._record_list.append(record) + index=len(self._record_list)-1 + self._index_map[key] = index + else: + # over-write existing + index = self._index_map[key] + self._record_list[index] = record + + self._record_map[key] = record + + def _add_to_map(self, record): + key=record['name'].upper() + self._record_map[key] = record + + def get_comment(self, item): + """ + Get the comment for the requested entry + """ + key=item.upper() + if key not in self._record_map: + raise ValueError("unknown record: %s" % key) + if 'comment' not in self._record_map[key]: + return None + else: + return self._record_map[key]['comment'] + + def records(self): + """ + Return the list of full records as a list of dictionaries. + """ + return self._record_list + + def keys(self): + """ + Return a copy of the current key list. + """ + return [e['name'] for e in self._record_list] + + def delete(self, name): + """ + Delete the specified entry if it exists. 
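+
+        The name may also be a list or tuple of names, in which case
+        each entry is deleted in turn.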
+ """ + if isinstance(name, (list,tuple)): + for xx in name: + self.delete(xx) + else: + if name in self._record_map: + del self._record_map[name] + self._record_list = [r for r in self._record_list if r['name'] != name] + + def clean(self, is_table=False): + """ + Remove reserved keywords from the header. + + These are keywords that the fits writer must write in order + to maintain consistency between header and data. + + keywords + -------- + is_table: bool, optional + Set True if this is a table, so extra keywords will be cleaned + """ + + rmnames = ['SIMPLE','EXTEND','XTENSION','BITPIX','PCOUNT','GCOUNT', + 'THEAP', + 'EXTNAME', + 'BLANK', + 'ZQUANTIZ','ZDITHER0','ZIMAGE','ZCMPTYPE', + 'ZSIMPLE','ZTENSION','ZPCOUNT','ZGCOUNT', + 'ZBITPIX','ZEXTEND', + #'FZTILELN','FZALGOR', + 'CHECKSUM','DATASUM'] + + if is_table: + # these are not allowed in tables + rmnames += [ + 'BUNIT','BSCALE','BZERO', + ] + + self.delete(rmnames) + + r = self._record_map.get('NAXIS',None) + if r is not None: + naxis = int(r['value']) + self.delete('NAXIS') + + rmnames = ['NAXIS%d' % i for i in xrange(1,naxis+1)] + self.delete(rmnames) + + r = self._record_map.get('ZNAXIS',None) + self.delete('ZNAXIS') + if r is not None: + + znaxis = int(r['value']) + + rmnames = ['ZNAXIS%d' % i for i in xrange(1,znaxis+1)] + self.delete(rmnames) + rmnames = ['ZTILE%d' % i for i in xrange(1,znaxis+1)] + self.delete(rmnames) + rmnames = ['ZNAME%d' % i for i in xrange(1,znaxis+1)] + self.delete(rmnames) + rmnames = ['ZVAL%d' % i for i in xrange(1,znaxis+1)] + self.delete(rmnames) + + + r = self._record_map.get('TFIELDS',None) + if r is not None: + tfields = int(r['value']) + self.delete('TFIELDS') + + if tfields > 0: + + nbase = ['TFORM','TTYPE','TDIM','TUNIT','TSCAL','TZERO', + 'TNULL','TDISP','TDMIN','TDMAX','TDESC','TROTA', + 'TRPIX','TRVAL','TDELT','TCUNI', + #'FZALG' + ] + for i in xrange(1,tfields+1): + names=['%s%d' % (n,i) for n in nbase] + self.delete(names) + + + def get(self, item, default_value=None): + """ + Get the requested header entry by keyword name + """ + + found, name = self._contains_and_name(item) + if found: + return self._record_map[name]['value'] + else: + return default_value + + def __len__(self): + return len(self._record_list) + + def __contains__(self, item): + found, _ = self._contains_and_name(item) + return found + + def _contains_and_name(self, item): + + if isinstance(item, FITSRecord): + name=item['name'] + elif isinstance(item, dict): + name=item.get('name',None) + if name is None: + raise ValueError("dict record must have 'name' field") + else: + name=item + + found=False + name=name.upper() + if name in self._record_map: + found=True + elif name[0:8] == 'HIERARCH': + if len(name) > 9: + name = name[9:] + if name in self._record_map: + found=True + + return found, name + + def __setitem__(self, item, value): + if isinstance(value, (dict,FITSRecord)): + if item.upper() != value['name'].upper(): + raise ValueError("when setting using a FITSRecord, the " + "name field must match") + rec=value + else: + rec = {'name':item, 'value':value} + + self.add_record(rec) + + def __getitem__(self, item): + if item not in self: + raise KeyError("unknown record: %s" % item) + + return self.get(item) + + + def __iter__(self): + self._current=0 + return self + + def next(self): + """ + for iteration over the header entries + """ + if self._current < len(self._record_list): + rec=self._record_list[self._current] + key=rec['name'] + self._current += 1 + return key + else: + raise StopIteration + __next__=next + 
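+    # note: iterating over a FITSHDR yields the keyword names in
+    # header order; use hdr[key] to look up each value, e.g.
+    #
+    #     for key in hdr:
+    #         val = hdr[key]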
+ def _record2card(self, record): + """ + when we add new records they don't have a card, + this sort of fakes it up similar to what cfitsio + does, just for display purposes. e.g. + + DBL = 23.299843 + LNG = 3423432 + KEYSNC = 'hello ' + KEYSC = 'hello ' / a comment for string + KEYDC = 3.14159265358979 / a comment for pi + KEYLC = 323423432 / a comment for long + + basically, + - 8 chars, left aligned, for the keyword name + - a space + - 20 chars for value, left aligned for strings, right aligned for + numbers + - if there is a comment, one space followed by / then another space + then the comment out to 80 chars + + """ + name = record['name'] + value = record['value'] + + v_isstring=isstring(value) + + if name == 'COMMENT': + card = 'COMMENT %s' % value + elif name == 'CONTINUE': + card = 'CONTINUE %s' % value + elif name=='HISTORY': + card = 'HISTORY %s' % value + else: + if len(name) > 8: + card = 'HIERARCH %s= ' % name + else: + card = '%-8s= ' % name[0:8] + + # these may be string representations of data, or actual strings + if v_isstring: + value = str(value) + if len(value) > 0: + if value[0] != "'": + # this is a string representing a string header field + # make it look like it will look in the header + value = "'" + value + "'" + vstr = '%-20s' % value + else: + vstr = "%20s" % value + else: + vstr="''" + else: + vstr = '%20s' % value + + card += vstr + + if 'comment' in record: + card += ' / %s' % record['comment'] + + if v_isstring and len(card) > 80: + card=card[0:79] + "'" + else: + card=card[0:80] + + return card + + def __repr__(self): + rep=[''] + for r in self._record_list: + if 'card_string' not in r: + card = self._record2card(r) + else: + card = r['card_string'] + + rep.append(card) + return '\n'.join(rep) + + +class FITSRecord(dict): + """ + Class to represent a FITS header record + + parameters + ---------- + record: string or dict + If a string, it should represent a FITS header card + + If a dict it should have 'name' and 'value' fields. + Can have a 'comment' field. + + examples + -------- + + # from a dict. Can include a comment + rec=FITSRecord( {'name':'temp', 'value':35, 'comment':'temperature in C'} ) + + # from a card + card=FITSRecord('test = 77 / My comment') + + """ + def __init__(self, record, convert=False): + self.set_record(record, convert=convert) + + def set_record(self, record, convert=False): + """ + check the record is valid and convert to a dict + + parameters + ---------- + record: string + Dict representing a record or a string representing a FITS header + card + convert: bool, optional + If True, convert strings. E.g. '3' gets + converted to 3 and "'hello'" gets converted + to 'hello' and 'T'/'F' to True/False. Default + is False. 
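+
+            e.g. (illustrative)
+                rec=FITSRecord({'name':'x','value':'3'}, convert=True)
+                # rec['value'] is now the integer 3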
+ + If the input is a card string, convert is implied True + """ + import copy + + if isstring(record): + card=FITSCard(record) + self.update(card) + + self.verify() + + else: + + if isinstance(record,FITSRecord): + self.update(record) + elif isinstance(record,dict): + # if the card is present, always construct the record from that + if 'card_string' in record: + self.set_record(record['card_string']) + else: + # we will need to verify it + self.update(record) + else: + raise ValueError("record must be a string card or " + "dictionary or FITSRecord") + + self.verify() + + if convert: + self['value_orig'] = copy.copy(self['value']) + if isstring(self['value']): + self['value'] = self._convert_value(self['value_orig']) + + def verify(self): + """ + make sure name,value exist + """ + if 'name' not in self: + raise ValueError("each record must have a 'name' field") + if 'value' not in self: + raise ValueError("each record must have a 'value' field") + + def _convert_value(self, value_orig): + """ + things like 6 and 1.25 are converted with ast.literal_value + + Things like 'hello' are stripped of quotes + """ + import ast + if value_orig is None: + return value_orig + + try: + avalue = ast.parse(value_orig).body[0].value + if isinstance(avalue,ast.BinOp): + # this is probably a string that happens to look like + # a binary operation, e.g. '25-3' + value = value_orig + else: + value = ast.literal_eval(value_orig) + except: + value = self._convert_quoted_string(value_orig) + + if isinstance(value,int) and '_' in value_orig: + value = value_orig + + return value + + def _convert_quoted_string(self, value): + """ + Possibly remove quotes around strings. Deal with bool + """ + # Strip extra quotes from strings if needed + if value.startswith("'") and value.endswith("'"): + val = value[1:-1] + elif value=='T': + val=True + elif value=='F': + val=False + else: + val=value + + return val + +TYP_STRUC_KEY=10 +TYP_CMPRS_KEY= 20 +TYP_SCAL_KEY = 30 +TYP_NULL_KEY = 40 +TYP_DIM_KEY = 50 +TYP_RANG_KEY = 60 +TYP_UNIT_KEY = 70 +TYP_DISP_KEY = 80 +TYP_HDUID_KEY= 90 +TYP_CKSUM_KEY= 100 +TYP_WCS_KEY = 110 +TYP_REFSYS_KEY= 120 +TYP_COMM_KEY = 130 +TYP_CONT_KEY = 140 +TYP_USER_KEY = 150 + +class FITSCard(FITSRecord): + """ + class to represent ordinary FITS cards. + + CONTINUE not supported + + examples + -------- + + # from a card + card=FITSRecord('test = 77 / My comment') + """ + def __init__(self, card_string): + self.set_card(card_string) + + def set_card(self, card_string): + self['card_string']=card_string + + self._check_hierarch() + + if self._is_hierarch: + self._set_as_key() + else: + self._check_equals() + + self._check_type() + self._check_len() + + front=card_string[0:7] + if (not self.has_equals() or front in ['COMMENT', 'HISTORY', 'CONTINU']): + + if front=='HISTORY': + self._set_as_history() + elif front=='CONTINU': + self._set_as_continue() + else: + # note anything without an = and not history is + # treated as comment; this is built into cfitsio + # as well + self._set_as_comment() + + if self.has_equals(): + mess=("warning: It is not FITS-compliant for a %s header card to include " + "an = sign. 
There may be slight inconsistencies if you write this " + "back out to a file.") + mess = mess % (card_string[:8]) + warnings.warn(mess, FITSRuntimeWarning) + else: + self._set_as_key() + + def has_equals(self): + """ + True if = is in position 8 + """ + return self._has_equals + + def _check_hierarch(self): + card_string=self['card_string'] + if card_string[0:8].upper() == 'HIERARCH': + self._is_hierarch=True + else: + self._is_hierarch=False + + def _check_equals(self): + """ + check for = in position 8, set attribute _has_equals + """ + card_string=self['card_string'] + if len(card_string) < 9: + self._has_equals=False + elif card_string[8]=='=': + self._has_equals=True + else: + self._has_equals=False + + def _set_as_key(self): + card_string=self['card_string'] + res=_fitsio_wrap.parse_card(card_string) + if len(res)==5: + keyclass, name, value, dtype, comment=res + else: + keyclass, name, dtype, comment=res + value=None + + if keyclass==TYP_CONT_KEY: + raise ValueError("bad card '%s'. CONTINUE not " + "supported" % card_string) + + self['class'] = keyclass + self['name'] = name + self['value_orig'] = value + self['value'] = self._convert_value(value) + self['dtype'] = dtype + self['comment'] = comment + + def _set_as_comment(self): + comment=self._extract_comm_or_hist_value() + + self['class'] = TYP_COMM_KEY + self['name'] = 'COMMENT' + self['value'] = comment + + def _set_as_history(self): + history=self._extract_comm_or_hist_value() + + self['class'] = TYP_COMM_KEY + self['name'] = 'HISTORY' + self['value'] = history + + def _set_as_continue(self): + value=self._extract_comm_or_hist_value() + + self['class'] = TYP_CONT_KEY + self['name'] = 'CONTINUE' + self['value'] = value + + def _extract_comm_or_hist_value(self): + card_string=self['card_string'] + if self._has_equals: + if len(card_string) >= 9: + value=card_string[9:] + else: + value='' + else: + if len(card_string) >= 8: + #value=card_string[7:] + value=card_string[8:] + else: + value='' + return value + + def _check_type(self): + card_string=self['card_string'] + if not isstring(card_string): + raise TypeError("card must be a string, got type %s" % type(card_string)) + + def _check_len(self): + ln=len(self['card_string']) + if ln > 80: + mess="len(card) is %d. 
cards must have length < 80" + raise ValueError(mess) + +def get_tile_dims(tile_dims, imshape): + """ + Just make sure the tile dims has the appropriate number of dimensions + """ + + if tile_dims is None: + td=None + else: + td = numpy.array(tile_dims, dtype='i8') + nd=len(imshape) + if td.size != nd: + msg="expected tile_dims to have %d dims, got %d" % (td.size,nd) + raise ValueError(msg) + + return td + +def get_compress_type(compress): + if compress is not None: + compress = str(compress).upper() + if compress not in _compress_map: + raise ValueError("compress must be one of %s" % list(_compress_map.keys())) + return _compress_map[compress] + +def check_comptype_img(comptype, dtype_str): + + if comptype == NOCOMPRESS: + return + + #if dtype_str == 'i8': + # no i8 allowed for tile-compressed images + # raise ValueError("8-byte integers not supported when using tile compression") + + if comptype == PLIO_1: + # no unsigned u4/u8 for plio + if dtype_str == 'u4' or dtype_str == 'u8': + raise ValueError("Unsigned 4/8-byte integers currently not " + "allowed when writing using PLIO " + "tile compression") + +def isstring(arg): + return isinstance(arg, _stypes) + +def isinteger(arg): + return isinstance(arg, _itypes) + +def mks(val): + """ + make sure the value is a string, paying mind to python3 vs 2 + """ + if sys.version_info > (3,0,0): + if isinstance(val, bytes): + sval = str(val, 'utf-8') + else: + sval = str(val) + else: + sval = str(val) + + return sval + +def fields_are_object(arr): + isobj=numpy.zeros(len(arr.dtype.names),dtype=numpy.bool) + for i,name in enumerate(arr.dtype.names): + if is_object(arr[name]): + isobj[i] = True + return isobj +def is_object(arr): + if arr.dtype.descr[0][1][1] == 'O': + return True + else: + return False + +def array_to_native_c(array_in, inplace=False): + # copy only made if not C order + arr=numpy.array(array_in, order='C', copy=False) + return array_to_native(arr, inplace=inplace) + + +def array_to_native(array, inplace=False): + if numpy.little_endian: + machine_little=True + else: + machine_little=False + + data_little=False + if array.dtype.names is None: + + if array.dtype.base.byteorder=='|': + # strings and 1 byte integers + return array + + data_little = is_little_endian(array) + else: + # assume all are same byte order: we only need to find one with + # little endian + for fname in array.dtype.names: + if is_little_endian(array[fname]): + data_little=True + break + + if ( (machine_little and not data_little) + or (not machine_little and data_little) ): + output = array.byteswap(inplace) + else: + output = array + + return output + + + +def is_little_endian(array): + """ + Return True if array is little endian, False otherwise. + + Parameters + ---------- + array: numpy array + A numerical python array. + + Returns + ------- + Truth value: + True for little-endian + + Notes + ----- + Strings are neither big or little endian. The input must be a simple numpy + array, not an array with fields. 
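+
+    For example (illustrative):
+
+        >>> is_little_endian(numpy.zeros(3, dtype='<f8'))
+        True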
+ + """ + + if numpy.little_endian: + machine_little=True + else: + machine_little=False + + byteorder = array.dtype.base.byteorder + return (byteorder == '<') or (machine_little and byteorder == '=') + + +def _extract_table_type(type): + """ + Get the numerical table type + """ + if isinstance(type,str): + type=type.lower() + if type[0:7] == 'binary': + table_type = BINARY_TBL + elif type[0:6] == 'ascii': + table_type = ASCII_TBL + else: + raise ValueError("table type string should begin with 'binary' or 'ascii' (case insensitive)") + else: + type=int(type) + if type not in [BINARY_TBL,ASCII_TBL]: + raise ValueError("table type num should be BINARY_TBL (%d) or ASCII_TBL (%d)" % (BINARY_TBL,ASCII_TBL)) + table_type=type + + return table_type + + +def _names_to_lower_if_recarray(data): + if data.dtype.names is not None: + data.dtype.names = [n.lower() for n in data.dtype.names] +def _names_to_upper_if_recarray(data): + if data.dtype.names is not None: + data.dtype.names = [n.upper() for n in data.dtype.names] + +def _trim_strings(data): + names=data.dtype.names + if names is not None: + # run through each field separately + for n in names: + if data[n].dtype.descr[0][1][1] == 'S': + data[n] = numpy.char.rstrip(data[n]) + + else: + if data.dtype.descr[0][1][1] == 'S': + data[:] = numpy.char.rstrip(data[:]) + +def _convert_full_start_to_offset(dims, start): + # convert to scalar offset + # note we use the on-disk data type to get itemsize + ndim=len(dims) + + # convert sequence to pixel start + if len(start) != ndim: + m="start has len %d, which does not match requested dims %d" + raise ValueError(m % (len(start),ndim)) + + # this is really strides / itemsize + strides=[1] + for i in xrange(1,ndim): + strides.append( strides[i-1] * dims[ndim-i] ) + + strides.reverse() + s=start + start_index = sum( [s[i]*strides[i] for i in xrange(ndim)] ) + + return start_index + + + +_compress_map={None:NOCOMPRESS, + 'RICE': RICE_1, + 'RICE_1': RICE_1, + 'GZIP': GZIP_1, + 'GZIP_1': GZIP_1, + 'GZIP_2': GZIP_2, + 'PLIO': PLIO_1, + 'PLIO_1': PLIO_1, + 'HCOMPRESS': HCOMPRESS_1, + 'HCOMPRESS_1': HCOMPRESS_1, + NOCOMPRESS:None, + RICE_1:'RICE_1', + GZIP_1:'GZIP_1', + GZIP_2:'GZIP_2', + PLIO_1:'PLIO_1', + HCOMPRESS_1:'HCOMPRESS_1'} + +_modeprint_map = {'r':'READONLY','rw':'READWRITE', 0:'READONLY',1:'READWRITE'} +_char_modemap = {'r':'r','rw':'rw', + READONLY:'r',READWRITE:'rw'} +_int_modemap = {'r':READONLY,'rw':READWRITE, READONLY:READONLY, READWRITE:READWRITE} +_hdu_type_map = {IMAGE_HDU:'IMAGE_HDU', + ASCII_TBL:'ASCII_TBL', + BINARY_TBL:'BINARY_TBL', + 'IMAGE_HDU':IMAGE_HDU, + 'ASCII_TBL':ASCII_TBL, + 'BINARY_TBL':BINARY_TBL} + +# no support yet for complex +_table_fits2npy = {1: 'i1', + 11: 'u1', + 12: 'i1', + 14: 'b1', # logical. Note pyfits uses this for i1, cfitsio casts to char* + 16: 'S', + 20: 'u2', + 21: 'i2', + 30: 'u4', # 30=TUINT + 31: 'i4', # 31=TINT + 40: 'u4', # 40=TULONG + 41: 'i4', # 41=TLONG + 42: 'f4', + 81: 'i8', + 82: 'f8', + 83: 'c8', # TCOMPLEX + 163: 'c16'} # TDBLCOMPLEX + +# cfitsio returns only types f8, i4 and strings for column types. 
in order to +# avoid data loss, we always use i8 for integer types +_table_fits2npy_ascii = {16: 'S', + 31: 'i8', # listed as TINT, reading as i8 + 41: 'i8', # listed as TLONG, reading as i8 + 81: 'i8', + 21: 'i4', # listed as TSHORT, reading as i4 + 42: 'f8', # listed as TFLOAT, reading as f8 + 82: 'f8'} + + +# for TFORM +_table_npy2fits_form = {'b1':'L', + 'u1':'B', + 'i1':'S', # gets converted to unsigned + 'S' :'A', + 'u2':'U', # gets converted to signed + 'i2':'I', + 'u4':'V', # gets converted to signed + 'i4':'J', + 'i8':'K', + 'f4':'E', + 'f8':'D', + 'c8':'C', + 'c16':'M'} + +_table_npy2fits_form_ascii = {'S' :'A1', # Need to add max here + 'i2':'I7', # I + 'i4':'I12', # ?? + #'i8':'I21', # K # i8 aren't supported + #'f4':'E15.7', # F + 'f4':'E26.17', # F We must write as f8 since we can only read as f8 + 'f8':'E26.17'} # D 25.16 looks right, but this is recommended + +# from mrdfits; note G gets turned into E +# types= ['A', 'I', 'L', 'B', 'F', 'D', 'C', 'M', 'K'] +# formats=['A1', 'I6', 'I10', 'I4', 'G15.9','G23.17', 'G15.9', 'G23.17','I20'] + + + +# remember, you should be using the equivalent image type for this +_image_bitpix2npy = {8: 'u1', + 10: 'i1', + 16: 'i2', + 20: 'u2', + 32: 'i4', + 40: 'u4', + 64: 'i8', + -32: 'f4', + -64: 'f8'} + +# for header keywords +_ftypes = (float,numpy.float32,numpy.float64) + +if sys.version_info > (3,0,0): + _itypes=(int,) + _stypes = (str,bytes) +else: + _itypes=(int,long) + _stypes = (basestring,unicode,) + +_itypes += (numpy.uint8,numpy.int8, + numpy.uint16,numpy.int16, + numpy.uint32,numpy.int32, + numpy.uint64,numpy.int64) + +# different for py3 +_stypes += (numpy.string_,numpy.str_) + diff --git a/fitsio/test.py b/fitsio/test.py new file mode 100644 index 0000000..32b284e --- /dev/null +++ b/fitsio/test.py @@ -0,0 +1,2187 @@ +from __future__ import with_statement, print_function +import sys, os +import tempfile +import warnings +import numpy +from numpy import arange, array +import fitsio + +import unittest + +if sys.version_info > (3,0,0): + stype=(str,bytes) +else: + stype=str + +try: + xrange=xrange +except: + xrange=range + +def test(): + suite_warnings = unittest.TestLoader().loadTestsFromTestCase(TestWarnings) + res1=unittest.TextTestRunner(verbosity=2).run(suite_warnings).wasSuccessful() + + suite = unittest.TestLoader().loadTestsFromTestCase(TestReadWrite) + res2=unittest.TextTestRunner(verbosity=2).run(suite).wasSuccessful() + + if not res1 or not res2: + sys.exit(1) + +class TestWarnings(unittest.TestCase): + """ + tests of warnings + + TODO: write test cases for bad column size + """ + def setUp(self): + pass + + def testNonStandardKeyValue(self): + fname=tempfile.mktemp(prefix='fitsio-TestWarning-',suffix='.fits') + + im=numpy.zeros( (3,3) ) + with warnings.catch_warnings(record=True) as w: + with fitsio.FITS(fname,'rw',clobber=True) as fits: + fits.write(im) + # now write a key with a non-standard value + value={'test':3} + fits[-1].write_key("odd",value) + + assert len(w) == 1 + assert issubclass(w[-1].category, fitsio.FITSRuntimeWarning) + +class TestReadWrite(unittest.TestCase): + def setUp(self): + + + + nvec = 2 + ashape=(21,21) + Sdtype = 'S6' + # all currently available types, scalar, 1-d and 2-d array columns + dtype=[('u1scalar','u1'), + ('i1scalar','i1'), + ('b1scalar','?'), + ('u2scalar','u2'), + ('i2scalar','i2'), + ('u4scalar','u4'), + ('i4scalar','f8'), + ('c8scalar','c8'), # complex, two 32-bit + ('c16scalar','c16'), # complex, two 32-bit + + ('u1vec','u1',nvec), + ('i1vec','i1',nvec), + ('b1vec','?',nvec), + 
('u2vec','u2',nvec), + ('i2vec','i2',nvec), + ('u4vec','u4',nvec), + ('i4vec','i4',nvec), + ('i8vec','i8',nvec), + ('f4vec','f4',nvec), + ('f8vec','f8',nvec), + ('c8vec','c8',nvec), + ('c16vec','c16',nvec), + + ('u1arr','u1',ashape), + ('i1arr','i1',ashape), + ('b1arr','?',ashape), + ('u2arr','u2',ashape), + ('i2arr','i2',ashape), + ('u4arr','u4',ashape), + ('i4arr','i4',ashape), + ('i8arr','i8',ashape), + ('f4arr','f4',ashape), + ('f8arr','f8',ashape), + ('c8arr','c8',ashape), + ('c16arr','c16',ashape), + + ('Sscalar',Sdtype), + ('Svec', Sdtype, nvec), + ('Sarr', Sdtype, ashape)] + + dtype2=[('index','i4'), + ('x','f8'), + ('y','f8')] + + nrows=4 + data=numpy.zeros(nrows, dtype=dtype) + + dtypes=['u1','i1','u2','i2','u4','i4','i8','f4','f8','c8','c16'] + for t in dtypes: + if t in ['c8','c16']: + data[t+'scalar'] = [complex(i+1,(i+1)*2) for i in xrange(nrows)] + vname=t+'vec' + for row in xrange(nrows): + for i in xrange(nvec): + index=(row+1)*(i+1) + data[vname][row,i] = complex(index,index*2) + aname=t+'arr' + for row in xrange(nrows): + for i in xrange(ashape[0]): + for j in xrange(ashape[1]): + index=(row+1)*(i+1)*(j+1) + data[aname][row,i,j] = complex(index,index*2) + + else: + data[t+'scalar'] = 1 + numpy.arange(nrows, dtype=t) + data[t+'vec'] = 1 + numpy.arange(nrows*nvec,dtype=t).reshape(nrows,nvec) + arr = 1 + numpy.arange(nrows*ashape[0]*ashape[1],dtype=t) + data[t+'arr'] = arr.reshape(nrows,ashape[0],ashape[1]) + + for t in ['b1']: + data[t+'scalar'] = (numpy.arange(nrows) % 2 == 0).astype('?') + data[t+'vec'] = (numpy.arange(nrows*nvec) % 2 == 0).astype('?').reshape(nrows,nvec) + arr = (numpy.arange(nrows*ashape[0]*ashape[1]) % 2 == 0).astype('?') + data[t+'arr'] = arr.reshape(nrows,ashape[0],ashape[1]) + + + # strings get padded when written to the fits file. And the way I do + # the read, I read all bytes (ala mrdfits) so the spaces are preserved. 
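+        # (cfitsio pads string columns with spaces out to the declared
+        # width, here S6)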
+ # + # so we need to pad out the strings with blanks so we can compare + + data['Sscalar'] = ['%-6s' % s for s in ['hello','world','good','bye']] + data['Svec'][:,0] = '%-6s' % 'hello' + data['Svec'][:,1] = '%-6s' % 'world' + + s = 1 + numpy.arange(nrows*ashape[0]*ashape[1]) + s = ['%-6s' % el for el in s] + data['Sarr'] = numpy.array(s).reshape(nrows,ashape[0],ashape[1]) + + self.data = data + + # use a dict list so we can have comments + self.keys = [{'name':'test1','value':35}, + {'name':'test2','value':'stuff','comment':'this is a string keyword'}, + {'name':'dbl', 'value':23.299843,'comment':"this is a double keyword"}, + {'name':'lng','value':3423432,'comment':'this is a long keyword'}] + + # a second extension using the convenience function + nrows2=10 + data2 = numpy.zeros(nrows2, dtype=dtype2) + data2['index'] = numpy.arange(nrows2,dtype='i4') + data2['x'] = numpy.arange(nrows2,dtype='f8') + data2['y'] = numpy.arange(nrows2,dtype='f8') + self.data2 = data2 + + + + # + # ascii table + # + + nvec = 2 + ashape = (2,3) + Sdtype = 'S6' + + # we support writing i2, i4, i8, f4 f8, but when reading cfitsio always + # reports their types as i4 and f8, so can't really use i8 and we are + # forced to read all floats as f8 precision + + adtype=[('i2scalar','i2'), + ('i4scalar','i4'), + #('i8scalar','i8'), + ('f4scalar','f4'), + ('f8scalar','f8'), + ('Sscalar',Sdtype)] + nrows=4 + try: + tdt = numpy.dtype(adtype, align=True) + except TypeError: # older numpy may not understand `align` argument + tdt = numpy.dtype(adtype) + adata=numpy.zeros(nrows, dtype=tdt) + + adata['i2scalar'][:] = -32222 + numpy.arange(nrows,dtype='i2') + adata['i4scalar'][:] = -1353423423 + numpy.arange(nrows,dtype='i4') + #adata['i8scalar'][:] = -9223372036854775807 + numpy.arange(nrows,dtype='i8') + adata['f4scalar'][:] = -2.55555555555555555555555e35 + numpy.arange(nrows,dtype='f4')*1.e35 + adata['f8scalar'][:] = -2.55555555555555555555555e110 + numpy.arange(nrows,dtype='f8')*1.e110 + adata['Sscalar'] = ['hello','world','good','bye'] + + self.ascii_data = adata + + + + + # + # for variable length columns + # + + # all currently available types, scalar, 1-d and 2-d array columns + dtype=[('u1scalar','u1'), + ('u1obj','O'), + ('i1scalar','i1'), + ('i1obj','O'), + ('u2scalar','u2'), + ('u2obj','O'), + ('i2scalar','i2'), + ('i2obj','O'), + ('u4scalar','u4'), + ('u4obj','O'), + ('i4scalar','f8'), + ('f8obj','O'), + + ('u1vec','u1',nvec), + ('i1vec','i1',nvec), + ('u2vec','u2',nvec), + ('i2vec','i2',nvec), + ('u4vec','u4',nvec), + ('i4vec','i4',nvec), + ('i8vec','i8',nvec), + ('f4vec','f4',nvec), + ('f8vec','f8',nvec), + + ('u1arr','u1',ashape), + ('i1arr','i1',ashape), + ('u2arr','u2',ashape), + ('i2arr','i2',ashape), + ('u4arr','u4',ashape), + ('i4arr','i4',ashape), + ('i8arr','i8',ashape), + ('f4arr','f4',ashape), + ('f8arr','f8',ashape), + + ('Sscalar',Sdtype), + ('Sobj','O'), + ('Svec', Sdtype, nvec), + ('Sarr', Sdtype, ashape)] + + dtype2=[('index','i4'), + ('x','f8'), + ('y','f8')] + + nrows=4 + data=numpy.zeros(nrows, dtype=dtype) + + for t in ['u1','i1','u2','i2','u4','i4','i8','f4','f8']: + data[t+'scalar'] = 1 + numpy.arange(nrows, dtype=t) + data[t+'vec'] = 1 + numpy.arange(nrows*nvec,dtype=t).reshape(nrows,nvec) + arr = 1 + numpy.arange(nrows*ashape[0]*ashape[1],dtype=t) + data[t+'arr'] = arr.reshape(nrows,ashape[0],ashape[1]) + + for i in xrange(nrows): + data[t+'obj'][i] = data[t+'vec'][i] + + + # strings get padded when written to the fits file. 
And the way I do + # the read, I real all bytes (ala mrdfits) so the spaces are preserved. + # + # so for comparisons, we need to pad out the strings with blanks so we + # can compare + + data['Sscalar'] = ['%-6s' % s for s in ['hello','world','good','bye']] + data['Svec'][:,0] = '%-6s' % 'hello' + data['Svec'][:,1] = '%-6s' % 'world' + + s = 1 + numpy.arange(nrows*ashape[0]*ashape[1]) + s = ['%-6s' % el for el in s] + data['Sarr'] = numpy.array(s).reshape(nrows,ashape[0],ashape[1]) + + for i in xrange(nrows): + data['Sobj'][i] = data['Sscalar'][i].rstrip() + + self.vardata = data + + # + # for bitcol columns + # + nvec = 2 + ashape=(21,21) + + dtype=[('b1vec','?',nvec), + + ('b1arr','?',ashape)] + + nrows=4 + data=numpy.zeros(nrows, dtype=dtype) + + for t in ['b1']: + data[t+'vec'] = (numpy.arange(nrows*nvec) % 2 == 0).astype('?').reshape(nrows,nvec) + arr = (numpy.arange(nrows*ashape[0]*ashape[1]) % 2 == 0).astype('?') + data[t+'arr'] = arr.reshape(nrows,ashape[0],ashape[1]) + + self.bdata = data + + + + def testHeaderWriteRead(self): + """ + Test a basic header write and read + """ + + fname=tempfile.mktemp(prefix='fitsio-HeaderWrite-',suffix='.fits') + try: + with fitsio.FITS(fname,'rw',clobber=True) as fits: + data=numpy.zeros(10) + header={ + 'x':35, + 'y':88.215, + 'funky':'35-8', # test old bug when strings look + #like expressions + 'name':'J. Smith', + 'und':None, + 'binop':'25-3', # test string with binary operation in it + 'unders':'1_000_000', # test string with underscore + } + fits.write_image(data, header=header) + + rh = fits[0].read_header() + self.check_header(header, rh) + + with fitsio.FITS(fname) as fits: + rh = fits[0].read_header() + self.check_header(header, rh) + + finally: + if os.path.exists(fname): + os.remove(fname) + + def testHeaderContinue(self): + """ + Test a header with CONTINUE keys + """ + fname=tempfile.mktemp(prefix='fitsio-HeaderContinue-',suffix='.fits') + try: + with fitsio.FITS(fname,'rw',clobber=True) as fits: + data=numpy.zeros(10) + header = [ + "SVALUE = 'This is a long string value &' ", + "CONTINUE 'extending& ' ", + "CONTINUE ' over 3 lines.' / and a comment ", + "TEST = 10 / another key", + ] + fits.write_image(data, header=header) + + rh = fits[0].read_header() + assert rh.keys().count('CONTINUE') == 2 + + with fitsio.FITS(fname) as fits: + rh = fits[0].read_header() + assert rh.keys().count('CONTINUE') == 2 + + finally: + if os.path.exists(fname): + os.remove(fname) + + fname=tempfile.mktemp(prefix='fitsio-HeaderContinue-',suffix='.fits') + try: + with fitsio.FITS(fname,'rw',clobber=True) as fits: + data=numpy.zeros(10) + header = [ + # This is a snippet from a real DES FITS header, which (I guess incorrectly) + # puts an = sign after the CONTINUE. This didn't used to work. + "OBSERVER= 'Ross Cawthon(RM), Ricardo Ogando(OBS1), Rutu Das (OBS1) Michael &'", + "CONTINUE= ' ' / '&' / Observer name(s)", + ] + numpy.testing.assert_warns(fitsio.FITSRuntimeWarning, + fits.write_image, data, header=header) + + # The CONTINUE= line gets converted to a normal CONTINUE, so no warning on reads. + # There would be a warning if the file being read has CONTINUE=, but that would + # be harder to test explicitly, since fitsio won't write that file... 
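+            # here we just verify that the converted header comes back
+            # with a single CONTINUE record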
+ rh = fits[0].read_header() + assert rh.keys().count('CONTINUE') == 1 + + with fitsio.FITS(fname) as fits: + rh = fits[0].read_header() + assert rh.keys().count('CONTINUE') == 1 + + finally: + if os.path.exists(fname): + os.remove(fname) + + + + def testImageWriteRead(self): + """ + Test a basic image write, data and a header, then reading back in to + check the values + """ + + fname=tempfile.mktemp(prefix='fitsio-ImageWrite-',suffix='.fits') + dtypes=['u1','i1','u2','i2','f4','f8'] + try: + with fitsio.FITS(fname,'rw',clobber=True) as fits: + # note mixing up byte orders a bit + for dtype in dtypes: + data = numpy.arange(5*20,dtype=dtype).reshape(5,20) + header={'DTYPE':dtype,'NBYTES':data.dtype.itemsize} + fits.write_image(data, header=header) + rdata = fits[-1].read() + + self.compare_array(data, rdata, "images") + + rh = fits[-1].read_header() + self.check_header(header, rh) + + with fitsio.FITS(fname) as fits: + for i in xrange(len(dtypes)): + self.assertEqual(fits[i].is_compressed(), False, "not compressed") + + finally: + if os.path.exists(fname): + os.remove(fname) + + def testImageWriteEmpty(self): + """ + Test a basic image write, with no data and just a header, then reading back in to + check the values + """ + fname=tempfile.mktemp(prefix='fitsio-ImageWriteEmpty-',suffix='.fits') + try: + data=None + header={'EXPTIME':120, 'OBSERVER':'Beatrice Tinsley','INSTRUME':'DECam','FILTER':'r'} + with fitsio.FITS(fname,'rw',clobber=True, ignore_empty=True) as fits: + for extname in ['CCD1','CCD2','CCD3','CCD4','CCD5','CCD6','CCD7','CCD8']: + fits.write_image(data, header=header) + rdata = fits[-1].read() + rh = fits[-1].read_header() + self.check_header(header, rh) + finally: + if os.path.exists(fname): + os.remove(fname) + + def testImageWriteReadFromDims(self): + """ + Test a basic image write, data and a header, then reading back in to + check the values + """ + + fname=tempfile.mktemp(prefix='fitsio-ImageWriteFromDims-',suffix='.fits') + dtypes=['u1','i1','u2','i2','f4','f8'] + try: + with fitsio.FITS(fname,'rw',clobber=True) as fits: + # note mixing up byte orders a bit + for dtype in dtypes: + data = numpy.arange(5*20,dtype=dtype).reshape(5,20) + + fits.create_image_hdu(dims=data.shape, + dtype=data.dtype) + + fits[-1].write(data) + rdata = fits[-1].read() + + self.compare_array(data, rdata, "images") + + with fitsio.FITS(fname) as fits: + for i in xrange(len(dtypes)): + self.assertEqual(fits[i].is_compressed(), False, "not compressed") + + finally: + if os.path.exists(fname): + os.remove(fname) + + def testImageWriteReadFromDimsChunks(self): + """ + Test a basic image write, data and a header, then reading back in to + check the values + """ + + fname=tempfile.mktemp(prefix='fitsio-ImageWriteFromDims-',suffix='.fits') + dtypes=['u1','i1','u2','i2','f4','f8'] + try: + with fitsio.FITS(fname,'rw',clobber=True) as fits: + # note mixing up byte orders a bit + for dtype in dtypes: + data = numpy.arange(5*3,dtype=dtype).reshape(5,3) + + fits.create_image_hdu(dims=data.shape, + dtype=data.dtype) + + chunk1 = data[0:2, :] + chunk2 = data[2: , :] + + # + # first using scalar pixel offset + # + + fits[-1].write(chunk1) + + start=chunk1.size + fits[-1].write(chunk2, start=start) + + rdata = fits[-1].read() + + self.compare_array(data, rdata, "images") + + + # + # now using sequence, easier to calculate + # + + fits.create_image_hdu(dims=data.shape, + dtype=data.dtype) + + # first using pixel offset + fits[-1].write(chunk1) + + start=[2,0] + fits[-1].write(chunk2, start=start) + + rdata2 = 
fits[-1].read() + + self.compare_array(data, rdata2, "images") + + + with fitsio.FITS(fname) as fits: + for i in xrange(len(dtypes)): + self.assertEqual(fits[i].is_compressed(), False, "not compressed") + + finally: + if os.path.exists(fname): + os.remove(fname) + + + def testImageSlice(self): + fname=tempfile.mktemp(prefix='fitsio-ImageSlice-',suffix='.fits') + try: + with fitsio.FITS(fname,'rw',clobber=True) as fits: + # note mixing up byte orders a bit + for dtype in ['u1','i1','u2','i2','f4','f8']: + data = numpy.arange(16*20,dtype=dtype).reshape(16,20) + header={'DTYPE':dtype,'NBYTES':data.dtype.itemsize} + fits.write_image(data, header=header) + rdata = fits[-1][4:12, 9:17] + + self.compare_array(data[4:12,9:17], rdata, "images") + + rh = fits[-1].read_header() + self.check_header(header, rh) + + finally: + if os.path.exists(fname): + os.remove(fname) + + + def testRiceTileCompressedWriteRead(self): + """ + Test a basic image write, data and a header, then reading back in to + check the values + """ + nrows=30 + ncols=100 + tile_dims=[5,10] + compress='rice' + fname=tempfile.mktemp(prefix='fitsio-ImageWrite-',suffix='.fits.fz') + dtypes = ['u1','i1','u2','i2','u4','i4','f4','f8'] + + try: + with fitsio.FITS(fname,'rw',clobber=True) as fits: + # note i8 not supported for compressed! + + for dtype in dtypes: + data = numpy.arange(nrows*ncols,dtype=dtype).reshape(nrows,ncols) + fits.write_image(data, compress=compress) + #fits.reopen() + rdata = fits[-1].read() + + self.compare_array(data, rdata, + "%s compressed images ('%s')" % (compress,dtype)) + + + fits.write_image(data, compress=compress, tile_dims=tile_dims) + #fits.reopen() + rdata = fits[-1].read() + + self.compare_array(data, rdata, + ("%s tile dims compressed images " + "('%s')" % (compress,dtype))) + + with fitsio.FITS(fname) as fits: + for ii in xrange(len(dtypes)): + i=ii+1 + self.assertEqual(fits[i].is_compressed(), True, "is compressed") + + finally: + if os.path.exists(fname): + os.remove(fname) + + def testPLIOTileCompressedWriteRead(self): + """ + Test a basic image write, data and a header, then reading back in to + check the values + """ + + compress='plio' + fname=tempfile.mktemp(prefix='fitsio-ImageWrite-',suffix='.fits.fz') + try: + with fitsio.FITS(fname,'rw',clobber=True) as fits: + # note i8 not supported for compressed! + # also no writing unsigned, need to address + dtypes = ['u1','i1','u2','i2','i4','f4','f8'] + + for dtype in dtypes: + data = numpy.arange(5*20,dtype=dtype).reshape(5,20) + fits.write_image(data, compress=compress) + #fits.reopen() + rdata = fits[-1].read() + + self.compare_array(data, rdata, "%s compressed images ('%s')" % (compress,dtype)) + + finally: + if os.path.exists(fname): + os.remove(fname) + + def testGZIPTileCompressedWriteRead(self): + """ + Test a basic image write, data and a header, then reading back in to + check the values + """ + + compress='gzip' + fname=tempfile.mktemp(prefix='fitsio-ImageWrite-',suffix='.fits.fz') + try: + with fitsio.FITS(fname,'rw',clobber=True) as fits: + # note i8 not supported for compressed! 
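+ # Background, for reference: tile-compressed images are stored by
+ # cfitsio as a binary table of separately compressed tiles (one image
+ # row per tile by default, unless tile_dims is given), which is why
+ # the rice test above can assert is_compressed() on HDUs that were
+ # written with write_image.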
+ dtypes = ['u1','i1','u2','i2','u4','i4','f4','f8'] + + for dtype in dtypes: + data = numpy.arange(5*20,dtype=dtype).reshape(5,20) + fits.write_image(data, compress=compress) + rdata = fits[-1].read() + + self.compare_array(data, rdata, "%s compressed images ('%s')" % (compress,dtype)) + + finally: + if os.path.exists(fname): + os.remove(fname) + + def testGZIP2TileCompressedWriteRead(self): + """ + Test a basic image write, data and a header, then reading back in to + check the values + """ + + compress='gzip_2' + fname=tempfile.mktemp(prefix='fitsio-ImageWrite-',suffix='.fits.fz') + try: + with fitsio.FITS(fname,'rw',clobber=True) as fits: + # note i8 not supported for compressed! + dtypes = ['u1','i1','u2','i2','u4','i4','f4','f8'] + + for dtype in dtypes: + data = numpy.arange(5*20,dtype=dtype).reshape(5,20) + fits.write_image(data, compress=compress) + rdata = fits[-1].read() + + self.compare_array(data, rdata, "%s compressed images ('%s')" % (compress,dtype)) + + finally: + if os.path.exists(fname): + os.remove(fname) + + def testHCompressTileCompressedWriteRead(self): + """ + Test a basic image write, data and a header, then reading back in to + check the values + """ + + compress='hcompress' + fname=tempfile.mktemp(prefix='fitsio-ImageWrite-',suffix='.fits.fz') + try: + with fitsio.FITS(fname,'rw',clobber=True) as fits: + # note i8 not supported for compressed! + dtypes = ['u1','i1','u2','i2','u4','i4','f4','f8'] + + for dtype in dtypes: + if (dtype == 'u2') and ('SKIP_HCOMPRESS_U2_TEST' in os.environ): + continue + data = numpy.arange(5*20,dtype=dtype).reshape(5,20) + fits.write_image(data, compress=compress) + #fits.reopen() + rdata = fits[-1].read() + + self.compare_array(data, rdata, "%s compressed images ('%s')" % (compress,dtype)) + + finally: + if os.path.exists(fname): + os.remove(fname) + + + + def testWriteKeyDict(self): + """ + test that write_key works using a standard key dict + """ + + fname=tempfile.mktemp(prefix='fitsio-WriteKeyDict-',suffix='.fits') + nrows=3 + try: + with fitsio.FITS(fname,'rw',clobber=True) as fits: + + im=numpy.zeros( (10,10), dtype='i2' ) + fits.write(im) + + keydict = { + 'name':'test', + 'value':35, + 'comment':'keydict test', + } + fits[-1].write_key(**keydict) + + h = fits[-1].read_header() + + self.assertEqual(h['test'],keydict['value']) + self.assertEqual(h.get_comment('test'),keydict['comment']) + + finally: + if os.path.exists(fname): + os.remove(fname) + + + + def testMoveByName(self): + """ + Test a basic table write, data and a header, then reading back in to + check the values + """ + + fname=tempfile.mktemp(prefix='fitsio-MoveByName-',suffix='.fits') + nrows=3 + try: + with fitsio.FITS(fname,'rw',clobber=True) as fits: + + data1=numpy.zeros(nrows,dtype=[('ra','f8'),('dec','f8')]) + data1['ra'] = numpy.random.random(nrows) + data1['dec'] = numpy.random.random(nrows) + fits.write_table(data1, extname='mytable') + + fits[-1].write_key("EXTVER", 1) + + data2=numpy.zeros(nrows,dtype=[('ra','f8'),('dec','f8')]) + data2['ra'] = numpy.random.random(nrows) + data2['dec'] = numpy.random.random(nrows) + + fits.write_table(data2, extname='mytable') + fits[-1].write_key("EXTVER", 2) + + hdunum1=fits.movnam_hdu('mytable',extver=1) + self.assertEqual(hdunum1,2) + hdunum2=fits.movnam_hdu('mytable',extver=2) + self.assertEqual(hdunum2,3) + + finally: + if os.path.exists(fname): + os.remove(fname) + + def testExtVer(self): + """ + Test using extname and extver, all combinations I can think of + """ + + 
fname=tempfile.mktemp(prefix='fitsio-ExtVer-',suffix='.fits') + try: + with fitsio.FITS(fname,'rw',clobber=True) as fits: + + img1=numpy.arange(2*3,dtype='i4').reshape(2,3) + 5 + img2=numpy.arange(2*3,dtype='i4').reshape(2,3) + 6 + img3=numpy.arange(2*3,dtype='i4').reshape(2,3) + 7 + + nrows=3 + data1=numpy.zeros(nrows,dtype=[('num','i4'),('ra','f8'),('dec','f8')]) + data1['num'] = 1 + data1['ra'] = numpy.random.random(nrows) + data1['dec'] = numpy.random.random(nrows) + + data2=numpy.zeros(nrows,dtype=[('num','i4'),('ra','f8'),('dec','f8')]) + data2['num'] = 2 + data2['ra'] = numpy.random.random(nrows) + data2['dec'] = numpy.random.random(nrows) + + data3=numpy.zeros(nrows,dtype=[('num','i4'),('ra','f8'),('dec','f8')]) + data3['num'] = 3 + data3['ra'] = numpy.random.random(nrows) + data3['dec'] = numpy.random.random(nrows) + + + fits.write_image(img1, extname='myimage', extver=1) + fits.write_table(data1) + fits.write_table(data2,extname='mytable', extver=1) + fits.write_image(img2, extname='myimage', extver=2) + fits.write_table(data3, extname='mytable',extver=2) + fits.write_image(img3) + + d1 = fits[1].read() + d2 = fits['mytable'].read() + d2b = fits['mytable',1].read() + d3 = fits['mytable',2].read() + + + for f in data1.dtype.names: + self.compare_rec(data1, d1, "data1") + self.compare_rec(data2, d2, "data2") + self.compare_rec(data2, d2b, "data2b") + self.compare_rec(data3, d3, "data3") + + dimg1 = fits[0].read() + dimg1b = fits['myimage',1].read() + dimg2 = fits['myimage',2].read() + dimg3 = fits[5].read() + + self.compare_array(img1, dimg1,"img1") + self.compare_array(img1, dimg1b,"img1b") + self.compare_array(img2, dimg2,"img2") + self.compare_array(img3, dimg3,"img3") + + + finally: + if os.path.exists(fname): + os.remove(fname) + + def testVariableLengthColumns(self): + """ + Write and read variable length columns + """ + + for vstorage in ['fixed','object']: + fname=tempfile.mktemp(prefix='fitsio-VarCol-',suffix='.fits') + try: + with fitsio.FITS(fname,'rw',clobber=True,vstorage=vstorage) as fits: + fits.write(self.vardata) + + + # reading multiple columns + d = fits[1].read() + self.compare_rec_with_var(self.vardata,d,"read all test '%s'" % vstorage) + + cols=['u2scalar','Sobj'] + d = fits[1].read(columns=cols) + self.compare_rec_with_var(self.vardata,d,"read all test subcols '%s'" % vstorage) + + # one at a time + for f in self.vardata.dtype.names: + d = fits[1].read_column(f) + if fitsio.fitslib.is_object(self.vardata[f]): + self.compare_object_array(self.vardata[f], d, + "read all field '%s'" % f) + + # same as above with slices + # reading multiple columns + d = fits[1][:] + self.compare_rec_with_var(self.vardata,d,"read all test '%s'" % vstorage) + + d = fits[1][cols][:] + self.compare_rec_with_var(self.vardata,d,"read all test subcols '%s'" % vstorage) + + # one at a time + for f in self.vardata.dtype.names: + d = fits[1][f][:] + if fitsio.fitslib.is_object(self.vardata[f]): + self.compare_object_array(self.vardata[f], d, + "read all field '%s'" % f) + + + + # + # now same with sub rows + # + + # reading multiple columns + rows = numpy.array([0,2]) + d = fits[1].read(rows=rows) + self.compare_rec_with_var(self.vardata,d,"read subrows test '%s'" % vstorage, + rows=rows) + + d = fits[1].read(columns=cols, rows=rows) + self.compare_rec_with_var(self.vardata,d,"read subrows test subcols '%s'" % vstorage, + rows=rows) + + # one at a time + for f in self.vardata.dtype.names: + d = fits[1].read_column(f,rows=rows) + if fitsio.fitslib.is_object(self.vardata[f]): + 
self.compare_object_array(self.vardata[f], d, + "read subrows field '%s'" % f, + rows=rows) + + # same as above with slices + # reading multiple columns + d = fits[1][rows] + self.compare_rec_with_var(self.vardata,d,"read subrows slice test '%s'" % vstorage, + rows=rows) + d = fits[1][2:4] + self.compare_rec_with_var(self.vardata,d,"read slice test '%s'" % vstorage, + rows=numpy.array([2,3])) + + d = fits[1][cols][rows] + self.compare_rec_with_var(self.vardata,d,"read subcols subrows slice test '%s'" % vstorage, + rows=rows) + d = fits[1][cols][2:4] + self.compare_rec_with_var(self.vardata,d,"read subcols slice test '%s'" % vstorage, + rows=numpy.array([2,3])) + + # one at a time + for f in self.vardata.dtype.names: + d = fits[1][f][rows] + if fitsio.fitslib.is_object(self.vardata[f]): + self.compare_object_array(self.vardata[f], d, + "read subrows field '%s'" % f, + rows=rows) + d = fits[1][f][2:4] + if fitsio.fitslib.is_object(self.vardata[f]): + self.compare_object_array(self.vardata[f], d, + "read slice field '%s'" % f, + rows=numpy.array([2,3])) + + + + + finally: + if os.path.exists(fname): + os.remove(fname) + + + + def testTableWriteRead(self): + """ + Test a basic table write, data and a header, then reading back in to + check the values + """ + + fname=tempfile.mktemp(prefix='fitsio-TableWrite-',suffix='.fits') + try: + with fitsio.FITS(fname,'rw',clobber=True) as fits: + + try: + fits.write_table(self.data, header=self.keys, extname='mytable') + write_success=True + except: + write_success=False + + self.assertTrue(write_success,"testing write does not raise an error") + if not write_success: + self.skipTest("cannot test result if write failed") + + d = fits[1].read() + self.compare_rec(self.data, d, "table read/write") + + h = fits[1].read_header() + self.compare_headerlist_header(self.keys, h) + + # see if our convenience functions are working + fitsio.write(fname, self.data2, + extname="newext", + header={'ra':335.2,'dec':-25.2}) + d = fitsio.read(fname, ext='newext') + self.compare_rec(self.data2, d, "table data2") + # now test read_column + with fitsio.FITS(fname) as fits: + + for f in self.data.dtype.names: + d = fits[1].read_column(f) + self.compare_array(self.data[f], d, "table 1 single field read '%s'" % f) + + for f in self.data2.dtype.names: + d = fits['newext'].read_column(f) + self.compare_array(self.data2[f], d, "table 2 single field read '%s'" % f) + + # now list of columns + for cols in [['u2scalar','f4vec','Sarr'], + ['f8scalar','u2arr','Sscalar']]: + d = fits[1].read(columns=cols) + for f in d.dtype.names: + self.compare_array(self.data[f][:], d[f], "test column list %s" % f) + + + rows = [1,3] + d = fits[1].read(columns=cols, rows=rows) + for f in d.dtype.names: + self.compare_array(self.data[f][rows], d[f], "test column list %s row subset" % f) + + finally: + if os.path.exists(fname): + #pass + os.remove(fname) + + def testTableFormatColumnSubset(self): + """ + Test a basic table write, data and a header, then reading back in to + check the values + """ + + fname=tempfile.mktemp(prefix='fitsio-TableWrite-',suffix='.fits') + + with fitsio.FITS(fname,'rw',clobber=True) as fits: + data = numpy.empty(1, dtype=[('Z', 'f8'), ('Z_PERSON', 'f8')]) + data['Z'][:] = 1.0 + data['Z_PERSON'][:] = 1.0 + fits.write_table(data) + fits.write_table(data) + fits.write_table(data) + try: + with fitsio.FITS(fname,'r',clobber=True) as fits: + # assert we do not have an extra row of 'Z' + sz = str(fits[2]['Z_PERSON']).split('\n') + s = str(fits[2][('Z_PERSON', 'Z')]).split('\n') + 
assert len(sz) == len(s) - 1
+ finally:
+ if os.path.exists(fname):
+ #pass
+ os.remove(fname)
+
+ def testTableWriteDictOfArraysScratch(self):
+ """
+ This version creates the table from scratch out of a dict of arrays.
+ Test a basic table write, data and a header, then reading back in to
+ check the values
+ """
+
+ fname=tempfile.mktemp(prefix='fitsio-TableDict-',suffix='.fits')
+ try:
+ with fitsio.FITS(fname,'rw',clobber=True) as fits:
+
+ try:
+ d={}
+ for n in self.data.dtype.names:
+ d[n] = self.data[n]
+
+ fits.write(d)
+ write_success=True
+ except:
+ write_success=False
+
+ self.assertTrue(write_success,"write should not raise an error")
+ if not write_success:
+ self.skipTest("cannot test result if write failed")
+
+ d = fitsio.read(fname)
+ self.compare_rec(self.data, d, "dict of arrays, scratch")
+
+ finally:
+ if os.path.exists(fname):
+ #pass
+ os.remove(fname)
+
+ def testTableWriteDictOfArrays(self):
+ """
+ This version creates the table HDU first, then writes a dict of arrays.
+ Test a basic table write, data and a header, then reading back in to
+ check the values
+ """
+
+ fname=tempfile.mktemp(prefix='fitsio-TableDict-',suffix='.fits')
+ try:
+ with fitsio.FITS(fname,'rw',clobber=True) as fits:
+
+ try:
+ fits.create_table_hdu(self.data, extname='mytable')
+
+ d={}
+ for n in self.data.dtype.names:
+ d[n] = self.data[n]
+
+ fits[-1].write(d)
+ write_success=True
+ except:
+ write_success=False
+
+ self.assertTrue(write_success,"write should not raise an error")
+ if not write_success:
+ self.skipTest("cannot test result if write failed")
+
+ d = fitsio.read(fname)
+ self.compare_rec(self.data, d, "dict of arrays")
+
+ finally:
+ if os.path.exists(fname):
+ #pass
+ os.remove(fname)
+
+
+ def testTableWriteDictOfArraysVar(self):
+ """
+ This version creates the table from a dict of arrays that includes
+ variable length (object) columns.
+ Test a basic table write, data and a header, then reading back in to
+ check the values
+ """
+
+ fname=tempfile.mktemp(prefix='fitsio-TableDictVar-',suffix='.fits')
+ try:
+ with fitsio.FITS(fname,'rw',clobber=True) as fits:
+
+ try:
+ d={}
+ for n in self.vardata.dtype.names:
+ d[n] = self.vardata[n]
+
+ fits.write(d)
+ write_success=True
+ except:
+ write_success=False
+
+ self.assertTrue(write_success,"write should not raise an error")
+ if not write_success:
+ self.skipTest("cannot test result if write failed")
+
+ d = fitsio.read(fname)
+ self.compare_rec_with_var(self.vardata,d,"dict of arrays, var")
+
+ finally:
+ if os.path.exists(fname):
+ #pass
+ os.remove(fname)
+
+
+ def testTableWriteListOfArraysScratch(self):
+ """
+ This version creates the table from scratch out of a list of names
+ and a list of arrays.
+ Test a basic table write, data and a header, then reading back in to
+ check the values
+ """
+
+ fname=tempfile.mktemp(prefix='fitsio-TableListScratch-',suffix='.fits')
+ try:
+ with fitsio.FITS(fname,'rw',clobber=True) as fits:
+
+ try:
+ names = [n for n in self.data.dtype.names]
+ dlist = [self.data[n] for n in self.data.dtype.names]
+ fits.write(dlist, names=names)
+ write_success=True
+ except:
+ write_success=False
+
+ self.assertTrue(write_success,"write should not raise an error")
+ if not write_success:
+ self.skipTest("cannot test result if write failed")
+
+ d = fitsio.read(fname)
+ self.compare_rec(self.data, d, "list of arrays, scratch")
+
+ finally:
+ if os.path.exists(fname):
+ #pass
+ os.remove(fname)
+
+
+
+ def testTableWriteListOfArrays(self):
+ """
+ Test a basic table write, data and a header, then reading back in to
+ check the values
+ """
+
+ 
fname=tempfile.mktemp(prefix='fitsio-TableWriteList-',suffix='.fits')
+ try:
+ with fitsio.FITS(fname,'rw',clobber=True) as fits:
+
+ try:
+ fits.create_table_hdu(self.data, extname='mytable')
+
+ columns = [n for n in self.data.dtype.names]
+ dlist = [self.data[n] for n in self.data.dtype.names]
+ fits[-1].write(dlist, columns=columns)
+ write_success=True
+ except:
+ write_success=False
+
+ self.assertTrue(write_success,"write should not raise an error")
+ if not write_success:
+ self.skipTest("cannot test result if write failed")
+
+ d = fitsio.read(fname, ext='mytable')
+ self.compare_rec(self.data, d, "list of arrays")
+
+ finally:
+ if os.path.exists(fname):
+ #pass
+ os.remove(fname)
+
+
+ def testTableWriteListOfArraysVar(self):
+ """
+ This version creates the table from a list of names and a list of
+ arrays that includes variable length (object) columns.
+ Test a basic table write, data and a header, then reading back in to
+ check the values
+ """
+
+ fname=tempfile.mktemp(prefix='fitsio-TableListScratch-',suffix='.fits')
+ try:
+ with fitsio.FITS(fname,'rw',clobber=True) as fits:
+
+ try:
+ names = [n for n in self.vardata.dtype.names]
+ dlist = [self.vardata[n] for n in self.vardata.dtype.names]
+ fits.write(dlist, names=names)
+ write_success=True
+ except:
+ write_success=False
+
+ self.assertTrue(write_success,"write should not raise an error")
+ if not write_success:
+ self.skipTest("cannot test result if write failed")
+
+ d = fitsio.read(fname)
+ self.compare_rec_with_var(self.vardata,d,"list of arrays, var")
+
+ finally:
+ if os.path.exists(fname):
+ #pass
+ os.remove(fname)
+
+ def testTableIter(self):
+ """
+ Test iterating over rows of a table
+ """
+
+ fname=tempfile.mktemp(prefix='fitsio-TableIter-',suffix='.fits')
+ try:
+ with fitsio.FITS(fname,'rw',clobber=True) as fits:
+
+ try:
+ fits.write_table(self.data, header=self.keys, extname='mytable')
+ write_success=True
+ except:
+ write_success=False
+
+ self.assertTrue(write_success,"testing write does not raise an error")
+ if not write_success:
+ self.skipTest("cannot test result if write failed")
+
+ # one row at a time
+ with fitsio.FITS(fname) as fits:
+ hdu = fits["mytable"]
+ i=0
+ for row_data in hdu:
+ self.compare_rec(self.data[i], row_data, "table data")
+ i+=1
+
+ finally:
+ if os.path.exists(fname):
+ #pass
+ os.remove(fname)
+
+ def testAsciiTableWriteRead(self):
+ """
+ Test a basic table write, data and a header, then reading back in to
+ check the values
+ """
+
+ fname=tempfile.mktemp(prefix='fitsio-AsciiTableWrite-',suffix='.fits')
+ try:
+ with fitsio.FITS(fname,'rw',clobber=True) as fits:
+
+ fits.write_table(self.ascii_data, table_type='ascii', header=self.keys, extname='mytable')
+
+ # cfitsio always reports type as i4 and f8, period, even if
+ # written with higher precision. 
Need to fix that somehow
+ for f in self.ascii_data.dtype.names:
+ d = fits[1].read_column(f)
+ if d.dtype == numpy.float64:
+ # note we should be able to do 1.11e-16 in principle, but in practice
+ # we get more like 2.15e-16
+ self.compare_array_tol(self.ascii_data[f], d, 2.15e-16, "table field read '%s'" % f)
+ else:
+ self.compare_array(self.ascii_data[f], d, "table field read '%s'" % f)
+
+ rows = [1,3]
+ for f in self.ascii_data.dtype.names:
+ d = fits[1].read_column(f,rows=rows)
+ if d.dtype == numpy.float64:
+ self.compare_array_tol(self.ascii_data[f][rows], d, 2.15e-16,
+ "table field read subrows '%s'" % f)
+ else:
+ self.compare_array(self.ascii_data[f][rows], d,
+ "table field read subrows '%s'" % f)
+
+ beg=1
+ end=3
+ for f in self.ascii_data.dtype.names:
+ d = fits[1][f][beg:end]
+ if d.dtype == numpy.float64:
+ self.compare_array_tol(self.ascii_data[f][beg:end], d, 2.15e-16,
+ "table field read slice '%s'" % f)
+ else:
+ self.compare_array(self.ascii_data[f][beg:end], d,
+ "table field read slice '%s'" % f)
+
+ cols = ['i2scalar','f4scalar']
+ for f in self.ascii_data.dtype.names:
+ data = fits[1].read(columns=cols)
+ for f in data.dtype.names:
+ d=data[f]
+ if d.dtype == numpy.float64:
+ self.compare_array_tol(self.ascii_data[f], d, 2.15e-16, "table subcol, '%s'" % f)
+ else:
+ self.compare_array(self.ascii_data[f], d, "table subcol, '%s'" % f)
+
+ data = fits[1][cols][:]
+ for f in data.dtype.names:
+ d=data[f]
+ if d.dtype == numpy.float64:
+ self.compare_array_tol(self.ascii_data[f], d, 2.15e-16, "table subcol, '%s'" % f)
+ else:
+ self.compare_array(self.ascii_data[f], d, "table subcol, '%s'" % f)
+
+ rows=[1,3]
+ for f in self.ascii_data.dtype.names:
+ data = fits[1].read(columns=cols,rows=rows)
+ for f in data.dtype.names:
+ d=data[f]
+ if d.dtype == numpy.float64:
+ self.compare_array_tol(self.ascii_data[f][rows], d, 2.15e-16,
+ "table subcol, '%s'" % f)
+ else:
+ self.compare_array(self.ascii_data[f][rows], d,
+ "table subcol, '%s'" % f)
+
+ data = fits[1][cols][rows]
+ for f in data.dtype.names:
+ d=data[f]
+ if d.dtype == numpy.float64:
+ self.compare_array_tol(self.ascii_data[f][rows], d, 2.15e-16,
+ "table subcol/row, '%s'" % f)
+ else:
+ self.compare_array(self.ascii_data[f][rows], d,
+ "table subcol/row, '%s'" % f)
+
+ for f in self.ascii_data.dtype.names:
+
+ data = fits[1][cols][beg:end]
+ for f in data.dtype.names:
+ d=data[f]
+ if d.dtype == numpy.float64:
+ self.compare_array_tol(self.ascii_data[f][beg:end], d, 2.15e-16,
+ "table subcol/slice, '%s'" % f)
+ else:
+ self.compare_array(self.ascii_data[f][beg:end], d,
+ "table subcol/slice, '%s'" % f)
+
+
+
+ finally:
+ if os.path.exists(fname):
+ os.remove(fname)
+
+
+ def testTableInsertColumn(self):
+ """
+ Insert a new column
+ """
+
+ fname=tempfile.mktemp(prefix='fitsio-TableInsertColumn-',suffix='.fits')
+ try:
+ with fitsio.FITS(fname,'rw',clobber=True) as fits:
+
+ fits.write_table(self.data, header=self.keys, extname='mytable')
+
+ d = fits[1].read()
+
+ for n in d.dtype.names:
+ newname = n+'_insert'
+
+ fits[1].insert_column(newname, d[n])
+
+ newdata = fits[1][newname][:]
+
+ self.compare_array(d[n], newdata, "table single field insert and read '%s'" % n)
+
+ finally:
+ if os.path.exists(fname):
+ os.remove(fname)
+
+ def testTableDeleteRowRange(self):
+ """
+ Delete a contiguous range of rows using a slice
+ """
+
+ fname=tempfile.mktemp(prefix='fitsio-TableDeleteRowRange-',suffix='.fits')
+ try:
+ with fitsio.FITS(fname,'rw',clobber=True) as fits:
+ fits.write_table(self.data)
+
+ rowslice = slice(1,3)
+ with 
fitsio.FITS(fname,'rw') as fits:
+ fits[1].delete_rows(rowslice)
+
+ with fitsio.FITS(fname) as fits:
+ d = fits[1].read()
+
+ compare_data = self.data[ [0,3] ]
+ self.compare_rec(compare_data, d, "delete row range")
+
+
+ finally:
+ if os.path.exists(fname):
+ os.remove(fname)
+
+ def testTableDeleteRows(self):
+ """
+ Delete a list of rows
+ """
+
+ fname=tempfile.mktemp(prefix='fitsio-TableDeleteRows-',suffix='.fits')
+ try:
+ with fitsio.FITS(fname,'rw',clobber=True) as fits:
+ fits.write_table(self.data)
+
+ rows2delete = [1,3]
+ with fitsio.FITS(fname,'rw') as fits:
+ fits[1].delete_rows(rows2delete)
+
+ with fitsio.FITS(fname) as fits:
+ d = fits[1].read()
+
+ compare_data = self.data[ [0,2] ]
+ self.compare_rec(compare_data, d, "delete rows")
+
+
+ finally:
+ if os.path.exists(fname):
+ os.remove(fname)
+
+ def testTableResize(self):
+ """
+ Resize a table, shrinking and expanding from both the back
+ and the front
+ """
+
+ fname=tempfile.mktemp(prefix='fitsio-TableResize-',suffix='.fits')
+ try:
+
+ #
+ # shrink from back
+ #
+ with fitsio.FITS(fname,'rw',clobber=True) as fits:
+ fits.write_table(self.data)
+
+ nrows = 2
+ with fitsio.FITS(fname,'rw') as fits:
+ fits[1].resize(nrows)
+
+ with fitsio.FITS(fname) as fits:
+ d = fits[1].read()
+
+ compare_data = self.data[0:nrows]
+ self.compare_rec(compare_data, d, "shrink from back")
+
+
+ #
+ # shrink from front
+ #
+ with fitsio.FITS(fname,'rw',clobber=True) as fits:
+ fits.write_table(self.data)
+
+ with fitsio.FITS(fname,'rw') as fits:
+ fits[1].resize(nrows, front=True)
+
+ with fitsio.FITS(fname) as fits:
+ d = fits[1].read()
+
+ compare_data = self.data[nrows-self.data.size:]
+ self.compare_rec(compare_data, d, "shrink from front")
+
+
+ # These don't get zeroed
+
+ nrows = 10
+ add_data = numpy.zeros(nrows-self.data.size,dtype=self.data.dtype)
+ add_data['i1scalar'] = -128
+ add_data['i1vec'] = -128
+ add_data['i1arr'] = -128
+ add_data['u2scalar'] = 32768
+ add_data['u2vec'] = 32768
+ add_data['u2arr'] = 32768
+ add_data['u4scalar'] = 2147483648
+ add_data['u4vec'] = 2147483648
+ add_data['u4arr'] = 2147483648
+
+
+ #
+ # expand at the back
+ #
+ with fitsio.FITS(fname,'rw',clobber=True) as fits:
+ fits.write_table(self.data)
+ with fitsio.FITS(fname,'rw') as fits:
+ fits[1].resize(nrows)
+
+ with fitsio.FITS(fname) as fits:
+ d = fits[1].read()
+
+ compare_data = numpy.hstack( (self.data, add_data) )
+ self.compare_rec(compare_data, d, "expand at the back")
+
+ #
+ # expand at the front
+ #
+ with fitsio.FITS(fname,'rw',clobber=True) as fits:
+ fits.write_table(self.data)
+ with fitsio.FITS(fname,'rw') as fits:
+ fits[1].resize(nrows, front=True)
+
+ with fitsio.FITS(fname) as fits:
+ d = fits[1].read()
+
+ compare_data = numpy.hstack( (add_data, self.data) )
+ # These don't get zeroed
+ self.compare_rec(compare_data, d, "expand at the front")
+
+
+ finally:
+ if os.path.exists(fname):
+ os.remove(fname)
+
+
+
+ def testSlice(self):
+ """
+ Test reading by slice
+ """
+
+ fname=tempfile.mktemp(prefix='fitsio-TableAppend-',suffix='.fits')
+ try:
+ with fitsio.FITS(fname,'rw',clobber=True) as fits:
+
+ # initial write
+ fits.write_table(self.data)
+
+ # test reading single columns
+ for f in self.data.dtype.names:
+ d = fits[1][f][:]
+ self.compare_array(self.data[f], d, "test read all rows %s column subset" % f)
+
+ # test reading row subsets
+ rows = [1,3]
+ for f in self.data.dtype.names:
+ d = fits[1][f][rows]
+ self.compare_array(self.data[f][rows], d, "test %s row subset" % f)
+ for f in self.data.dtype.names:
+ d = fits[1][f][1:3]
+ self.compare_array(self.data[f][1:3], d, 
"test %s row slice" % f) + + # now list of columns + cols=['u2scalar','f4vec','Sarr'] + d = fits[1][cols][:] + for f in d.dtype.names: + self.compare_array(self.data[f][:], d[f], "test column list %s" % f) + + + cols=['u2scalar','f4vec','Sarr'] + d = fits[1][cols][rows] + for f in d.dtype.names: + self.compare_array(self.data[f][rows], d[f], "test column list %s row subset" % f) + + cols=['u2scalar','f4vec','Sarr'] + d = fits[1][cols][1:3] + for f in d.dtype.names: + self.compare_array(self.data[f][1:3], d[f], "test column list %s row slice" % f) + + + + finally: + if os.path.exists(fname): + os.remove(fname) + + + + + def testTableAppend(self): + """ + Test creating a table and appending new rows. + """ + + fname=tempfile.mktemp(prefix='fitsio-TableAppend-',suffix='.fits') + try: + with fitsio.FITS(fname,'rw',clobber=True) as fits: + + # initial write + fits.write_table(self.data, header=self.keys, extname='mytable') + # now append + data2 = self.data.copy() + data2['f4scalar'] = 3 + fits[1].append(data2) + + d = fits[1].read() + self.assertEqual(d.size, self.data.size*2) + + self.compare_rec(self.data, d[0:self.data.size], "Comparing initial write") + self.compare_rec(data2, d[self.data.size:], "Comparing appended data") + + h = fits[1].read_header() + self.compare_headerlist_header(self.keys, h) + + finally: + if os.path.exists(fname): + os.remove(fname) + + + + def testTableSubsets(self): + """ + Test a basic table write, data and a header, then reading back in to + check the values + """ + + fname=tempfile.mktemp(prefix='fitsio-TableWrite-',suffix='.fits') + try: + with fitsio.FITS(fname,'rw',clobber=True) as fits: + + fits.write_table(self.data, header=self.keys, extname='mytable') + + + rows = [1,3] + d = fits[1].read(rows=rows) + self.compare_rec_subrows(self.data, d, rows, "table subset") + columns = ['i1scalar','f4arr'] + d = fits[1].read(columns=columns, rows=rows) + + for f in columns: + d = fits[1].read_column(f,rows=rows) + self.compare_array(self.data[f][rows], d, "row subset, multi-column '%s'" % f) + for f in self.data.dtype.names: + d = fits[1].read_column(f,rows=rows) + self.compare_array(self.data[f][rows], d, "row subset, column '%s'" % f) + + finally: + if os.path.exists(fname): + os.remove(fname) + + + + def testGZWriteRead(self): + """ + Test a basic table write, data and a header, then reading back in to + check the values + + this code all works, but the file is zere size when done! + """ + + fname=tempfile.mktemp(prefix='fitsio-GZTableWrite-',suffix='.fits.gz') + try: + with fitsio.FITS(fname,'rw',clobber=True) as fits: + + fits.write_table(self.data, header=self.keys, extname='mytable') + + d = fits[1].read() + self.compare_rec(self.data, d, "gzip write/read") + + h = fits[1].read_header() + for entry in self.keys: + name=entry['name'].upper() + value=entry['value'] + hvalue = h[name] + if isinstance(hvalue,str): + hvalue = hvalue.strip() + self.assertEqual(value,hvalue,"testing header key '%s'" % name) + + if 'comment' in entry: + self.assertEqual(entry['comment'].strip(), + h.get_comment(name).strip(), + "testing comment for header key '%s'" % name) + stat=os.stat(fname) + self.assertNotEqual(stat.st_size, 0, "Making sure the data was flushed to disk") + finally: + if os.path.exists(fname): + os.remove(fname) + + def testBz2Read(self): + ''' + Write a normal .fits file, run bzip2 on it, then read the bz2 + file and verify that it's the same as what we put in; we don't + [currently support or] test *writing* bzip2. 
+ ''' + + if 'SKIP_BZIP_TEST' in os.environ: + if sys.version_info >= (2,7,0): + self.skipTest("skipping bzip tests") + else: + # skipTest only works for python 2.7+ + # just return + return + + fname=tempfile.mktemp(prefix='fitsio-BZ2TableWrite-',suffix='.fits') + bzfname = fname + '.bz2' + + try: + fits = fitsio.FITS(fname,'rw',clobber=True) + fits.write_table(self.data, header=self.keys, extname='mytable') + fits.close() + + os.system('bzip2 %s' % fname) + f2 = fitsio.FITS(bzfname) + d = f2[1].read() + self.compare_rec(self.data, d, "bzip2 read") + + h = f2[1].read_header() + for entry in self.keys: + name=entry['name'].upper() + value=entry['value'] + hvalue = h[name] + if isinstance(hvalue,str): + hvalue = hvalue.strip() + self.assertEqual(value,hvalue,"testing header key '%s'" % name) + if 'comment' in entry: + self.assertEqual(entry['comment'].strip(), + h.get_comment(name).strip(), + "testing comment for header key '%s'" % name) + except: + import traceback + traceback.print_exc() + self.assertTrue(False, 'Exception in testing bzip2 reading') + finally: + if os.path.exists(fname): + os.remove(fname) + if os.path.exists(bzfname): + os.remove(bzfname) + pass + def testChecksum(self): + """ + Test a basic table write, data and a header, then reading back in to + check the values + """ + + fname=tempfile.mktemp(prefix='fitsio-Checksum-',suffix='.fits') + try: + with fitsio.FITS(fname,'rw',clobber=True) as fits: + + fits.write_table(self.data, header=self.keys, extname='mytable') + fits[1].write_checksum() + fits[1].verify_checksum() + finally: + if os.path.exists(fname): + os.remove(fname) + + def testTrimStrings(self): + fname=tempfile.mktemp(prefix='fitsio-Trim-',suffix='.fits') + dt=[('fval','f8'),('name','S15'),('vec','f4',2)] + n=3 + data=numpy.zeros(n, dtype=dt) + data['fval'] = numpy.random.random(n) + data['vec'] = numpy.random.random(n*2).reshape(n,2) + + data['name'] = ['mike','really_long_name_to_fill','jan'] + + try: + with fitsio.FITS(fname,'rw',clobber=True) as fits: + fits.write(data) + + for onconstruct in [True,False]: + if onconstruct: + ctrim=True + otrim=False + else: + ctrim=False + otrim=True + + with fitsio.FITS(fname,'rw', trim_strings=ctrim) as fits: + + if ctrim: + dread=fits[1][:] + self.compare_rec( + data, + dread, + "trimmed strings constructor", + ) + + dname=fits[1]['name'][:] + self.compare_array( + data['name'], + dname, + "trimmed strings col read, constructor", + ) + dread=fits[1][ ['name'] ][:] + self.compare_array( + data['name'], + dread['name'], + "trimmed strings col read, constructor", + ) + + + + dread=fits[1].read(trim_strings=otrim) + self.compare_rec( + data, + dread, + "trimmed strings keyword", + ) + dname=fits[1].read(columns='name', trim_strings=otrim) + self.compare_array( + data['name'], + dname, + "trimmed strings col keyword", + ) + dread=fits[1].read(columns=['name'], trim_strings=otrim) + self.compare_array( + data['name'], + dread['name'], + "trimmed strings col keyword", + ) + + + + # convenience function + dread=fitsio.read(fname, trim_strings=True) + self.compare_rec( + data, + dread, + "trimmed strings convenience function", + ) + dname=fitsio.read(fname, columns='name', trim_strings=True) + self.compare_array( + data['name'], + dname, + "trimmed strings col convenience function", + ) + dread=fitsio.read(fname, columns=['name'], trim_strings=True) + self.compare_array( + data['name'], + dread['name'], + "trimmed strings col convenience function", + ) + + + + finally: + if os.path.exists(fname): + os.remove(fname) + + + def 
testLowerUpper(self): + fname=tempfile.mktemp(prefix='fitsio-LowerUpper-',suffix='.fits') + dt=[('MyName','f8'),('StuffThings','i4'),('Blah','f4')] + data=numpy.zeros(3, dtype=dt) + data['MyName'] = numpy.random.random(data.size) + data['StuffThings'] = numpy.random.random(data.size) + data['Blah'] = numpy.random.random(data.size) + + lnames = [n.lower() for n in data.dtype.names] + unames = [n.upper() for n in data.dtype.names] + + try: + with fitsio.FITS(fname,'rw',clobber=True) as fits: + fits.write(data) + + for i in [1,2]: + if i == 1: + lower=True + upper=False + else: + lower=False + upper=True + + with fitsio.FITS(fname,'rw', lower=lower, upper=upper) as fits: + for rows in [None, [1,2]]: + + d=fits[1].read(rows=rows) + self.compare_names(d.dtype.names,data.dtype.names, + lower=lower,upper=upper) + + + d=fits[1].read(rows=rows, columns=['MyName','stuffthings']) + self.compare_names(d.dtype.names,data.dtype.names[0:2], + lower=lower,upper=upper) + + d = fits[1][1:2] + self.compare_names(d.dtype.names,data.dtype.names, + lower=lower,upper=upper) + + if rows is not None: + d = fits[1][rows] + else: + d = fits[1][:] + self.compare_names(d.dtype.names,data.dtype.names, + lower=lower,upper=upper) + + if rows is not None: + d = fits[1][['myname','stuffthings']][rows] + else: + d = fits[1][['myname','stuffthings']][:] + self.compare_names(d.dtype.names,data.dtype.names[0:2], + lower=lower,upper=upper) + + # using overrides + with fitsio.FITS(fname,'rw') as fits: + for rows in [None, [1,2]]: + + d=fits[1].read(rows=rows, lower=lower, upper=upper) + self.compare_names(d.dtype.names,data.dtype.names, + lower=lower,upper=upper) + + + d=fits[1].read(rows=rows, columns=['MyName','stuffthings'], + lower=lower,upper=upper) + self.compare_names(d.dtype.names,data.dtype.names[0:2], + lower=lower,upper=upper) + + + + for rows in [None, [1,2]]: + d=fitsio.read(fname, rows=rows, lower=lower, upper=upper) + self.compare_names(d.dtype.names,data.dtype.names, + lower=lower,upper=upper) + + d=fitsio.read(fname, rows=rows, columns=['MyName','stuffthings'], + lower=lower, upper=upper) + self.compare_names(d.dtype.names,data.dtype.names[0:2], + lower=lower,upper=upper) + + + finally: + if os.path.exists(fname): + os.remove(fname) + + def testReadRaw(self): + fname=tempfile.mktemp(prefix='fitsio-readraw-',suffix='.fits') + + dt=[('MyName','f8'),('StuffThings','i4'),('Blah','f4')] + data=numpy.zeros(3, dtype=dt) + data['MyName'] = numpy.random.random(data.size) + data['StuffThings'] = numpy.random.random(data.size) + data['Blah'] = numpy.random.random(data.size) + + try: + with fitsio.FITS(fname,'rw',clobber=True) as fits: + fits.write(data) + raw1 = fits.read_raw() + + with fitsio.FITS('mem://', 'rw') as fits: + fits.write(data) + raw2 = fits.read_raw() + + f = open(fname, 'rb') + raw3 = f.read() + f.close() + + self.assertEqual(raw1, raw2) + self.assertEqual(raw1, raw3) + except: + import traceback + traceback.print_exc() + self.assertTrue(False, 'Exception in testing read_raw') + + def testTableBitcolReadWrite(self): + """ + Test basic write/read with bitcols + """ + + fname=tempfile.mktemp(prefix='fitsio-TableWriteBitcol-',suffix='.fits') + try: + with fitsio.FITS(fname,'rw',clobber=True) as fits: + try: + fits.write_table(self.bdata, extname='mytable', write_bitcols=True) + write_success=True + except: + write_success=False + + self.assertTrue(write_success,"testing write does not raise an error") + if not write_success: + self.skipTest("cannot test result if write failed") + + d=fits[1].read() + 
self.compare_rec(self.bdata, d, "table read/write")
+
+ # now test read_column
+ with fitsio.FITS(fname) as fits:
+
+ for f in self.bdata.dtype.names:
+ d = fits[1].read_column(f)
+ self.compare_array(self.bdata[f], d, "table 1 single field read '%s'" % f)
+
+ # now list of columns
+ for cols in [['b1vec','b1arr']]:
+ d = fits[1].read(columns=cols)
+ for f in d.dtype.names:
+ self.compare_array(self.bdata[f][:], d[f], "test column list %s" % f)
+
+ rows = [1,3]
+ d = fits[1].read(columns=cols, rows=rows)
+ for f in d.dtype.names:
+ self.compare_array(self.bdata[f][rows], d[f], "test column list %s row subset" % f)
+
+ finally:
+ if os.path.exists(fname):
+ os.remove(fname)
+
+ def testTableBitcolAppend(self):
+ """
+ Test creating a table with bitcol support and appending new rows.
+ """
+
+ fname=tempfile.mktemp(prefix='fitsio-TableAppendBitcol-',suffix='.fits')
+ try:
+ with fitsio.FITS(fname,'rw',clobber=True) as fits:
+
+ # initial write
+ fits.write_table(self.bdata, extname='mytable', write_bitcols=True)
+ # now append
+ bdata2 = self.bdata.copy()
+ fits[1].append(bdata2)
+
+ d = fits[1].read()
+ self.assertEqual(d.size, self.bdata.size*2)
+
+ self.compare_rec(self.bdata, d[0:self.bdata.size], "Comparing initial write")
+ self.compare_rec(bdata2, d[self.bdata.size:], "Comparing appended data")
+
+ finally:
+ if os.path.exists(fname):
+ os.remove(fname)
+
+ def compare_names(self, read_names, true_names, lower=False, upper=False):
+ for nread,ntrue in zip(read_names,true_names):
+ if lower:
+ tname = ntrue.lower()
+ mess="lower: '%s' vs '%s'" % (nread,tname)
+ else:
+ tname = ntrue.upper()
+ mess="upper: '%s' vs '%s'" % (nread,tname)
+ self.assertEqual(nread, tname, mess)
+
+ def check_header(self, header, rh):
+ for k in header:
+ v = header[k]
+ rv = rh[k]
+ if isinstance(rv,str):
+ v = v.strip()
+ rv = rv.strip()
+ self.assertEqual(v,rv,"testing equal key '%s'" % k)
+
+
+ def compare_headerlist_header(self, header_list, header):
+ """
+ The first is a list of dicts, second a FITSHDR
+ """
+ for entry in header_list:
+ name=entry['name'].upper()
+ value=entry['value']
+ hvalue = header[name]
+ if isinstance(hvalue,str):
+ hvalue = hvalue.strip()
+ self.assertEqual(value,hvalue,"testing header key '%s'" % name)
+
+ if 'comment' in entry:
+ self.assertEqual(entry['comment'].strip(),
+ header.get_comment(name).strip(),
+ "testing comment for header key '%s'" % name)
+
+ def compare_array_tol(self, arr1, arr2, tol, name):
+ self.assertEqual(arr1.shape, arr2.shape,
+ "testing arrays '%s' shapes are equal: "
+ "input %s, read: %s" % (name, arr1.shape, arr2.shape))
+
+ adiff = numpy.abs( (arr1-arr2)/arr1 )
+ maxdiff = adiff.max()
+ res=numpy.where(adiff > tol)
+ for i,w in enumerate(res):
+ self.assertEqual(w.size,0,
+ "testing array '%s' dim %d are "
+ "equal within tolerance %e, found "
+ "max diff %e" % (name,i,tol,maxdiff))
+
+
+ def compare_array(self, arr1, arr2, name):
+ self.assertEqual(arr1.shape, arr2.shape,
+ "testing arrays '%s' shapes are equal: "
+ "input %s, read: %s" % (name, arr1.shape, arr2.shape))
+
+ res=numpy.where(arr1 != arr2)
+ for i,w in enumerate(res):
+ self.assertEqual(w.size,0,"testing array '%s' dim %d are equal" % (name,i))
+
+ def compare_rec(self, rec1, rec2, name):
+ for f in rec1.dtype.names:
+ self.assertEqual(rec1[f].shape, rec2[f].shape,
+ "testing '%s' field '%s' shapes are equal: "
+ "input %s, read: %s" % (name, f,rec1[f].shape, rec2[f].shape))
+
+ res=numpy.where(rec1[f] != rec2[f])
+ for w in res:
+ self.assertEqual(w.size,0,"testing column %s" % f)
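+ # A note on the idiom used by these comparison helpers: for equal
+ # arrays, numpy.where(arr1 != arr2) returns one empty index array per
+ # dimension, e.g.
+ #   numpy.where(numpy.zeros((2,3)) != numpy.zeros((2,3)))
+ #   # -> (array([], dtype=int64), array([], dtype=int64))
+ # so asserting each result has size 0 checks element-wise equality.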
+
+ def compare_rec_subrows(self, rec1, rec2, rows, name):
+ for f in rec1.dtype.names:
+ self.assertEqual(rec1[f][rows].shape, rec2[f].shape,
+ "testing '%s' field '%s' shapes are equal: "
+ "input %s, read: %s" % (name, f,rec1[f].shape, rec2[f].shape))
+
+ res=numpy.where(rec1[f][rows] != rec2[f])
+ for w in res:
+ self.assertEqual(w.size,0,"testing column %s" % f)
+
+ #self.assertEqual(2,3,"on purpose error")
+
+ def compare_rec_with_var(self, rec1, rec2, name, rows=None):
+ """
+ The first one *must* be the one with object arrays.
+ The second can have fixed length columns.
+ Both should have the same number of rows.
+ """
+
+ if rows is None:
+ rows = arange(rec2.size)
+ self.assertEqual(rec1.size,rec2.size,
+ "testing '%s' same number of rows" % name)
+
+ # rec2 may have fewer fields
+ for f in rec2.dtype.names:
+
+ # f1 will have the objects
+ if fitsio.fitslib.is_object(rec1[f]):
+ self.compare_object_array(rec1[f], rec2[f],
+ "testing '%s' field '%s'" % (name,f),
+ rows=rows)
+ else:
+ self.compare_array(rec1[f][rows], rec2[f],
+ "testing '%s' num field '%s' equal" % (name,f))
+
+ def compare_object_array(self, arr1, arr2, name, rows=None):
+ """
+ The first array must be of object type
+ """
+ if rows is None:
+ rows = arange(arr1.size)
+
+ for i,row in enumerate(rows):
+ if isinstance(arr2[i],stype):
+ self.assertEqual(arr1[row],arr2[i],
+ "%s str el %d equal" % (name,i))
+ else:
+ delement = arr2[i]
+ orig = arr1[row]
+ s=len(orig)
+ self.compare_array(orig, delement[0:s],
+ "%s num el %d equal" % (name,i))
+
+ def compare_rec_with_var_subrows(self, rec1, rec2, name, rows):
+ """
+ The second one must be the one with object arrays
+ """
+ for f in rec1.dtype.names:
+ if fitsio.fitslib.is_object(rec2[f]):
+
+ for i in xrange(rec2.size):
+ if isinstance(rec2[f][i],stype):
+ self.assertEqual(rec1[f][i],rec2[f][i],
+ "testing '%s' str field '%s' el %d equal" % (name,f,i))
+ else:
+ delement = rec1[f][i]
+ orig = rec2[f][i]
+ s=orig.size
+ self.compare_array(orig, delement[0:s],
+ "testing '%s' num field '%s' el %d equal" % (name,f,i))
+ else:
+ self.compare_array(rec1[f], rec2[f],
+ "testing '%s' num field '%s' equal" % (name,f))
+
+
+
+
+
+if __name__ == '__main__':
+ test()
+
diff --git a/fitsio/util.py b/fitsio/util.py
new file mode 100644
index 0000000..9802777
--- /dev/null
+++ b/fitsio/util.py
@@ -0,0 +1,22 @@
+"""
+utilities for the fits library
+"""
+
+from . import _fitsio_wrap
+
+class FITSRuntimeWarning(RuntimeWarning):
+ pass
+
+def cfitsio_version(asfloat=False):
+ """
+ Return the cfitsio version as a string, or as a float if asfloat=True.
+ """
+ # use string version to avoid roundoffs
+ ver= '%0.3f' % _fitsio_wrap.cfitsio_version()
+ if asfloat:
+ return float(ver)
+ else:
+ return ver
+
+
+
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..8bfd5a1
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,4 @@
+[egg_info]
+tag_build = 
+tag_date = 0
+
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..a55ed13
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,242 @@
+#
+# setup script for fitsio, using setuptools
+#
+# c.f. 
+
+ # https://packaging.python.org/guides/distributing-packages-using-setuptools/
+
+from __future__ import print_function
+from setuptools import setup, Extension
+from distutils.command.build_ext import build_ext
+
+import os
+from subprocess import Popen, PIPE
+import glob
+import shutil
+
+class build_ext_subclass(build_ext):
+ boolean_options = build_ext.boolean_options + ['use-system-fitsio']
+
+ user_options = build_ext.user_options + \
+ [('use-system-fitsio', None,
+ "Use the cfitsio installed in the system"),
+
+ ('system-fitsio-includedir=', None,
+ "Path to look for cfitsio header; default is the system search path."),
+
+ ('system-fitsio-libdir=', None,
+ "Path to look for cfitsio library; default is the system search path."),
+ ]
+ cfitsio_version = '3430patch'
+ cfitsio_dir = 'cfitsio%s' % cfitsio_version
+
+ def initialize_options(self):
+ self.use_system_fitsio = False
+ self.system_fitsio_includedir = None
+ self.system_fitsio_libdir = None
+ build_ext.initialize_options(self)
+
+ def finalize_options(self):
+
+ build_ext.finalize_options(self)
+
+ self.cfitsio_build_dir = os.path.join(self.build_temp, self.cfitsio_dir)
+ self.cfitsio_zlib_dir = os.path.join(self.cfitsio_build_dir,'zlib')
+
+ if self.use_system_fitsio:
+ if self.system_fitsio_includedir:
+ self.include_dirs.insert(0, self.system_fitsio_includedir)
+ if self.system_fitsio_libdir:
+ self.library_dirs.insert(0, self.system_fitsio_libdir)
+ else:
+ # We defer configuration of the bundled cfitsio to build_extensions
+ # because we will know the compiler there.
+ self.include_dirs.insert(0, self.cfitsio_build_dir)
+
+ def run(self):
+ # For extensions that require 'numpy' in their include dirs,
+ # replace 'numpy' with the actual paths
+ import numpy
+ np_include = numpy.get_include()
+
+ for extension in self.extensions:
+ if 'numpy' in extension.include_dirs:
+ idx = extension.include_dirs.index('numpy')
+ extension.include_dirs.insert(idx, np_include)
+ extension.include_dirs.remove('numpy')
+
+ build_ext.run(self)
+
+ def build_extensions(self):
+ if not self.use_system_fitsio:
+
+ # Use the compiler for building python to build cfitsio
+ # for maximized compatibility.
+
+ # there is some issue with non-aligned data with optimizations
+ # set to '-O3' on some versions of gcc. It appears to be
+ # a disagreement between gcc 4 and gcc 5
+
+ CCold=self.compiler.compiler
+
+ CC=[]
+ for val in CCold:
+ if val=='-O3':
+ print("replacing '-O3' with '-O2' to address "
+ "gcc bug")
+ val='-O2'
+
+ if val=='ccache':
+ print("removing ccache from the compiler options")
+ continue
+
+ CC.append(val)
+
+ self.configure_cfitsio(
+ CC=CC,
+ ARCHIVE=self.compiler.archiver,
+ RANLIB=self.compiler.ranlib,
+ )
+
+ # If configure detected bzlib.h, we have to link to libbz2
+
+ if '-DHAVE_BZIP2=1' in open(os.path.join(self.cfitsio_build_dir, 'Makefile')).read():
+ self.compiler.add_library('bz2')
+
+ if '-DCFITSIO_HAVE_CURL=1' in open(os.path.join(self.cfitsio_build_dir, 'Makefile')).read():
+ self.compiler.add_library('curl')
+
+ self.compile_cfitsio()
+
+ # link against the .a library in cfitsio;
+ # It should have been a 'static' library of relocatable objects (-fPIC),
+ # since we use the python compiler flags
+
+ link_objects = glob.glob(os.path.join(self.cfitsio_build_dir,'*.a'))
+
+ self.compiler.set_link_objects(link_objects)
+
+ # Ultimate hack: append the .a files to the dependency list
+ # so they will be properly rebuilt if cfitsio source is updated. 
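+ # (Why the hack works: distutils' build_ext consults ext.depends as
+ # well as ext.sources when deciding whether an extension is out of
+ # date, so listing the archives here forces a relink whenever the
+ # bundled cfitsio is recompiled.)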
+ for ext in self.extensions: + ext.depends += link_objects + else: + self.compiler.add_library('cfitsio') + + # Check if system cfitsio was compiled with bzip2 and/or curl + if self.check_system_cfitsio_objects('bzip2'): + self.compiler.add_library('bz2') + if self.check_system_cfitsio_objects('curl_'): + self.compiler.add_library('curl') + + # fitsio requires libm as well. + self.compiler.add_library('m') + + # call the original build_extensions + + build_ext.build_extensions(self) + + def configure_cfitsio(self, CC=None, ARCHIVE=None, RANLIB=None): + + # prepare source code and run configure + def copy_update(dir1,dir2): + f1 = os.listdir(dir1) + for f in f1: + path1 = os.path.join(dir1,f) + path2 = os.path.join(dir2,f) + + if os.path.isdir(path1): + if not os.path.exists(path2): + os.makedirs(path2) + copy_update(path1,path2) + else: + if not os.path.exists(path2): + shutil.copy(path1,path2) + else: + stat1 = os.stat(path1) + stat2 = os.stat(path2) + if (stat1.st_mtime > stat2.st_mtime): + shutil.copy(path1,path2) + + + if not os.path.exists('build'): + ret=os.makedirs('build') + + if not os.path.exists(self.cfitsio_build_dir): + ret=os.makedirs(self.cfitsio_build_dir) + + copy_update(self.cfitsio_dir, self.cfitsio_build_dir) + + makefile = os.path.join(self.cfitsio_build_dir, 'Makefile') + + if os.path.exists(makefile): + # Makefile already there + return + + args = '' + args += ' CC="%s"' % ' '.join(CC[:1]) + args += ' CFLAGS="%s"' % ' '.join(CC[1:]) + + if ARCHIVE: + args += ' ARCHIVE="%s"' % ' '.join(ARCHIVE) + if RANLIB: + args += ' RANLIB="%s"' % ' '.join(RANLIB) + + p = Popen("sh ./configure --with-bzip2 " + args, + shell=True, cwd=self.cfitsio_build_dir) + p.wait() + if p.returncode != 0: + raise ValueError("could not configure cfitsio %s" % self.cfitsio_version) + + def compile_cfitsio(self): + p = Popen("make", + shell=True, cwd=self.cfitsio_build_dir) + p.wait() + if p.returncode != 0: + raise ValueError("could not compile cfitsio %s" % self.cfitsio_version) + + def check_system_cfitsio_objects(self, obj_name): + for lib_dir in self.library_dirs: + if os.path.isfile('%s/libcfitsio.a' % (lib_dir)): + p = Popen("nm -g %s/libcfitsio.a | grep %s" % (lib_dir, obj_name), + shell=True, stdout=PIPE, stderr=PIPE) + if len(p.stdout.read()) > 0: + return True + else: + return False + +sources = ["fitsio/fitsio_pywrap.c"] +data_files=[] + +ext=Extension("fitsio._fitsio_wrap", + sources, include_dirs=['numpy']) + +description = ("A full featured python library to read from and " + "write to FITS files.") + +long_description=open(os.path.join(os.path.dirname(__file__), "README.md")).read() + +classifiers = ["Development Status :: 5 - Production/Stable" + ,"License :: OSI Approved :: GNU General Public License (GPL)" + ,"Topic :: Scientific/Engineering :: Astronomy" + ,"Intended Audience :: Science/Research" + ] + +setup(name="fitsio", + version="0.9.12", + description=description, + long_description=long_description, + long_description_content_type='text/markdown; charset=UTF-8; variant=GFM', + license = "GPL", + classifiers=classifiers, + url="https://github.com/esheldon/fitsio", + author="Erin Scott Sheldon", + author_email="erin.sheldon@gmail.com", + setup_requires=['numpy'], + install_requires=['numpy'], + packages=['fitsio'], + data_files=data_files, + ext_modules=[ext], + cmdclass = { + "build_ext": build_ext_subclass, + } + ) -- 2.30.2