From d30ed2f1df4e4c471ba42d561327e671a933fee1 Mon Sep 17 00:00:00 2001
From: Ole Streicher
Date: Tue, 19 Dec 2017 10:35:47 +0000
Subject: [PATCH 1/1] Import python-astropy_2.0.3.orig.tar.gz

[dgit import orig python-astropy_2.0.3.orig.tar.gz]
---
 .astropy-root | 0
 CHANGES.rst | 7984 ++++
 LICENSE.rst | 26 +
 PKG-INFO | 30 +
 README.rst | 51 +
 ah_bootstrap.py | 958 +
 astropy/__init__.py | 342 +
 astropy/_compiler.c | 129 +
 astropy/_erfa/__init__.py | 7 +
 astropy/_erfa/core.c | 6044 +++
 astropy/_erfa/core.c.templ | 144 +
 astropy/_erfa/core.py | 22631 +++++++++++
 astropy/_erfa/core.py.templ | 285 +
 astropy/_erfa/erfa_generator.py | 563 +
 astropy/_erfa/setup_package.py | 118 +
 astropy/_erfa/tests/__init__.py | 1 +
 astropy/_erfa/tests/test_erfa.py | 233 +
 astropy/analytic_functions/__init__.py | 10 +
 astropy/analytic_functions/blackbody.py | 79 +
 astropy/analytic_functions/tests/__init__.py | 0
 .../tests/test_blackbody.py | 22 +
 astropy/astropy.cfg | 160 +
 astropy/config/__init__.py | 13 +
 astropy/config/affiliated.py | 9 +
 astropy/config/configuration.py | 724 +
 astropy/config/paths.py | 315 +
 astropy/config/setup_package.py | 11 +
 astropy/config/tests/__init__.py | 2 +
 astropy/config/tests/data/alias.cfg | 2 +
 astropy/config/tests/data/astropy.0.3.cfg | 149 +
 .../config/tests/data/astropy.0.3.windows.cfg | 149 +
 astropy/config/tests/data/deprecated.cfg | 2 +
 astropy/config/tests/data/empty.cfg | 15 +
 astropy/config/tests/data/not_empty.cfg | 15 +
 astropy/config/tests/test_configs.py | 358 +
 astropy/conftest.py | 16 +
 astropy/constants/__init__.py | 56 +
 astropy/constants/astropyconst13.py | 20 +
 astropy/constants/astropyconst20.py | 19 +
 astropy/constants/cgs.py | 18 +
 astropy/constants/codata2010.py | 112 +
 astropy/constants/codata2014.py | 107 +
 astropy/constants/constant.py | 237 +
 astropy/constants/iau2012.py | 78 +
 astropy/constants/iau2015.py | 96 +
 astropy/constants/setup_package.py | 5 +
 astropy/constants/si.py | 20 +
 astropy/constants/tests/__init__.py | 2 +
 astropy/constants/tests/test_constant.py | 165 +
 astropy/constants/tests/test_pickle.py | 22 +
 astropy/constants/tests/test_prior_version.py | 161 +
 astropy/convolution/__init__.py | 15 +
 astropy/convolution/boundary_extend.c | 9883 +++++
 astropy/convolution/boundary_extend.pyx | 187 +
 astropy/convolution/boundary_fill.c | 9971 +++++
 astropy/convolution/boundary_fill.pyx | 192 +
 astropy/convolution/boundary_none.c | 9678 +++++
 astropy/convolution/boundary_none.pyx | 175 +
 astropy/convolution/boundary_wrap.c | 9903 +++++
 astropy/convolution/boundary_wrap.pyx | 183 +
 astropy/convolution/convolve.py | 818 +
 astropy/convolution/core.py | 372 +
 astropy/convolution/kernels.py | 1018 +
 astropy/convolution/setup_package.py | 5 +
 astropy/convolution/tests/__init__.py | 0
 astropy/convolution/tests/test_convolve.py | 755 +
 .../convolution/tests/test_convolve_fft.py | 580 +
 .../tests/test_convolve_kernels.py | 130 +
 .../convolution/tests/test_convolve_models.py | 107 +
 .../convolution/tests/test_convolve_nddata.py | 58 +
 .../convolution/tests/test_convolve_speeds.py | 187 +
 astropy/convolution/tests/test_discretize.py | 198 +
 .../convolution/tests/test_kernel_class.py | 522 +
 astropy/convolution/tests/test_pickle.py | 27 +
 astropy/convolution/utils.py | 301 +
 astropy/coordinates/__init__.py | 43 +
 astropy/coordinates/angle_lextab.py | 13 +
 astropy/coordinates/angle_parsetab.py | 66 +
 astropy/coordinates/angle_utilities.py | 697 +
 astropy/coordinates/angles.py | 667 +
 astropy/coordinates/attributes.py | 528 +
 astropy/coordinates/baseframe.py | 1415 +
 .../coordinates/builtin_frames/__init__.py | 127 +
 astropy/coordinates/builtin_frames/altaz.py | 158 +
 .../coordinates/builtin_frames/baseradec.py | 92 +
 astropy/coordinates/builtin_frames/cirs.py | 33 +
 .../cirs_observed_transforms.py | 133 +
 .../coordinates/builtin_frames/ecliptic.py | 179 +
 .../builtin_frames/ecliptic_transforms.py | 118 +
 astropy/coordinates/builtin_frames/fk4.py | 220 +
 .../builtin_frames/fk4_fk5_transforms.py | 71 +
 astropy/coordinates/builtin_frames/fk5.py | 63 +
 .../coordinates/builtin_frames/galactic.py | 123 +
 .../builtin_frames/galactic_transforms.py | 47 +
 .../builtin_frames/galactocentric.py | 311 +
 astropy/coordinates/builtin_frames/gcrs.py | 102 +
 astropy/coordinates/builtin_frames/hcrs.py | 44 +
 astropy/coordinates/builtin_frames/icrs.py | 26 +
 .../builtin_frames/icrs_cirs_transforms.py | 334 +
 .../builtin_frames/icrs_fk5_transforms.py | 49 +
 .../intermediate_rotation_transforms.py | 132 +
 astropy/coordinates/builtin_frames/itrs.py | 37 +
 astropy/coordinates/builtin_frames/lsr.py | 202 +
 .../coordinates/builtin_frames/skyoffset.py | 230 +
 .../builtin_frames/supergalactic.py | 92 +
 .../supergalactic_transforms.py | 25 +
 astropy/coordinates/builtin_frames/utils.py | 295 +
 astropy/coordinates/calculation.py | 141 +
 .../data/constellation_data_roman87.dat | 358 +
 .../coordinates/data/constellation_names.dat | 89 +
 astropy/coordinates/data/sites.json | 15 +
 astropy/coordinates/distances.py | 206 +
 astropy/coordinates/earth.py | 743 +
 astropy/coordinates/earth_orientation.py | 413 +
 astropy/coordinates/errors.py | 177 +
 astropy/coordinates/funcs.py | 283 +
 astropy/coordinates/matching.py | 469 +
 astropy/coordinates/matrix_utilities.py | 130 +
 astropy/coordinates/name_resolve.py | 173 +
 astropy/coordinates/orbital_elements.py | 247 +
 astropy/coordinates/representation.py | 2804 ++
 astropy/coordinates/setup_package.py | 10 +
 astropy/coordinates/sites.py | 137 +
 astropy/coordinates/sky_coordinate.py | 1911 +
 astropy/coordinates/solar_system.py | 511 +
 astropy/coordinates/tests/__init__.py | 2 +
 .../coordinates/tests/accuracy/__init__.py | 11 +
 .../tests/accuracy/fk4_no_e_fk4.csv | 202 +
 .../tests/accuracy/fk4_no_e_fk5.csv | 202 +
 .../tests/accuracy/galactic_fk4.csv | 202 +
 .../tests/accuracy/generate_ref_ast.py | 257 +
 .../coordinates/tests/accuracy/icrs_fk5.csv | 202 +
 .../tests/accuracy/test_altaz_icrs.py | 189 +
 .../tests/accuracy/test_ecliptic.py | 110 +
 .../tests/accuracy/test_fk4_no_e_fk4.py | 64 +
 .../tests/accuracy/test_fk4_no_e_fk5.py | 65 +
 .../tests/accuracy/test_galactic_fk4.py | 62 +
 .../tests/accuracy/test_icrs_fk5.py | 61 +
 astropy/coordinates/tests/test_angles.py | 887 +
 .../tests/test_angular_separation.py | 109 +
 astropy/coordinates/tests/test_api_ape5.py | 449 +
 astropy/coordinates/tests/test_arrays.py | 272 +
 .../tests/test_atc_replacements.py | 38 +
 .../tests/test_celestial_transformations.py | 295 +
 astropy/coordinates/tests/test_distance.py | 256 +
 astropy/coordinates/tests/test_earth.py | 322 +
 .../test_finite_difference_velocities.py | 229 +
 astropy/coordinates/tests/test_formatting.py | 135 +
 astropy/coordinates/tests/test_frames.py | 904 +
 .../tests/test_frames_with_velocity.py | 223 +
 astropy/coordinates/tests/test_funcs.py | 81 +
 .../coordinates/tests/test_iau_fullstack.py | 182 +
 .../test_intermediate_transformations.py | 521 +
 astropy/coordinates/tests/test_matching.py | 290 +
 .../tests/test_matrix_utilities.py | 47 +
 .../coordinates/tests/test_name_resolve.py | 161 +
 astropy/coordinates/tests/test_pickle.py | 75 +
 astropy/coordinates/tests/test_regression.py | 596 +
 .../coordinates/tests/test_representation.py | 1365 +
 .../tests/test_representation_arithmetic.py | 1225 +
 .../tests/test_representation_methods.py | 273 +
 .../tests/test_shape_manipulation.py | 267 +
 astropy/coordinates/tests/test_sites.py | 170 +
 astropy/coordinates/tests/test_sky_coord.py | 1389 +
 .../tests/test_skyoffset_transformations.py | 312 +
 .../coordinates/tests/test_solar_system.py | 365 +
 .../coordinates/tests/test_transformations.py | 434 +
 .../tests/test_unit_representation.py | 83 +
 .../coordinates/tests/test_velocity_corrs.py | 300 +
 astropy/coordinates/tests/utils.py | 27 +
 astropy/coordinates/transformations.py | 1327 +
 astropy/cosmology/__init__.py | 13 +
 astropy/cosmology/core.py | 2904 ++
 astropy/cosmology/funcs.py | 146 +
 astropy/cosmology/parameters.py | 148 +
 astropy/cosmology/scalar_inv_efuncs.c | 8546 ++++
 astropy/cosmology/scalar_inv_efuncs.pyx | 225 +
 astropy/cosmology/setup_package.py | 5 +
 astropy/cosmology/tests/__init__.py | 0
 astropy/cosmology/tests/test_cosmology.py | 1567 +
 astropy/cosmology/tests/test_pickle.py | 19 +
 astropy/cython_version.py | 2 +
 astropy/extern/__init__.py | 10 +
 astropy/extern/bundled/__init__.py | 0
 astropy/extern/bundled/six.py | 868 +
 astropy/extern/configobj/__init__.py | 0
 astropy/extern/configobj/configobj.py | 2485 ++
 astropy/extern/configobj/validate.py | 1473 +
 astropy/extern/css/jquery.dataTables.css | 452 +
 astropy/extern/js/jquery-3.1.1.js | 10220 +++++
 astropy/extern/js/jquery-3.1.1.min.js | 4 +
 astropy/extern/js/jquery.dataTables.js | 15278 +++++++
 astropy/extern/js/jquery.dataTables.min.js | 166 +
 astropy/extern/plugins/__init__.py | 10 +
 .../plugins/pytest_doctestplus/__init__.py | 4 +
 .../pytest_doctestplus/output_checker.py | 199 +
 .../plugins/pytest_doctestplus/plugin.py | 373 +
 .../plugins/pytest_openfiles/__init__.py | 4 +
 .../extern/plugins/pytest_openfiles/plugin.py | 104 +
 .../plugins/pytest_remotedata/__init__.py | 4 +
 .../pytest_remotedata/disable_internet.py | 152 +
 .../plugins/pytest_remotedata/plugin.py | 80 +
 astropy/extern/ply/__init__.py | 5 +
 astropy/extern/ply/cpp.py | 917 +
 astropy/extern/ply/ctokens.py | 133 +
 astropy/extern/ply/lex.py | 1100 +
 astropy/extern/ply/yacc.py | 3492 ++
 astropy/extern/setup_package.py | 13 +
 astropy/extern/six.py | 60 +
 astropy/io/__init__.py | 5 +
 astropy/io/ascii/__init__.py | 46 +
 astropy/io/ascii/basic.py | 390 +
 astropy/io/ascii/cds.py | 322 +
 astropy/io/ascii/connect.py | 95 +
 astropy/io/ascii/core.py | 1538 +
 astropy/io/ascii/cparser.c | 32910 ++++++++++++++++
 astropy/io/ascii/cparser.pyx | 1088 +
 astropy/io/ascii/daophot.py | 395 +
 astropy/io/ascii/ecsv.py | 252 +
 astropy/io/ascii/fastbasic.py | 346 +
 astropy/io/ascii/fixedwidth.py | 406 +
 astropy/io/ascii/html.py | 469 +
 astropy/io/ascii/ipac.py | 526 +
 astropy/io/ascii/latex.py | 444 +
 astropy/io/ascii/misc.py | 129 +
 astropy/io/ascii/rst.py | 63 +
 astropy/io/ascii/setup_package.py | 94 +
 astropy/io/ascii/sextractor.py | 151 +
 astropy/io/ascii/src/tokenizer.c | 1062 +
 astropy/io/ascii/src/tokenizer.h | 113 +
 astropy/io/ascii/tests/__init__.py | 0
 astropy/io/ascii/tests/common.py | 109 +
 astropy/io/ascii/tests/t/apostrophe.rdb | 6 +
 astropy/io/ascii/tests/t/apostrophe.tab | 3 +
 astropy/io/ascii/tests/t/bad.txt | 6 +
 astropy/io/ascii/tests/t/bars_at_ends.txt | 4 +
 astropy/io/ascii/tests/t/cds.dat | 38 +
 .../io/ascii/tests/t/cds/description/ReadMe | 67 +
 .../ascii/tests/t/cds/description/table.dat | 2 +
 astropy/io/ascii/tests/t/cds/glob/ReadMe | 572 +
 .../io/ascii/tests/t/cds/glob/lmxbrefs.dat | 291 +
 astropy/io/ascii/tests/t/cds/multi/ReadMe | 64 +
 .../io/ascii/tests/t/cds/multi/lhs2065.dat | 18 +
 .../io/ascii/tests/t/cds/multi/lp944-20.dat | 18 +
 astropy/io/ascii/tests/t/cds2.dat | 287 +
 astropy/io/ascii/tests/t/cds_malformed.dat | 37 +
 astropy/io/ascii/tests/t/commented_header.dat | 4 +
 .../io/ascii/tests/t/commented_header2.dat | 5 +
 astropy/io/ascii/tests/t/continuation.dat | 4 +
 astropy/io/ascii/tests/t/daophot.dat | 45 +
 astropy/io/ascii/tests/t/daophot.dat.gz | Bin 0 -> 793 bytes
 astropy/io/ascii/tests/t/daophot2.dat | 31 +
 astropy/io/ascii/tests/t/daophot3.dat | 120 +
 astropy/io/ascii/tests/t/daophot4.dat | 113 +
 astropy/io/ascii/tests/t/fill_values.txt | 3 +
 .../io/ascii/tests/t/fixed_width_2_line.txt | 4 +
 astropy/io/ascii/tests/t/html.html | 29 +
 astropy/io/ascii/tests/t/html2.html | 28 +
 astropy/io/ascii/tests/t/ipac.dat | 12 +
 astropy/io/ascii/tests/t/ipac.dat.bz2 | Bin 0 -> 385 bytes
 astropy/io/ascii/tests/t/ipac.dat.xz | Bin 0 -> 320 bytes
 astropy/io/ascii/tests/t/latex1.tex | 10 +
 astropy/io/ascii/tests/t/latex1.tex.gz | Bin 0 -> 198 bytes
 astropy/io/ascii/tests/t/latex2.tex | 14 +
 astropy/io/ascii/tests/t/latex3.tex | 7 +
 astropy/io/ascii/tests/t/nls1_stackinfo.dbout | 60 +
 astropy/io/ascii/tests/t/no_data_cds.dat | 37 +
 astropy/io/ascii/tests/t/no_data_daophot.dat | 7 +
 astropy/io/ascii/tests/t/no_data_ipac.dat | 10 +
 .../io/ascii/tests/t/no_data_sextractor.dat | 5 +
 .../io/ascii/tests/t/no_data_with_header.dat | 1 +
 .../ascii/tests/t/no_data_without_header.dat | 2 +
 astropy/io/ascii/tests/t/sextractor.dat | 8 +
 astropy/io/ascii/tests/t/sextractor2.dat | 14 +
 astropy/io/ascii/tests/t/sextractor3.dat | 10 +
 astropy/io/ascii/tests/t/short.rdb | 14 +
 astropy/io/ascii/tests/t/short.rdb.bz2 | Bin 0 -> 146 bytes
 astropy/io/ascii/tests/t/short.rdb.gz | Bin 0 -> 148 bytes
 astropy/io/ascii/tests/t/short.rdb.xz | Bin 0 -> 192 bytes
 astropy/io/ascii/tests/t/short.tab | 8 +
 astropy/io/ascii/tests/t/simple.txt | 4 +
 astropy/io/ascii/tests/t/simple2.txt | 4 +
 astropy/io/ascii/tests/t/simple3.txt | 3 +
 astropy/io/ascii/tests/t/simple4.txt | 3 +
 astropy/io/ascii/tests/t/simple5.txt | 4 +
 astropy/io/ascii/tests/t/simple_csv.csv | 3 +
 .../io/ascii/tests/t/simple_csv_missing.csv | 3 +
 .../ascii/tests/t/space_delim_blank_lines.txt | 8 +
 .../ascii/tests/t/space_delim_no_header.dat | 2 +
 .../io/ascii/tests/t/space_delim_no_names.dat | 2 +
 astropy/io/ascii/tests/t/test4.dat | 12 +
 astropy/io/ascii/tests/t/test5.dat | 22 +
 astropy/io/ascii/tests/t/vizier/ReadMe | 89 +
 astropy/io/ascii/tests/t/vizier/table1.dat | 15 +
 astropy/io/ascii/tests/t/vizier/table5.dat | 49 +
 astropy/io/ascii/tests/t/vots_spec.dat | 99 +
 astropy/io/ascii/tests/t/whitespace.dat | 3 +
 astropy/io/ascii/tests/test_c_reader.py | 1122 +
 .../tests/test_cds_header_from_readme.py | 155 +
 astropy/io/ascii/tests/test_compressed.py | 55 +
 astropy/io/ascii/tests/test_connect.py | 140 +
 astropy/io/ascii/tests/test_ecsv.py | 418 +
 astropy/io/ascii/tests/test_fixedwidth.py | 481 +
 astropy/io/ascii/tests/test_html.py | 730 +
 .../io/ascii/tests/test_ipac_definitions.py | 151 +
 astropy/io/ascii/tests/test_read.py | 1299 +
 astropy/io/ascii/tests/test_rst.py | 184 +
 astropy/io/ascii/tests/test_types.py | 55 +
 astropy/io/ascii/tests/test_write.py | 741 +
 astropy/io/ascii/ui.py | 764 +
 astropy/io/fits/__init__.py | 92 +
 astropy/io/fits/_numpy_hacks.py | 66 +
 astropy/io/fits/card.py | 1269 +
 astropy/io/fits/column.py | 2272 ++
 astropy/io/fits/connect.py | 226 +
 astropy/io/fits/convenience.py | 1058 +
 astropy/io/fits/diff.py | 1381 +
 astropy/io/fits/file.py | 593 +
 astropy/io/fits/fitsrec.py | 1358 +
 astropy/io/fits/hdu/__init__.py | 16 +
 astropy/io/fits/hdu/base.py | 1671 +
 astropy/io/fits/hdu/compressed.py | 1960 +
 astropy/io/fits/hdu/groups.py | 623 +
 astropy/io/fits/hdu/hdulist.py | 1373 +
 astropy/io/fits/hdu/image.py | 1142 +
 astropy/io/fits/hdu/nonstandard.py | 125 +
 astropy/io/fits/hdu/streaming.py | 231 +
 astropy/io/fits/hdu/table.py | 1500 +
 astropy/io/fits/header.py | 2129 +
 astropy/io/fits/py3compat.py | 90 +
 astropy/io/fits/scripts/__init__.py | 8 +
 astropy/io/fits/scripts/fitscheck.py | 220 +
 astropy/io/fits/scripts/fitsdiff.py | 328 +
 astropy/io/fits/scripts/fitsheader.py | 329 +
 astropy/io/fits/scripts/fitsinfo.py | 62 +
 astropy/io/fits/setup_package.py | 71 +
 astropy/io/fits/src/compressionmodule.c | 1181 +
 astropy/io/fits/src/compressionmodule.h | 67 +
 astropy/io/fits/tests/__init__.py | 61 +
 astropy/io/fits/tests/cfitsio_verify.c | 74 +
 astropy/io/fits/tests/data/arange.fits | Bin 0 -> 8640 bytes
 astropy/io/fits/tests/data/ascii.fits | 1 +
 astropy/io/fits/tests/data/blank.fits | Bin 0 -> 5760 bytes
 astropy/io/fits/tests/data/btable.fits | Bin 0 -> 8640 bytes
 astropy/io/fits/tests/data/checksum.fits | 1 +
 astropy/io/fits/tests/data/comp.fits | 405 +
 .../tests/data/compressed_float_bzero.fits | Bin 0 -> 8640 bytes
 .../io/fits/tests/data/compressed_image.fits | Bin 0 -> 8640 bytes
 astropy/io/fits/tests/data/fixed-1890.fits | 1 +
 astropy/io/fits/tests/data/group.fits | Bin 0 -> 5760 bytes
 .../io/fits/tests/data/history_header.fits | 1 +
 astropy/io/fits/tests/data/memtest.fits | 1 +
 astropy/io/fits/tests/data/o4sp040b0_raw.fits | 1 +
 astropy/io/fits/tests/data/random_groups.fits | 8 +
 astropy/io/fits/tests/data/scale.fits | Bin 0 -> 8640 bytes
 astropy/io/fits/tests/data/stddata.fits | Bin 0 -> 23040 bytes
 astropy/io/fits/tests/data/table.fits | Bin 0 -> 8640 bytes
 astropy/io/fits/tests/data/tb.fits | Bin 0 -> 8640 bytes
 astropy/io/fits/tests/data/tdim.fits | Bin 0 -> 8640 bytes
 astropy/io/fits/tests/data/test0.fits | 1 +
 .../tests/data/variable_length_table.fits | Bin 0 -> 8640 bytes
 astropy/io/fits/tests/data/zerowidth.fits | 27 +
 astropy/io/fits/tests/test_checksum.py | 454 +
 astropy/io/fits/tests/test_connect.py | 351 +
 astropy/io/fits/tests/test_convenience.py | 143 +
 astropy/io/fits/tests/test_core.py | 1205 +
 astropy/io/fits/tests/test_diff.py | 814 +
 astropy/io/fits/tests/test_division.py | 42 +
 astropy/io/fits/tests/test_fitsdiff.py | 227 +
 astropy/io/fits/tests/test_fitsheader.py | 79 +
 astropy/io/fits/tests/test_fitsinfo.py | 31 +
 astropy/io/fits/tests/test_groups.py | 212 +
 astropy/io/fits/tests/test_hdulist.py | 969 +
 astropy/io/fits/tests/test_header.py | 2752 ++
 astropy/io/fits/tests/test_image.py | 1880 +
 astropy/io/fits/tests/test_nonstandard.py | 66 +
 astropy/io/fits/tests/test_structured.py | 101 +
 astropy/io/fits/tests/test_table.py | 3019 ++
 astropy/io/fits/tests/test_uint.py | 114 +
 astropy/io/fits/tests/test_util.py | 173 +
 astropy/io/fits/util.py | 924 +
 astropy/io/fits/verify.py | 175 +
 astropy/io/misc/__init__.py | 7 +
 astropy/io/misc/connect.py | 10 +
 astropy/io/misc/hdf5.py | 303 +
 astropy/io/misc/pickle_helpers.py | 125 +
 astropy/io/misc/tests/__init__.py | 1 +
 astropy/io/misc/tests/test_hdf5.py | 466 +
 astropy/io/misc/tests/test_pickle_helpers.py | 108 +
 astropy/io/misc/tests/test_yaml.py | 183 +
 astropy/io/misc/yaml.py | 358 +
 astropy/io/registry.py | 618 +
 astropy/io/setup_package.py | 5 +
 astropy/io/tests/__init__.py | 0
 astropy/io/tests/test_registry.py | 388 +
 astropy/io/votable/__init__.py | 34 +
 astropy/io/votable/connect.py | 167 +
 astropy/io/votable/converters.py | 1450 +
 astropy/io/votable/data/VOTable.dtd | 158 +
 astropy/io/votable/data/VOTable.v1.1.xsd | 466 +
 astropy/io/votable/data/VOTable.v1.2.xsd | 558 +
 astropy/io/votable/data/VOTable.v1.3.xsd | 572 +
 astropy/io/votable/data/ucd1p-words.txt | 473 +
 astropy/io/votable/exceptions.py | 1457 +
 astropy/io/votable/setup_package.py | 29 +
 astropy/io/votable/src/tablewriter.c | 409 +
 astropy/io/votable/table.py | 391 +
 astropy/io/votable/tests/__init__.py | 1 +
 astropy/io/votable/tests/converter_test.py | 276 +
 .../io/votable/tests/data/custom_datatype.xml | 18 +
 astropy/io/votable/tests/data/empty_table.xml | 12 +
 astropy/io/votable/tests/data/gemini.xml | 168 +
 .../io/votable/tests/data/irsa-nph-error.xml | 5 +
 .../io/votable/tests/data/irsa-nph-m31.xml | 70 +
 astropy/io/votable/tests/data/names.xml | 212 +
 astropy/io/votable/tests/data/no_resource.txt | 5 +
 astropy/io/votable/tests/data/no_resource.xml | 6 +
 .../votable/tests/data/nonstandard_units.xml | 13 +
 .../regression.bin.tabledata.truth.1.1.xml | 306 +
 .../regression.bin.tabledata.truth.1.3.xml | 309 +
 astropy/io/votable/tests/data/regression.xml | 305 +
 astropy/io/votable/tests/data/tb.fits | Bin 0 -> 8640 bytes
 .../tests/data/too_many_columns.xml.gz | Bin 0 -> 1734 bytes
 astropy/io/votable/tests/data/validation.txt | 237 +
 astropy/io/votable/tests/exception_test.py | 49 +
 astropy/io/votable/tests/table_test.py | 178 +
 astropy/io/votable/tests/tree_test.py | 31 +
 astropy/io/votable/tests/ucd_test.py | 62 +
 astropy/io/votable/tests/util_test.py | 68 +
 astropy/io/votable/tests/vo_test.py | 1036 +
 astropy/io/votable/tree.py | 3653 ++
 astropy/io/votable/ucd.py | 195 +
 astropy/io/votable/util.py | 216 +
 astropy/io/votable/validator/__init__.py | 6 +
 astropy/io/votable/validator/html.py | 313 +
 astropy/io/votable/validator/main.py | 162 +
 astropy/io/votable/validator/result.py | 360 +
 .../io/votable/validator/urls/cone.big.dat.gz | Bin 0 -> 168333 bytes
 .../votable/validator/urls/cone.broken.dat.gz | Bin 0 -> 350 bytes
 .../votable/validator/urls/cone.good.dat.gz | Bin 0 -> 168334 bytes
 .../validator/urls/cone.incorrect.dat.gz | Bin 0 -> 721 bytes
 astropy/io/votable/volint.py | 18 +
 astropy/io/votable/xmlutil.py | 129 +
 astropy/logger.py | 588 +
 astropy/modeling/__init__.py | 13 +
 astropy/modeling/blackbody.py | 348 +
 astropy/modeling/core.py | 3346 ++
 astropy/modeling/fitting.py | 1275 +
 astropy/modeling/functional_models.py | 2559 ++
 astropy/modeling/mappings.py | 178 +
 astropy/modeling/models.py | 71 +
 astropy/modeling/optimizers.py | 246 +
 astropy/modeling/parameters.py | 920 +
 astropy/modeling/polynomial.py | 1512 +
 astropy/modeling/powerlaws.py | 520 +
 astropy/modeling/projections.py | 1991 +
 astropy/modeling/rotations.py | 402 +
 astropy/modeling/setup_package.py | 160 +
 astropy/modeling/src/projections.c | 1792 +
 astropy/modeling/src/projections.c.templ | 245 +
 astropy/modeling/src/wcsconfig.h | 38 +
 astropy/modeling/statistic.py | 44 +
 astropy/modeling/tabular.py | 300 +
 astropy/modeling/tests/__init__.py | 0
 astropy/modeling/tests/data/1904-66_AZP.fits | 353 +
 astropy/modeling/tests/data/__init__.py | 5 +
 astropy/modeling/tests/data/hst_sip.hdr | 42 +
 astropy/modeling/tests/data/idcompspec.fits | 36 +
 astropy/modeling/tests/data/irac_sip.hdr | 241 +
 astropy/modeling/tests/example_models.py | 339 +
 astropy/modeling/tests/irafutil.py | 266 +
 astropy/modeling/tests/test_blackbody.py | 148 +
 astropy/modeling/tests/test_compound.py | 916 +
 astropy/modeling/tests/test_constraints.py | 480 +
 astropy/modeling/tests/test_core.py | 377 +
 astropy/modeling/tests/test_fitters.py | 710 +
 .../modeling/tests/test_functional_models.py | 271 +
 astropy/modeling/tests/test_input.py | 851 +
 astropy/modeling/tests/test_mappings.py | 90 +
 astropy/modeling/tests/test_models.py | 624 +
 .../modeling/tests/test_models_quantities.py | 330 +
 astropy/modeling/tests/test_parameters.py | 625 +
 astropy/modeling/tests/test_polynomial.py | 385 +
 astropy/modeling/tests/test_projections.py | 310 +
 .../tests/test_quantities_evaluation.py | 190 +
 .../modeling/tests/test_quantities_fitting.py | 136 +
 .../modeling/tests/test_quantities_model.py | 25 +
 .../tests/test_quantities_parameters.py | 342 +
 .../tests/test_quantities_rotations.py | 104 +
 astropy/modeling/tests/test_rotations.py | 125 +
 astropy/modeling/tests/test_utils.py | 105 +
 astropy/modeling/tests/utils.py | 20 +
 astropy/modeling/utils.py | 653 +
 astropy/nddata/__init__.py | 50 +
 astropy/nddata/ccddata.py | 589 +
 astropy/nddata/compat.py | 300 +
 astropy/nddata/decorators.py | 281 +
 astropy/nddata/flag_collection.py | 50 +
 astropy/nddata/mixins/__init__.py | 0
 astropy/nddata/mixins/ndarithmetic.py | 619 +
 astropy/nddata/mixins/ndio.py | 39 +
 astropy/nddata/mixins/ndslicing.py | 125 +
 astropy/nddata/mixins/tests/__init__.py | 0
 .../nddata/mixins/tests/test_ndarithmetic.py | 834 +
 astropy/nddata/mixins/tests/test_ndio.py | 14 +
 astropy/nddata/mixins/tests/test_ndslicing.py | 163 +
 astropy/nddata/nddata.py | 310 +
 astropy/nddata/nddata_base.py | 71 +
 astropy/nddata/nddata_withmixins.py | 74 +
 astropy/nddata/nduncertainty.py | 657 +
 astropy/nddata/setup_package.py | 9 +
 astropy/nddata/tests/__init__.py | 0
 astropy/nddata/tests/data/sip-wcs.fits | 1 +
 astropy/nddata/tests/test_ccddata.py | 877 +
 astropy/nddata/tests/test_compat.py | 145 +
 astropy/nddata/tests/test_decorators.py | 367 +
 astropy/nddata/tests/test_flag_collection.py | 53 +
 astropy/nddata/tests/test_nddata.py | 435 +
 astropy/nddata/tests/test_nddata_base.py | 46 +
 astropy/nddata/tests/test_nduncertainty.py | 245 +
 astropy/nddata/tests/test_utils.py | 463 +
 astropy/nddata/utils.py | 915 +
 astropy/samp/__init__.py | 39 +
 astropy/samp/client.py | 722 +
 astropy/samp/constants.py | 27 +
 astropy/samp/data/astropy_icon.png | Bin 0 -> 1434 bytes
 astropy/samp/data/clientaccesspolicy.xml | 13 +
 astropy/samp/data/crossdomain.xml | 7 +
 astropy/samp/errors.py | 37 +
 astropy/samp/hub.py | 1405 +
 astropy/samp/hub_proxy.py | 205 +
 astropy/samp/hub_script.py | 144 +
 astropy/samp/integrated_client.py | 500 +
 astropy/samp/lockfile_helpers.py | 271 +
 astropy/samp/setup_package.py | 15 +
 astropy/samp/standard_profile.py | 157 +
 astropy/samp/tests/__init__.py | 0
 astropy/samp/tests/test_client.py | 50 +
 astropy/samp/tests/test_errors.py | 25 +
 astropy/samp/tests/test_helpers.py | 70 +
 astropy/samp/tests/test_hub.py | 40 +
 astropy/samp/tests/test_hub_proxy.py | 52 +
 astropy/samp/tests/test_hub_script.py | 24 +
 astropy/samp/tests/test_standard_profile.py | 234 +
 astropy/samp/tests/test_web_profile.py | 93 +
 .../samp/tests/web_profile_test_helpers.py | 266 +
 astropy/samp/utils.py | 164 +
 astropy/samp/web_profile.py | 182 +
 astropy/setup_package.py | 5 +
 astropy/stats/__init__.py | 23 +
 astropy/stats/bayesian_blocks.py | 517 +
 astropy/stats/biweight.py | 644 +
 astropy/stats/circstats.py | 454 +
 astropy/stats/funcs.py | 1249 +
 astropy/stats/histogram.py | 357 +
 astropy/stats/info_theory.py | 403 +
 astropy/stats/jackknife.py | 182 +
 astropy/stats/lombscargle/__init__.py | 8 +
 astropy/stats/lombscargle/core.py | 358 +
 .../lombscargle/implementations/__init__.py | 8 +
 .../lombscargle/implementations/chi2_impl.py | 88 +
 .../lombscargle/implementations/cython_impl.c | 26403 +++++++++++++
 .../implementations/cython_impl.pyx | 248 +
 .../lombscargle/implementations/fast_impl.py | 137 +
 .../implementations/fastchi2_impl.py | 137 +
 .../stats/lombscargle/implementations/main.py | 228 +
 .../stats/lombscargle/implementations/mle.py | 110 +
 .../lombscargle/implementations/scipy_impl.py | 74 +
 .../lombscargle/implementations/slow_impl.py | 122 +
 .../implementations/tests/__init__.py | 0
 .../implementations/tests/test_mle.py | 55 +
 .../implementations/tests/test_utils.py | 83 +
 .../lombscargle/implementations/utils.py | 183 +
 astropy/stats/lombscargle/tests/__init__.py | 0
 .../lombscargle/tests/test_lombscargle.py | 497 +
 astropy/stats/setup_package.py | 2 +
 astropy/stats/sigma_clipping.py | 464 +
 astropy/stats/spatial.py | 331 +
 astropy/stats/tests/__init__.py | 0
 astropy/stats/tests/test_bayesian_blocks.py | 148 +
 astropy/stats/tests/test_biweight.py | 268 +
 astropy/stats/tests/test_circstats.py | 125 +
 astropy/stats/tests/test_funcs.py | 639 +
 astropy/stats/tests/test_histogram.py | 143 +
 astropy/stats/tests/test_info_theory.py | 74 +
 astropy/stats/tests/test_jackknife.py | 60 +
 astropy/stats/tests/test_sigma_clipping.py | 196 +
 astropy/stats/tests/test_spatial.py | 144 +
 astropy/table/__init__.py | 63 +
 astropy/table/_column_mixins.c | 6095 +++
 astropy/table/_column_mixins.pyx | 90 +
 astropy/table/_np_utils.c | 9176 +++++
 astropy/table/_np_utils.pyx | 134 +
 astropy/table/bst.py | 681 +
 astropy/table/column.py | 1291 +
 astropy/table/groups.py | 406 +
 astropy/table/index.py | 858 +
 astropy/table/info.py | 125 +
 astropy/table/jsviewer.py | 198 +
 astropy/table/meta.py | 342 +
 astropy/table/np_utils.py | 203 +
 astropy/table/operations.py | 872 +
 astropy/table/pprint.py | 713 +
 astropy/table/row.py | 176 +
 astropy/table/serialize.py | 211 +
 astropy/table/setup_package.py | 25 +
 astropy/table/sorted_array.py | 317 +
 astropy/table/table.py | 2852 ++
 astropy/table/table_helpers.py | 179 +
 astropy/table/tests/__init__.py | 0
 astropy/table/tests/conftest.py | 198 +
 astropy/table/tests/test_array.py | 46 +
 astropy/table/tests/test_bst.py | 108 +
 astropy/table/tests/test_column.py | 834 +
 astropy/table/tests/test_groups.py | 581 +
 astropy/table/tests/test_index.py | 463 +
 astropy/table/tests/test_info.py | 246 +
 astropy/table/tests/test_init_table.py | 492 +
 astropy/table/tests/test_item_access.py | 263 +
 astropy/table/tests/test_jsviewer.py | 178 +
 astropy/table/tests/test_masked.py | 417 +
 astropy/table/tests/test_mixin.py | 618 +
 astropy/table/tests/test_np_utils.py | 46 +
 astropy/table/tests/test_operations.py | 1159 +
 astropy/table/tests/test_pickle.py | 118 +
 astropy/table/tests/test_pprint.py | 697 +
 astropy/table/tests/test_row.py | 204 +
 astropy/table/tests/test_subclass.py | 99 +
 astropy/table/tests/test_table.py | 1952 +
 astropy/tests/__init__.py | 28 +
 astropy/tests/command.py | 331 +
 astropy/tests/coveragerc | 33 +
 astropy/tests/disable_internet.py | 153 +
 astropy/tests/helper.py | 526 +
 astropy/tests/image_tests.py | 10 +
 astropy/tests/output_checker.py | 186 +
 astropy/tests/pytest_plugins.py | 371 +
 astropy/tests/pytest_repeat.py | 27 +
 astropy/tests/runner.py | 525 +
 astropy/tests/setup_package.py | 11 +
 astropy/tests/test_logger.py | 489 +
 astropy/tests/tests/__init__.py | 2 +
 .../tests/tests/data/open_file_detection.txt | 1 +
 astropy/tests/tests/test_imports.py | 72 +
 .../tests/tests/test_open_file_detection.py | 17 +
 astropy/tests/tests/test_quantity_helpers.py | 38 +
 astropy/tests/tests/test_run_tests.py | 72 +
 astropy/tests/tests/test_runner.py | 87 +
 astropy/tests/tests/test_skip_remote_data.py | 49 +
 astropy/tests/tests/test_socketblocker.py | 87 +
 astropy/time/__init__.py | 3 +
 astropy/time/core.py | 1758 +
 astropy/time/formats.py | 1150 +
 astropy/time/setup_package.py | 5 +
 astropy/time/tests/__init__.py | 0
 astropy/time/tests/test_basic.py | 1141 +
 astropy/time/tests/test_comparisons.py | 73 +
 astropy/time/tests/test_corrs.py | 66 +
 astropy/time/tests/test_delta.py | 422 +
 astropy/time/tests/test_guess.py | 31 +
 astropy/time/tests/test_methods.py | 394 +
 astropy/time/tests/test_pickle.py | 27 +
 astropy/time/tests/test_precision.py | 122 +
 .../time/tests/test_quantity_interaction.py | 218 +
 astropy/time/tests/test_sidereal.py | 167 +
 astropy/time/tests/test_ut1.py | 92 +
 astropy/time/utils.py | 122 +
 astropy/units/__init__.py | 40 +
 astropy/units/astrophys.py | 182 +
 astropy/units/cds.py | 190 +
 astropy/units/cgs.py | 136 +
 astropy/units/core.py | 2336 ++
 astropy/units/decorators.py | 232 +
 astropy/units/deprecated.py | 70 +
 astropy/units/equivalencies.py | 579 +
 astropy/units/format/__init__.py | 65 +
 astropy/units/format/base.py | 55 +
 astropy/units/format/cds.py | 359 +
 astropy/units/format/cds_lextab.py | 13 +
 astropy/units/format/cds_parsetab.py | 55 +
 astropy/units/format/console.py | 100 +
 astropy/units/format/fits.py | 160 +
 astropy/units/format/generic.py | 516 +
 astropy/units/format/generic_lextab.py | 12 +
 astropy/units/format/generic_parsetab.py | 86 +
 astropy/units/format/latex.py | 143 +
 astropy/units/format/ogip.py | 468 +
 astropy/units/format/ogip_lextab.py | 13 +
 astropy/units/format/ogip_parsetab.py | 72 +
 astropy/units/format/unicode_format.py | 71 +
 astropy/units/format/utils.py | 222 +
 astropy/units/format/vounit.py | 239 +
 astropy/units/function/__init__.py | 10 +
 astropy/units/function/core.py | 658 +
 astropy/units/function/logarithmic.py | 336 +
 .../units/function/magnitude_zero_points.py | 70 +
 astropy/units/function/mixin.py | 25 +
 astropy/units/function/units.py | 48 +
 astropy/units/imperial.py | 169 +
 astropy/units/physical.py | 135 +
 astropy/units/quantity.py | 1720 +
 astropy/units/quantity_helper.py | 639 +
 astropy/units/required_by_vounit.py | 61 +
 astropy/units/setup_package.py | 2 +
 astropy/units/si.py | 243 +
 astropy/units/tests/__init__.py | 0
 .../tests/py3_test_quantity_annotations.py | 291 +
 astropy/units/tests/test_deprecated.py | 63 +
 astropy/units/tests/test_equivalencies.py | 687 +
 astropy/units/tests/test_format.py | 482 +
 astropy/units/tests/test_logarithmic.py | 864 +
 astropy/units/tests/test_physical.py | 65 +
 astropy/units/tests/test_quantity.py | 1469 +
 .../tests/test_quantity_array_methods.py | 557 +
 .../units/tests/test_quantity_decorator.py | 329 +
 .../units/tests/test_quantity_non_ufuncs.py | 39 +
 astropy/units/tests/test_quantity_ufuncs.py | 976 +
 astropy/units/tests/test_units.py | 791 +
 astropy/units/utils.py | 268 +
 astropy/utils/__init__.py | 18 +
 astropy/utils/argparse.py | 56 +
 astropy/utils/codegen.py | 144 +
 astropy/utils/collections.py | 52 +
 astropy/utils/compat/__init__.py | 14 +
 astropy/utils/compat/_funcsigs.py | 813 +
 astropy/utils/compat/funcsigs.py | 6 +
 astropy/utils/compat/futures/__init__.py | 21 +
 astropy/utils/compat/futures/_base.py | 639 +
 astropy/utils/compat/futures/process.py | 389 +
 astropy/utils/compat/futures/thread.py | 153 +
 astropy/utils/compat/misc.py | 149 +
 astropy/utils/compat/numpy/__init__.py | 11 +
 astropy/utils/compat/numpy/core/__init__.py | 0
 astropy/utils/compat/numpy/core/multiarray.py | 87 +
 astropy/utils/compat/numpy/lib/__init__.py | 0
 .../utils/compat/numpy/lib/stride_tricks.py | 196 +
 astropy/utils/compat/numpy/tests/__init__.py | 0
 .../numpy/tests/test_broadcast_arrays.py | 60 +
 .../utils/compat/numpy/tests/test_matmul.py | 69 +
 astropy/utils/compat/numpycompat.py | 23 +
 astropy/utils/console.py | 1113 +
 astropy/utils/data.py | 1423 +
 astropy/utils/data_info.py | 628 +
 astropy/utils/decorators.py | 1171 +
 astropy/utils/exceptions.py | 46 +
 astropy/utils/iers/__init__.py | 4 +
 astropy/utils/iers/data/ReadMe.eopc04_IAU2000 | 41 +
 astropy/utils/iers/data/ReadMe.finals2000A | 57 +
 astropy/utils/iers/data/eopc04_IAU2000.62-now | 19875 ++++++++++
 astropy/utils/iers/iers.py | 710 +
 astropy/utils/iers/tests/__init__.py | 2 +
 .../iers/tests/finals2000A-2016-02-30-test | 181 +
 .../iers/tests/finals2000A-2016-04-30-test | 181 +
 astropy/utils/iers/tests/iers_a_excerpt | 60 +
 astropy/utils/iers/tests/test_iers.py | 223 +
 astropy/utils/introspection.py | 415 +
 astropy/utils/metadata.py | 420 +
 astropy/utils/misc.py | 1130 +
 astropy/utils/setup_package.py | 41 +
 astropy/utils/src/compiler.c | 129 +
 astropy/utils/state.py | 73 +
 astropy/utils/tests/__init__.py | 2 +
 astropy/utils/tests/data/.hidden_file.txt | 1 +
 astropy/utils/tests/data/alias.cfg | 2 +
 astropy/utils/tests/data/local.dat | 2 +
 astropy/utils/tests/data/local.dat.bz2 | Bin 0 -> 96 bytes
 astropy/utils/tests/data/local.dat.gz | Bin 0 -> 94 bytes
 astropy/utils/tests/data/local.dat.xz | Bin 0 -> 128 bytes
 .../utils/tests/data/test_package/__init__.py | 5 +
 .../tests/data/test_package/data/foo.txt | 0
 astropy/utils/tests/data/unicode.txt | 2 +
 astropy/utils/tests/data/unicode.txt.bz2 | Bin 0 -> 89 bytes
 astropy/utils/tests/data/unicode.txt.gz | Bin 0 -> 86 bytes
 astropy/utils/tests/data/unicode.txt.xz | Bin 0 -> 116 bytes
 astropy/utils/tests/test_codegen.py | 43 +
 astropy/utils/tests/test_collections.py | 35 +
 astropy/utils/tests/test_console.py | 224 +
 astropy/utils/tests/test_data.py | 492 +
 astropy/utils/tests/test_data_info.py | 51 +
 astropy/utils/tests/test_decorators.py | 758 +
 astropy/utils/tests/test_introspection.py | 118 +
 astropy/utils/tests/test_metadata.py | 212 +
 astropy/utils/tests/test_misc.py | 126 +
 astropy/utils/tests/test_timer.py | 91 +
 astropy/utils/tests/test_xml.py | 111 +
 astropy/utils/timer.py | 379 +
 astropy/utils/xml/__init__.py | 2 +
 astropy/utils/xml/check.py | 78 +
 astropy/utils/xml/iterparser.py | 216 +
 astropy/utils/xml/setup_package.py | 46 +
 astropy/utils/xml/src/expat_config.h | 93 +
 astropy/utils/xml/src/iterparse.c | 1412 +
 astropy/utils/xml/src/iterparse.map | 7 +
 astropy/utils/xml/tests/__init__.py | 0
 astropy/utils/xml/tests/test_iterparse.py | 134 +
 astropy/utils/xml/unescaper.py | 60 +
 astropy/utils/xml/validate.py | 58 +
 astropy/utils/xml/writer.py | 351 +
 astropy/version.py | 25 +
 astropy/visualization/__init__.py | 10 +
 astropy/visualization/hist.py | 63 +
 astropy/visualization/interval.py | 288 +
 astropy/visualization/lupton_rgb.py | 370 +
 astropy/visualization/mpl_normalize.py | 234 +
 astropy/visualization/mpl_style.py | 113 +
 astropy/visualization/scripts/__init__.py | 1 +
 astropy/visualization/scripts/fits2bitmap.py | 174 +
 .../visualization/scripts/tests/__init__.py | 1 +
 .../scripts/tests/test_fits2bitmap.py | 73 +
 astropy/visualization/stretch.py | 528 +
 astropy/visualization/tests/__init__.py | 1 +
 astropy/visualization/tests/test_histogram.py | 71 +
 astropy/visualization/tests/test_interval.py | 136 +
 .../visualization/tests/test_lupton_rgb.py | 243 +
 astropy/visualization/tests/test_norm.py | 172 +
 astropy/visualization/tests/test_stretch.py | 105 +
 astropy/visualization/tests/test_units.py | 51 +
 astropy/visualization/transform.py | 43 +
 astropy/visualization/units.py | 101 +
 astropy/visualization/wcsaxes/__init__.py | 38 +
 astropy/visualization/wcsaxes/axislabels.py | 126 +
 .../wcsaxes/coordinate_helpers.py | 781 +
 .../visualization/wcsaxes/coordinate_range.py | 126 +
 .../visualization/wcsaxes/coordinates_map.py | 163 +
 astropy/visualization/wcsaxes/core.py | 535 +
 .../wcsaxes/formatter_locator.py | 466 +
 astropy/visualization/wcsaxes/frame.py | 266 +
 astropy/visualization/wcsaxes/grid_paths.py | 121 +
 astropy/visualization/wcsaxes/patches.py | 91 +
 .../visualization/wcsaxes/tests/__init__.py | 8 +
 .../wcsaxes/tests/data/2MASS_k_header | 14 +
 .../wcsaxes/tests/data/cube_header | 20 +
 .../wcsaxes/tests/data/msx_header | 13 +
 .../wcsaxes/tests/data/rosat_header | 13 +
 .../wcsaxes/tests/data/slice_header | 16 +
 .../visualization/wcsaxes/tests/datasets.py | 48 +
 .../wcsaxes/tests/setup_package.py | 2 +
 .../wcsaxes/tests/test_coordinate_helpers.py | 14 +
 .../tests/test_display_world_coordinates.py | 117 +
 .../wcsaxes/tests/test_formatter_locator.py | 340 +
 .../visualization/wcsaxes/tests/test_frame.py | 172 +
 .../wcsaxes/tests/test_images.py | 539 +
 .../visualization/wcsaxes/tests/test_misc.py | 191 +
 .../tests/test_transform_coord_meta.py | 163 +
 .../wcsaxes/tests/test_transforms.py | 52 +
 .../visualization/wcsaxes/tests/test_utils.py | 71 +
 astropy/visualization/wcsaxes/ticklabels.py | 211 +
 astropy/visualization/wcsaxes/ticks.py | 181 +
 astropy/visualization/wcsaxes/transforms.py | 268 +
 astropy/visualization/wcsaxes/utils.py | 136 +
 astropy/vo/__init__.py | 28 +
 astropy/vo/client/__init__.py | 0
 astropy/vo/client/async.py | 86 +
 astropy/vo/client/conesearch.py | 524 +
 astropy/vo/client/exceptions.py | 46 +
 astropy/vo/client/setup_package.py | 10 +
 astropy/vo/client/tests/__init__.py | 0
 astropy/vo/client/tests/data/basic.json | 10 +
 .../client/tests/data/conesearch_error1.xml | 6 +
 .../client/tests/data/conesearch_error2.xml | 8 +
 .../client/tests/data/conesearch_error3.xml | 9 +
 .../client/tests/data/conesearch_error4.xml | 9 +
 astropy/vo/client/tests/test_conesearch.py | 280 +
 astropy/vo/client/tests/test_vos_catalog.py | 238 +
 astropy/vo/client/vos_catalog.py | 934 +
 astropy/vo/samp/__init__.py | 7 +
 astropy/vo/validator/__init__.py | 40 +
 astropy/vo/validator/data/conesearch_urls.txt | 30 +
 astropy/vo/validator/exceptions.py | 25 +
 astropy/vo/validator/inspect.py | 193 +
 astropy/vo/validator/setup_package.py | 12 +
 astropy/vo/validator/tests/__init__.py | 0
 .../tests/data/conesearch_error.json | 4 +
 .../tests/data/conesearch_exception.json | 4 +
 .../validator/tests/data/conesearch_good.json | 47 +
 .../validator/tests/data/conesearch_warn.json | 4 +
 astropy/vo/validator/tests/data/listcats1.out | 4 +
 astropy/vo/validator/tests/data/listcats2.out | 2 +
 astropy/vo/validator/tests/data/printcat.out | 44 +
 astropy/vo/validator/tests/data/tally.out | 5 +
 .../vao_conesearch_sites_121107_subset.xml | 65 +
 astropy/vo/validator/tests/test_inpect.py | 86 +
 astropy/vo/validator/tests/test_validate.py | 95 +
 astropy/vo/validator/tstquery.py | 93 +
 astropy/vo/validator/validate.py | 346 +
 astropy/wcs/__init__.py | 42 +
 astropy/wcs/_docutil.py | 61 +
 astropy/wcs/docstrings.py | 2278 ++
 astropy/wcs/include/astropy_wcs/astropy_wcs.h | 22 +
 .../wcs/include/astropy_wcs/astropy_wcs_api.h | 122 +
 astropy/wcs/include/astropy_wcs/distortion.h | 108 +
 .../wcs/include/astropy_wcs/distortion_wrap.h | 24 +
 astropy/wcs/include/astropy_wcs/docstrings.h | 154 +
 astropy/wcs/include/astropy_wcs/isnan.h | 42 +
 astropy/wcs/include/astropy_wcs/pipeline.h | 105 +
 astropy/wcs/include/astropy_wcs/pyutil.h | 362 +
 astropy/wcs/include/astropy_wcs/sip.h | 170 +
 astropy/wcs/include/astropy_wcs/sip_wrap.h | 23 +
 .../wcs/include/astropy_wcs/str_list_proxy.h | 38 +
 .../wcs/include/astropy_wcs/unit_list_proxy.h | 47 +
 astropy/wcs/include/astropy_wcs/util.h | 35 +
 astropy/wcs/include/astropy_wcs/wcsconfig.h | 38 +
 .../include/astropy_wcs/wcslib_tabprm_wrap.h | 25 +
 .../include/astropy_wcs/wcslib_units_wrap.h | 33 +
 astropy/wcs/include/astropy_wcs/wcslib_wrap.h | 26 +
 .../include/astropy_wcs/wcslib_wtbarr_wrap.h | 25 +
 astropy/wcs/include/astropy_wcs_api.h | 1 +
 astropy/wcs/include/wcsconfig.h | 38 +
 astropy/wcs/include/wcslib/cel.h | 458 +
 astropy/wcs/include/wcslib/lin.h | 681 +
 astropy/wcs/include/wcslib/prj.h | 846 +
 astropy/wcs/include/wcslib/spc.h | 909 +
 astropy/wcs/include/wcslib/spx.h | 559 +
 astropy/wcs/include/wcslib/tab.h | 635 +
 astropy/wcs/include/wcslib/wcs.h | 1749 +
 astropy/wcs/include/wcslib/wcserr.h | 257 +
 astropy/wcs/include/wcslib/wcsmath.h | 75 +
 astropy/wcs/include/wcslib/wcsprintf.h | 152 +
 astropy/wcs/setup_package.py | 363 +
 astropy/wcs/src/astropy_wcs.c | 908 +
 astropy/wcs/src/astropy_wcs_api.c | 56 +
 astropy/wcs/src/distortion.c | 226 +
 astropy/wcs/src/distortion_wrap.c | 359 +
 astropy/wcs/src/docstrings.c | 5423 +++
 astropy/wcs/src/pipeline.c | 257 +
 astropy/wcs/src/pyutil.c | 965 +
 astropy/wcs/src/sip.c | 332 +
 astropy/wcs/src/sip_wrap.c | 531 +
 astropy/wcs/src/str_list_proxy.c | 283 +
 astropy/wcs/src/unit_list_proxy.c | 369 +
 astropy/wcs/src/util.c | 40 +
 astropy/wcs/src/wcslib_tabprm_wrap.c | 485 +
 astropy/wcs/src/wcslib_wrap.c | 3502 ++
 astropy/wcs/tests/__init__.py | 2 +
 astropy/wcs/tests/data/2wcses.hdr | 1 +
 astropy/wcs/tests/data/3d_cd.hdr | 16 +
 astropy/wcs/tests/data/defunct_keywords.hdr | 1 +
 astropy/wcs/tests/data/dist.fits | Bin 0 -> 23040 bytes
 astropy/wcs/tests/data/dist_lookup.fits.gz | Bin 0 -> 68483 bytes
 astropy/wcs/tests/data/header_newlines.fits | 1 +
 astropy/wcs/tests/data/invalid_header.hdr | 1 +
 astropy/wcs/tests/data/irac_sip.hdr | 1 +
 astropy/wcs/tests/data/j94f05bgq_flt.fits | 1 +
 astropy/wcs/tests/data/locale.hdr | 1 +
 astropy/wcs/tests/data/nonstandard_units.hdr | 1 +
 astropy/wcs/tests/data/outside_sky.hdr | 1 +
 astropy/wcs/tests/data/sip-broken.hdr | 1 +
 astropy/wcs/tests/data/sip.fits | 1 +
 astropy/wcs/tests/data/sip2.fits | 1 +
 astropy/wcs/tests/data/siponly.hdr | 1 +
 astropy/wcs/tests/data/sub-segfault.hdr | 28 +
 astropy/wcs/tests/data/too_many_pv.hdr | 1 +
 astropy/wcs/tests/data/tpvonly.hdr | 1 +
 astropy/wcs/tests/data/unit.hdr | 1 +
 astropy/wcs/tests/data/validate.5.0.txt | 16 +
 astropy/wcs/tests/data/validate.5.13.txt | 16 +
 astropy/wcs/tests/data/validate.fits | 1 +
 astropy/wcs/tests/data/validate.txt | 16 +
 astropy/wcs/tests/data/zpn-hole.hdr | 1 +
 astropy/wcs/tests/extension/__init__.py | 0
 astropy/wcs/tests/extension/setup.py | 50 +
 astropy/wcs/tests/extension/test_extension.py | 75 +
 astropy/wcs/tests/extension/wcsapi_test.c | 76 +
 astropy/wcs/tests/maps/1904-66_AIR.hdr | 1 +
 astropy/wcs/tests/maps/1904-66_AIT.hdr | 1 +
 astropy/wcs/tests/maps/1904-66_ARC.hdr | 1 +
 astropy/wcs/tests/maps/1904-66_AZP.hdr | 1 +
 astropy/wcs/tests/maps/1904-66_BON.hdr | 1 +
 astropy/wcs/tests/maps/1904-66_CAR.hdr | 1 +
 astropy/wcs/tests/maps/1904-66_CEA.hdr | 1 +
 astropy/wcs/tests/maps/1904-66_COD.hdr | 1 +
 astropy/wcs/tests/maps/1904-66_COE.hdr | 1 +
 astropy/wcs/tests/maps/1904-66_COO.hdr | 1 +
 astropy/wcs/tests/maps/1904-66_COP.hdr | 1 +
 astropy/wcs/tests/maps/1904-66_CSC.hdr | 1 +
 astropy/wcs/tests/maps/1904-66_CYP.hdr | 1 +
 astropy/wcs/tests/maps/1904-66_HPX.hdr | 1 +
 astropy/wcs/tests/maps/1904-66_MER.hdr | 1 +
 astropy/wcs/tests/maps/1904-66_MOL.hdr | 1 +
 astropy/wcs/tests/maps/1904-66_NCP.hdr | 1 +
 astropy/wcs/tests/maps/1904-66_PAR.hdr | 1 +
 astropy/wcs/tests/maps/1904-66_PCO.hdr | 1 +
 astropy/wcs/tests/maps/1904-66_QSC.hdr | 1 +
 astropy/wcs/tests/maps/1904-66_SFL.hdr | 1 +
 astropy/wcs/tests/maps/1904-66_SIN.hdr | 1 +
 astropy/wcs/tests/maps/1904-66_STG.hdr | 1 +
 astropy/wcs/tests/maps/1904-66_SZP.hdr | 1 +
 astropy/wcs/tests/maps/1904-66_TAN.hdr | 1 +
 astropy/wcs/tests/maps/1904-66_TSC.hdr | 1 +
 astropy/wcs/tests/maps/1904-66_ZEA.hdr | 1 +
 astropy/wcs/tests/maps/1904-66_ZPN.hdr | 1 +
 astropy/wcs/tests/spectra/orion-freq-1.hdr | 1 +
 astropy/wcs/tests/spectra/orion-freq-4.hdr | 1 +
 astropy/wcs/tests/spectra/orion-velo-1.hdr | 1 +
 astropy/wcs/tests/spectra/orion-velo-4.hdr | 1 +
 astropy/wcs/tests/spectra/orion-wave-1.hdr | 1 +
 astropy/wcs/tests/spectra/orion-wave-4.hdr | 1 +
 astropy/wcs/tests/test_pickle.py | 103 +
 astropy/wcs/tests/test_profiling.py | 72 +
 astropy/wcs/tests/test_utils.py | 437 +
 astropy/wcs/tests/test_wcs.py | 1061 +
 astropy/wcs/tests/test_wcsprm.py | 1069 +
 astropy/wcs/utils.py | 506 +
 astropy/wcs/wcs.py | 3294 ++
 astropy/wcs/wcslint.py | 19 +
 astropy_helpers/CHANGES.rst | 440 +
 astropy_helpers/LICENSE.rst | 26 +
 astropy_helpers/README.rst | 51 +
 astropy_helpers/ah_bootstrap.py | 958 +
 .../astropy_helpers.egg-info/PKG-INFO | 74 +
 .../astropy_helpers.egg-info/SOURCES.txt | 78 +
 .../dependency_links.txt | 1 +
 .../astropy_helpers.egg-info/not-zip-safe | 1 +
 .../astropy_helpers.egg-info/top_level.txt | 1 +
 astropy_helpers/astropy_helpers/__init__.py | 52 +
 .../astropy_helpers/commands/__init__.py | 0
 .../astropy_helpers/commands/_dummy.py | 81 +
 .../astropy_helpers/commands/_test_compat.py | 307 +
 .../astropy_helpers/commands/build_ext.py | 474 +
 .../astropy_helpers/commands/build_py.py | 39 +
 .../astropy_helpers/commands/build_sphinx.py | 261 +
 .../astropy_helpers/commands/install.py | 14 +
 .../astropy_helpers/commands/install_lib.py | 14 +
 .../astropy_helpers/commands/register.py | 53 +
 .../astropy_helpers/commands/setup_package.py | 5 +
 .../astropy_helpers/commands/src/compiler.c | 129 +
 .../astropy_helpers/commands/test.py | 35 +
 .../astropy_helpers/compat/__init__.py | 12 +
 .../astropy_helpers/distutils_helpers.py | 257 +
 .../astropy_helpers/extern/__init__.py | 11 +
 .../extern/automodapi/__init__.py | 1 +
 .../extern/automodapi/autodoc_enhancements.py | 135 +
 .../extern/automodapi/automodapi.py | 423 +
 .../extern/automodapi/automodsumm.py | 652 +
 .../extern/automodapi/smart_resolver.py | 92 +
 .../templates/autosummary_core/base.rst | 10 +
 .../templates/autosummary_core/class.rst | 65 +
 .../templates/autosummary_core/module.rst | 41 +
 .../extern/automodapi/utils.py | 214 +
 .../extern/numpydoc/__init__.py | 5 +
 .../extern/numpydoc/docscrape.py | 603 +
 .../extern/numpydoc/docscrape_sphinx.py | 309 +
 .../extern/numpydoc/numpydoc.py | 271 +
 .../numpydoc/templates/numpydoc_docstring.rst | 16 +
 .../astropy_helpers/extern/setup_package.py | 4 +
 .../astropy_helpers/git_helpers.py | 193 +
 .../astropy_helpers/openmp_helpers.py | 107 +
 .../astropy_helpers/setup_helpers.py | 771 +
 .../astropy_helpers/sphinx/__init__.py | 8 +
 .../astropy_helpers/sphinx/conf.py | 344 +
 .../astropy_helpers/sphinx/ext/__init__.py | 2 +
 .../sphinx/ext/changelog_links.py | 82 +
 .../astropy_helpers/sphinx/ext/doctest.py | 56 +
 .../sphinx/ext/edit_on_github.py | 168 +
 .../sphinx/ext/tests/__init__.py | 0
 .../astropy_helpers/sphinx/ext/tocdepthfix.py | 22 +
 .../sphinx/local/python2_local_links.inv | Bin 0 -> 562 bytes
 .../sphinx/local/python2_local_links.txt | 25 +
 .../sphinx/local/python3_local_links.inv | Bin 0 -> 658 bytes
 .../sphinx/local/python3_local_links.txt | 38 +
 .../astropy_helpers/sphinx/setup_package.py | 10 +
 .../themes/bootstrap-astropy/globaltoc.html | 3 +
 .../themes/bootstrap-astropy/layout.html | 96 +
 .../themes/bootstrap-astropy/localtoc.html | 3 +
 .../themes/bootstrap-astropy/searchbox.html | 7 +
 .../static/astropy_linkout.svg | 75 +
 .../static/astropy_linkout_20.png | Bin 0 -> 1725 bytes
 .../bootstrap-astropy/static/astropy_logo.ico | Bin 0 -> 32988 bytes
 .../bootstrap-astropy/static/astropy_logo.svg | 87 +
 .../static/astropy_logo_32.png | Bin 0 -> 1884 bytes
 .../static/bootstrap-astropy.css | 601 +
 .../bootstrap-astropy/static/copybutton.js | 63 +
 .../bootstrap-astropy/static/sidebar.js | 160 +
 .../themes/bootstrap-astropy/theme.conf | 10 +
 .../astropy_helpers/test_helpers.py | 13 +
 astropy_helpers/astropy_helpers/utils.py | 862 +
 astropy_helpers/astropy_helpers/version.py | 25 +
 .../astropy_helpers/version_helpers.py | 300 +
 astropy_helpers/ez_setup.py | 414 +
 .../licenses/LICENSE_ASTROSCRAPPY.rst | 28 +
 .../licenses/LICENSE_COPYBUTTON.rst | 50 +
 astropy_helpers/licenses/LICENSE_NUMPYDOC.rst | 94 +
 cextern/.gitignore | 1 +
 cextern/README.rst | 8 +
 cextern/cfitsio/License.txt | 25 +
 cextern/cfitsio/adler32.c | 167 +
 cextern/cfitsio/buffers.c | 1371 +
 cextern/cfitsio/cfileio.c | 7267 ++++
 cextern/cfitsio/changes.txt | 4050 ++
 cextern/cfitsio/checksum.c | 508 +
 cextern/cfitsio/crc32.c | 440 +
 cextern/cfitsio/crc32.h | 441 +
 cextern/cfitsio/deflate.c | 1832 +
 cextern/cfitsio/deflate.h | 340 +
 cextern/cfitsio/drvrfile.c | 973 +
 cextern/cfitsio/drvrgsiftp.c | 522 +
 cextern/cfitsio/drvrgsiftp.h | 21 +
 cextern/cfitsio/drvrmem.c | 1214 +
 cextern/cfitsio/drvrnet.c | 2741 ++
 cextern/cfitsio/drvrsmem.c | 973 +
 cextern/cfitsio/drvrsmem.h | 179 +
 cextern/cfitsio/editcol.c | 2477 ++
 cextern/cfitsio/edithdu.c | 883 +
 cextern/cfitsio/eval.l | 545 +
 cextern/cfitsio/eval.y | 5837 +++
 cextern/cfitsio/eval_defs.h | 163 +
 cextern/cfitsio/eval_f.c | 2839 ++
 cextern/cfitsio/eval_l.c | 2252 ++
 cextern/cfitsio/eval_tab.h | 42 +
 cextern/cfitsio/eval_y.c | 7333 ++++
 cextern/cfitsio/fits_hcompress.c | 1858 +
 cextern/cfitsio/fits_hdecompress.c | 2618 ++
 cextern/cfitsio/fitscore.c | 9349 +++++
 cextern/cfitsio/fitsio.h | 1953 +
 cextern/cfitsio/fitsio2.h | 1209 +
 cextern/cfitsio/getcol.c | 1055 +
 cextern/cfitsio/getcolb.c | 2002 +
 cextern/cfitsio/getcold.c | 1677 +
 cextern/cfitsio/getcole.c | 1680 +
 cextern/cfitsio/getcoli.c | 1902 +
 cextern/cfitsio/getcolj.c | 3728 ++
 cextern/cfitsio/getcolk.c | 1895 +
 cextern/cfitsio/getcoll.c | 621 +
 cextern/cfitsio/getcols.c | 835 +
 cextern/cfitsio/getcolsb.c | 1991 +
 cextern/cfitsio/getcolui.c | 1908 +
 cextern/cfitsio/getcoluj.c | 1902 +
 cextern/cfitsio/getcoluk.c | 1917 +
 cextern/cfitsio/getkey.c | 3247 ++
 cextern/cfitsio/group.c | 6463 +++
 cextern/cfitsio/group.h | 65 +
 cextern/cfitsio/grparser.c | 1379 +
 cextern/cfitsio/grparser.h | 185 +
 cextern/cfitsio/histo.c | 2221 ++
 cextern/cfitsio/imcompress.c | 9837 +++++
 cextern/cfitsio/infback.c | 632 +
 cextern/cfitsio/inffast.c | 340 +
 cextern/cfitsio/inffast.h | 11 +
 cextern/cfitsio/inffixed.h | 94 +
 cextern/cfitsio/inflate.c | 1480 +
 cextern/cfitsio/inflate.h | 122 +
 cextern/cfitsio/inftrees.c | 330 +
 cextern/cfitsio/inftrees.h | 62 +
 cextern/cfitsio/iraffits.c | 2073 +
 cextern/cfitsio/longnam.h | 593 +
 cextern/cfitsio/modkey.c | 1755 +
 cextern/cfitsio/pliocomp.c | 331 +
 cextern/cfitsio/putcol.c | 1929 +
 cextern/cfitsio/putcolb.c | 1013 +
 cextern/cfitsio/putcold.c | 1060 +
 cextern/cfitsio/putcole.c | 1074 +
 cextern/cfitsio/putcoli.c | 986 +
 cextern/cfitsio/putcolj.c | 1992 +
 cextern/cfitsio/putcolk.c | 1013 +
 cextern/cfitsio/putcoll.c | 369 +
 cextern/cfitsio/putcols.c | 303 +
 cextern/cfitsio/putcolsb.c | 974 +
 cextern/cfitsio/putcolu.c | 629 +
 cextern/cfitsio/putcolui.c | 969 +
 cextern/cfitsio/putcoluj.c | 977 +
 cextern/cfitsio/putcoluk.c | 993 +
 cextern/cfitsio/putkey.c | 3085 ++
 cextern/cfitsio/quantize.c | 3920 ++
 cextern/cfitsio/region.c | 1752 +
 cextern/cfitsio/region.h | 82 +
 cextern/cfitsio/ricecomp.c | 1353 +
 cextern/cfitsio/scalnull.c | 229 +
 cextern/cfitsio/swapproc.c | 247 +
 cextern/cfitsio/trees.c | 1242 +
 cextern/cfitsio/trees.h | 128 +
 cextern/cfitsio/uncompr.c | 57 +
 cextern/cfitsio/wcssub.c | 1043 +
 cextern/cfitsio/wcsutil.c | 502 +
 cextern/cfitsio/zcompress.c | 504 +
 cextern/cfitsio/zconf.h | 426 +
 cextern/cfitsio/zlib.h | 1613 +
 cextern/cfitsio/zuncompress.c | 603 +
 cextern/cfitsio/zutil.c | 316 +
 cextern/cfitsio/zutil.h | 272 +
 cextern/erfa/README.rst | 105 +
 cextern/erfa/a2af.c | 129 +
 cextern/erfa/a2tf.c | 125 +
 cextern/erfa/ab.c | 137 +
 cextern/erfa/af2a.c | 116 +
 cextern/erfa/anp.c | 91 +
 cextern/erfa/anpm.c | 91 +
 cextern/erfa/apcg.c | 181 +
 cextern/erfa/apcg13.c | 184 +
 cextern/erfa/apci.c | 190 +
 cextern/erfa/apci13.c | 202 +
 cextern/erfa/apco.c | 264 +
 cextern/erfa/apco13.c | 287 +
 cextern/erfa/apcs.c | 233 +
 cextern/erfa/apcs13.c | 191 +
 cextern/erfa/aper.c | 162 +
 cextern/erfa/aper13.c | 181 +
 cextern/erfa/apio.c | 213 +
 cextern/erfa/apio13.c | 259 +
 cextern/erfa/atci13.c | 159 +
 cextern/erfa/atciq.c | 154 +
 cextern/erfa/atciqn.c | 191 +
 cextern/erfa/atciqz.c | 153 +
 cextern/erfa/atco13.c | 243 +
 cextern/erfa/atic13.c | 152 +
 cextern/erfa/aticq.c | 199 +
 cextern/erfa/aticqn.c | 237 +
 cextern/erfa/atio13.c | 222 +
 cextern/erfa/atioq.c | 243 +
 cextern/erfa/atoc13.c | 233 +
 cextern/erfa/atoi13.c | 228 +
 cextern/erfa/atoiq.c | 260 +
 cextern/erfa/bi00.c | 125 +
 cextern/erfa/bp00.c | 181 +
 cextern/erfa/bp06.c | 152 +
 cextern/erfa/bpn2xy.c | 109 +
 cextern/erfa/c2i00a.c | 148 +
 cextern/erfa/c2i00b.c | 148 +
 cextern/erfa/c2i06a.c | 145 +
 cextern/erfa/c2ibpn.c | 151 +
 cextern/erfa/c2ixy.c | 140 +
 cextern/erfa/c2ixys.c | 132 +
 cextern/erfa/c2s.c | 105 +
 cextern/erfa/c2t00a.c | 163 +
 cextern/erfa/c2t00b.c | 159 +
 cextern/erfa/c2t06a.c | 161 +
 cextern/erfa/c2tcio.c | 131 +
 cextern/erfa/c2teqx.c | 131 +
 cextern/erfa/c2tpe.c | 176 +
 cextern/erfa/c2txy.c | 168 +
 cextern/erfa/cal2jd.c | 148 +
 cextern/erfa/cp.c | 89 +
 cextern/erfa/cpv.c | 91 +
 cextern/erfa/cr.c | 92 +
 cextern/erfa/d2dtf.c | 245 +
 cextern/erfa/d2tf.c | 169 +
 cextern/erfa/dat.c | 306 +
 cextern/erfa/dtdb.c | 1222 +
 cextern/erfa/dtf2d.c | 212 +
 cextern/erfa/eceq06.c | 141 +
 cextern/erfa/ecm06.c | 144 +
 cextern/erfa/ee00.c | 137 +
 cextern/erfa/ee00a.c | 144 +
 cextern/erfa/ee00b.c | 150 +
 cextern/erfa/ee06a.c | 131 +
 cextern/erfa/eect00.c | 291 +
 cextern/erfa/eform.c | 155 +
 cextern/erfa/eo06a.c | 140 +
 cextern/erfa/eors.c | 117 +
 cextern/erfa/epb.c | 100 +
 cextern/erfa/epb2jd.c | 100 +
 cextern/erfa/epj.c | 102 +
 cextern/erfa/epj2jd.c | 100 +
 cextern/erfa/epv00.c | 2598 ++
 cextern/erfa/eqec06.c | 142 +
 cextern/erfa/eqeq94.c | 141 +
 cextern/erfa/era00.c | 145 +
 cextern/erfa/erfa.h | 520 +
 cextern/erfa/erfaextra.h | 59 +
 cextern/erfa/erfam.h | 208 +
 cextern/erfa/erfaversion.c | 53 +
 cextern/erfa/fad03.c | 112 +
 cextern/erfa/fae03.c | 111 +
 cextern/erfa/faf03.c | 115 +
 cextern/erfa/faju03.c | 111 +
 cextern/erfa/fal03.c | 112 +
 cextern/erfa/falp03.c | 112 +
 cextern/erfa/fama03.c | 111 +
 cextern/erfa/fame03.c | 111 +
 cextern/erfa/fane03.c | 108 +
 cextern/erfa/faom03.c | 113 +
 cextern/erfa/fapa03.c | 112 +
 cextern/erfa/fasa03.c | 111 +
 cextern/erfa/faur03.c | 108 +
 cextern/erfa/fave03.c | 111 +
 cextern/erfa/fk52h.c | 152 +
 cextern/erfa/fk5hip.c | 135 +
 cextern/erfa/fk5hz.c | 169 +
 cextern/erfa/fw2m.c | 143 +
 cextern/erfa/fw2xy.c | 130 +
 cextern/erfa/g2icrs.c | 170 +
 cextern/erfa/gc2gd.c | 143 +
 cextern/erfa/gc2gde.c | 208 +
 cextern/erfa/gd2gc.c | 142 +
 cextern/erfa/gd2gce.c | 146 +
 cextern/erfa/gmst00.c | 154 +
 cextern/erfa/gmst06.c | 145 +
 cextern/erfa/gmst82.c | 160 +
 cextern/erfa/gst00a.c | 147 +
 cextern/erfa/gst00b.c | 155 +
 cextern/erfa/gst06.c | 149 +
 cextern/erfa/gst06a.c | 140 +
 cextern/erfa/gst94.c | 140 +
 cextern/erfa/h2fk5.c | 157 +
 cextern/erfa/hfk5z.c | 184 +
 cextern/erfa/icrs2g.c | 170 +
 cextern/erfa/ir.c | 92 +
 cextern/erfa/jd2cal.c | 164 +
 cextern/erfa/jdcalf.c | 170 +
 cextern/erfa/ld.c | 161 +
 cextern/erfa/ldn.c | 183 +
 cextern/erfa/ldsun.c | 115 +
 cextern/erfa/lteceq.c | 138 +
 cextern/erfa/ltecm.c | 157 +
 cextern/erfa/lteqec.c | 139 +
 cextern/erfa/ltp.c | 140 +
 cextern/erfa/ltpb.c | 133 +
 cextern/erfa/ltpecl.c | 177 +
 cextern/erfa/ltpequ.c | 177 +
 cextern/erfa/num00a.c | 130 +
 cextern/erfa/num00b.c | 130 +
 cextern/erfa/num06a.c | 134 +
 cextern/erfa/numat.c | 118 +
 cextern/erfa/nut00a.c | 2056 +
 cextern/erfa/nut00b.c | 381 +
 cextern/erfa/nut06a.c | 162 +
 cextern/erfa/nut80.c | 334 +
 cextern/erfa/nutm80.c | 126 +
 cextern/erfa/obl06.c | 127 +
 cextern/erfa/obl80.c | 127 +
 cextern/erfa/p06e.c | 330 +
 cextern/erfa/p2pv.c | 92 +
 cextern/erfa/p2s.c | 100 +
 cextern/erfa/pap.c | 148 +
 cextern/erfa/pas.c | 105 +
 cextern/erfa/pb06.c | 153 +
 cextern/erfa/pdp.c | 93 +
 cextern/erfa/pfw06.c | 174 +
 cextern/erfa/plan94.c | 523 +
 cextern/erfa/pm.c | 85 +
 cextern/erfa/pmat00.c | 127 +
 cextern/erfa/pmat06.c | 131 +
 cextern/erfa/pmat76.c | 150 +
 cextern/erfa/pmp.c | 94 +
 cextern/erfa/pmpx.c | 153 +
 cextern/erfa/pmsafe.c | 206 +
 cextern/erfa/pn.c | 118 +
 cextern/erfa/pn00.c | 186 +
 cextern/erfa/pn00a.c | 172 +
 cextern/erfa/pn00b.c | 172 +
 cextern/erfa/pn06.c | 196 +
 cextern/erfa/pn06a.c | 162 +
 cextern/erfa/pnm00a.c | 130 +
 cextern/erfa/pnm00b.c | 130 +
 cextern/erfa/pnm06a.c | 133 +
 cextern/erfa/pnm80.c | 135 +
 cextern/erfa/pom00.c | 124 +
 cextern/erfa/ppp.c | 94 +
 cextern/erfa/ppsp.c | 103 +
 cextern/erfa/pr00.c | 151 +
 cextern/erfa/prec76.c | 157 +
 cextern/erfa/pv2p.c | 90 +
 cextern/erfa/pv2s.c | 153 +
 cextern/erfa/pvdpv.c | 111 +
 cextern/erfa/pvm.c | 95 +
 cextern/erfa/pvmpv.c | 96 +
 cextern/erfa/pvppv.c | 96 +
 cextern/erfa/pvstar.c | 216 +
 cextern/erfa/pvtob.c | 162 +
 cextern/erfa/pvu.c | 102 +
 cextern/erfa/pvup.c | 97 +
 cextern/erfa/pvxpv.c | 116 +
 cextern/erfa/pxp.c | 103 +
 cextern/erfa/refco.c | 262 +
 cextern/erfa/rm2v.c | 120 +
 cextern/erfa/rv2m.c | 127 +
 cextern/erfa/rx.c | 119 +
 cextern/erfa/rxp.c | 108 +
 cextern/erfa/rxpv.c | 95 +
 cextern/erfa/rxr.c | 108 +
 cextern/erfa/ry.c | 119 +
 cextern/erfa/rz.c | 119 +
 cextern/erfa/s00.c | 380 +
 cextern/erfa/s00a.c | 152 +
 cextern/erfa/s00b.c | 152 +
 cextern/erfa/s06.c | 377 +
 cextern/erfa/s06a.c | 154 +
 cextern/erfa/s2c.c | 94 +
 cextern/erfa/s2p.c | 97 +
 cextern/erfa/s2pv.c | 112 +
 cextern/erfa/s2xpv.c | 96 +
 cextern/erfa/sepp.c | 114 +
 cextern/erfa/seps.c | 102 +
 cextern/erfa/sp00.c | 127 +
 cextern/erfa/starpm.c | 214 +
 cextern/erfa/starpv.c | 274 +
 cextern/erfa/sxp.c | 93 +
 cextern/erfa/sxpv.c | 94 +
 cextern/erfa/taitt.c | 119 +
 cextern/erfa/taiut1.c | 120 +
 cextern/erfa/taiutc.c | 168 +
 cextern/erfa/tcbtdb.c | 141 +
 cextern/erfa/tcgtt.c | 118 +
 cextern/erfa/tdbtcb.c | 146 +
 cextern/erfa/tdbtt.c | 130 +
 cextern/erfa/tf2a.c | 116 +
 cextern/erfa/tf2d.c | 116 +
 cextern/erfa/tr.c | 102 +
 cextern/erfa/trxp.c | 102 +
 cextern/erfa/trxpv.c | 102 +
 cextern/erfa/tttai.c | 119 +
 cextern/erfa/tttcg.c | 121 +
 cextern/erfa/tttdb.c | 130 +
 cextern/erfa/ttut1.c | 119 +
 cextern/erfa/ut1tai.c | 120 +
 cextern/erfa/ut1tt.c | 119 +
 cextern/erfa/ut1utc.c | 202 +
 cextern/erfa/utctai.c | 186 +
 cextern/erfa/utcut1.c | 156 +
 cextern/erfa/xy06.c | 2767 ++
 cextern/erfa/xys00a.c | 142 +
 cextern/erfa/xys00b.c | 142 +
 cextern/erfa/xys06a.c | 142 +
 cextern/erfa/zp.c | 86 +
 cextern/erfa/zpv.c | 88 +
 cextern/erfa/zr.c | 92 +
 cextern/expat/CMake.README | 42 +
 cextern/expat/CMakeLists.txt | 111 +
 cextern/expat/COPYING | 22 +
 cextern/expat/Changes | 205 +
 cextern/expat/ConfigureChecks.cmake | 44 +
 cextern/expat/MANIFEST | 141 +
 cextern/expat/Makefile.in | 201 +
 cextern/expat/README | 139 +
 cextern/expat/aclocal.m4 | 8460 ++++
 cextern/expat/amiga/Makefile | 336 +
 cextern/expat/amiga/README.txt | 98 +
 cextern/expat/amiga/expat.xml | 264 +
 cextern/expat/amiga/expat_68k.c | 939 +
 cextern/expat/amiga/expat_68k.h | 94 +
 cextern/expat/amiga/expat_68k_handler_stubs.c | 185 +
 cextern/expat/amiga/expat_base.h | 40 +
 cextern/expat/amiga/expat_lib.c | 247 +
 cextern/expat/amiga/expat_vectors.c | 505 +
 cextern/expat/amiga/include/inline4/expat.h | 94 +
 .../expat/amiga/include/interfaces/expat.h | 98 +
 cextern/expat/amiga/include/libraries/expat.h | 566 +
 cextern/expat/amiga/include/proto/expat.h | 52 +
 cextern/expat/amiga/launch.c | 57 +
 cextern/expat/amiga/stdlib.c | 109 +
 cextern/expat/bcb5/README.txt | 87 +
 cextern/expat/bcb5/all_projects.bpg | 49 +
 cextern/expat/bcb5/elements.bpf | 4 +
 cextern/expat/bcb5/elements.bpr | 149 +
 cextern/expat/bcb5/elements.mak | 186 +
 cextern/expat/bcb5/expat.bpf | 6 +
 cextern/expat/bcb5/expat.bpr | 140 +
 cextern/expat/bcb5/expat.mak | 187 +
 cextern/expat/bcb5/expat_static.bpf | 5 +
 cextern/expat/bcb5/expat_static.bpr | 143 +
 cextern/expat/bcb5/expat_static.mak | 189 +
 cextern/expat/bcb5/expatw.bpf | 6 +
 cextern/expat/bcb5/expatw.bpr | 146 +
 cextern/expat/bcb5/expatw.mak | 187 +
 cextern/expat/bcb5/expatw_static.bpf | 5 +
 cextern/expat/bcb5/expatw_static.bpr | 152 +
 cextern/expat/bcb5/expatw_static.mak | 190 +
 cextern/expat/bcb5/libexpat_mtd.def | 141 +
 cextern/expat/bcb5/libexpatw_mtd.def | 140 +
 cextern/expat/bcb5/makefile.mak | 37 +
 cextern/expat/bcb5/outline.bpf | 4 +
 cextern/expat/bcb5/outline.bpr | 132 +
 cextern/expat/bcb5/outline.mak | 186 +
 cextern/expat/bcb5/setup.bat | 9 +
 cextern/expat/bcb5/xmlwf.bpf | 7 +
 cextern/expat/bcb5/xmlwf.bpr | 136 +
 cextern/expat/bcb5/xmlwf.mak | 187 +
 cextern/expat/configure | 18632 +++++++++
 cextern/expat/configure.in | 155 +
 cextern/expat/conftools/PrintPath | 116 +
 .../expat/conftools/ac_c_bigendian_cross.m4 | 81 +
cextern/expat/conftools/expat.m4 | 43 +
cextern/expat/conftools/get-version.sh | 46 +
cextern/expat/conftools/install-sh | 520 +
cextern/expat/conftools/ltmain.sh | 9642 +++++
cextern/expat/doc/expat.png | Bin 0 -> 1027 bytes
cextern/expat/doc/reference.html | 2390 ++
cextern/expat/doc/style.css | 101 +
cextern/expat/doc/valid-xhtml10.png | Bin 0 -> 2368 bytes
cextern/expat/doc/xmlwf.1 | 251 +
cextern/expat/doc/xmlwf.sgml | 468 +
cextern/expat/examples/elements.c | 65 +
cextern/expat/examples/elements.dsp | 103 +
cextern/expat/examples/outline.c | 106 +
cextern/expat/examples/outline.dsp | 103 +
cextern/expat/expat.dsw | 110 +
cextern/expat/expat.pc.in | 11 +
cextern/expat/expat_config.h.cmake | 91 +
cextern/expat/expat_config.h.in | 102 +
cextern/expat/lib/Makefile.MPW | 206 +
cextern/expat/lib/amigaconfig.h | 32 +
cextern/expat/lib/ascii.h | 92 +
cextern/expat/lib/asciitab.h | 36 +
cextern/expat/lib/expat.dsp | 185 +
cextern/expat/lib/expat.h | 1047 +
cextern/expat/lib/expat_external.h | 115 +
cextern/expat/lib/expat_static.dsp | 162 +
cextern/expat/lib/expatw.dsp | 185 +
cextern/expat/lib/expatw_static.dsp | 162 +
cextern/expat/lib/iasciitab.h | 37 +
cextern/expat/lib/internal.h | 73 +
cextern/expat/lib/latin1tab.h | 36 +
cextern/expat/lib/libexpat.def | 73 +
cextern/expat/lib/libexpatw.def | 73 +
cextern/expat/lib/macconfig.h | 53 +
cextern/expat/lib/nametab.h | 150 +
cextern/expat/lib/utf8tab.h | 37 +
cextern/expat/lib/winconfig.h | 30 +
cextern/expat/lib/xmlparse.c | 6403 +++
cextern/expat/lib/xmlrole.c | 1336 +
cextern/expat/lib/xmlrole.h | 114 +
cextern/expat/lib/xmltok.c | 1651 +
cextern/expat/lib/xmltok.h | 316 +
cextern/expat/lib/xmltok_impl.c | 1783 +
cextern/expat/lib/xmltok_impl.h | 46 +
cextern/expat/lib/xmltok_ns.c | 115 +
cextern/expat/m4/libtool.m4 | 7851 ++++
cextern/expat/m4/ltoptions.m4 | 369 +
cextern/expat/m4/ltsugar.m4 | 123 +
cextern/expat/m4/ltversion.m4 | 23 +
cextern/expat/m4/lt~obsolete.m4 | 98 +
cextern/expat/tests/README.txt | 13 +
cextern/expat/tests/benchmark/README.txt | 16 +
cextern/expat/tests/benchmark/benchmark.c | 114 +
cextern/expat/tests/benchmark/benchmark.dsp | 88 +
cextern/expat/tests/benchmark/benchmark.dsw | 44 +
cextern/expat/tests/chardata.c | 131 +
cextern/expat/tests/chardata.h | 40 +
cextern/expat/tests/minicheck.c | 182 +
cextern/expat/tests/minicheck.h | 90 +
cextern/expat/tests/runtests.c | 1515 +
cextern/expat/tests/runtestspp.cpp | 6 +
cextern/expat/tests/xmltest.sh | 142 +
cextern/expat/vms/README.vms | 23 +
cextern/expat/vms/descrip.mms | 70 +
cextern/expat/vms/expat_config.h | 52 +
cextern/expat/win32/MANIFEST.txt | 27 +
cextern/expat/win32/README.txt | 80 +
cextern/expat/win32/expat.iss | 69 +
cextern/expat/xmlwf/codepage.c | 68 +
cextern/expat/xmlwf/codepage.h | 6 +
cextern/expat/xmlwf/ct.c | 147 +
cextern/expat/xmlwf/filemap.h | 17 +
cextern/expat/xmlwf/readfilemap.c | 100 +
cextern/expat/xmlwf/unixfilemap.c | 65 +
cextern/expat/xmlwf/win32filemap.c | 96 +
cextern/expat/xmlwf/xmlfile.c | 244 +
cextern/expat/xmlwf/xmlfile.h | 20 +
cextern/expat/xmlwf/xmlmime.c | 163 +
cextern/expat/xmlwf/xmlmime.h | 19 +
cextern/expat/xmlwf/xmltchar.h | 36 +
cextern/expat/xmlwf/xmlurl.h | 13 +
cextern/expat/xmlwf/xmlwf.c | 861 +
cextern/expat/xmlwf/xmlwf.dsp | 139 +
cextern/expat/xmlwf/xmlwin32url.cxx | 395 +
cextern/trim_wcslib.sh | 13 +
cextern/wcslib/C/GNUmakefile | 469 +
cextern/wcslib/C/cel.c | 527 +
cextern/wcslib/C/cel.h | 458 +
cextern/wcslib/C/dis.c | 3587 ++
cextern/wcslib/C/dis.h | 1036 +
cextern/wcslib/C/fitshdr.h | 444 +
cextern/wcslib/C/fitshdr.l | 570 +
cextern/wcslib/C/flexed/README | 5 +
cextern/wcslib/C/flexed/fitshdr.c | 11951 ++++++
cextern/wcslib/C/flexed/wcsbth.c | 25592 ++++++++++++
cextern/wcslib/C/flexed/wcspih.c | 21129 ++++++++++
cextern/wcslib/C/flexed/wcsulex.c | 9437 +++++
cextern/wcslib/C/flexed/wcsutrn.c | 5644 +++
cextern/wcslib/C/getwcstab.c | 154 +
cextern/wcslib/C/getwcstab.h | 193 +
cextern/wcslib/C/lin.c | 1288 +
cextern/wcslib/C/lin.h | 681 +
cextern/wcslib/C/log.c | 112 +
cextern/wcslib/C/log.h | 167 +
cextern/wcslib/C/prj.c | 8499 ++++
cextern/wcslib/C/prj.h | 846 +
cextern/wcslib/C/spc.c | 1415 +
cextern/wcslib/C/spc.h | 909 +
cextern/wcslib/C/sph.c | 460 +
cextern/wcslib/C/sph.h | 251 +
cextern/wcslib/C/spx.c | 1162 +
cextern/wcslib/C/spx.h | 559 +
cextern/wcslib/C/tab.c | 1647 +
cextern/wcslib/C/tab.h | 635 +
cextern/wcslib/C/wcs.c | 3791 ++
cextern/wcslib/C/wcs.h | 1749 +
cextern/wcslib/C/wcsbth.l | 2966 ++
cextern/wcslib/C/wcserr.c | 160 +
cextern/wcslib/C/wcserr.h | 257 +
cextern/wcslib/C/wcsfix.c | 777 +
cextern/wcslib/C/wcsfix.h | 400 +
cextern/wcslib/C/wcshdr.c | 1931 +
cextern/wcslib/C/wcshdr.h | 1233 +
cextern/wcslib/C/wcslib.h | 62 +
cextern/wcslib/C/wcsmath.h | 75 +
cextern/wcslib/C/wcspih.l | 2473 ++
cextern/wcslib/C/wcsprintf.c | 171 +
cextern/wcslib/C/wcsprintf.h | 152 +
cextern/wcslib/C/wcstrig.c | 219 +
cextern/wcslib/C/wcstrig.h | 217 +
cextern/wcslib/C/wcsulex.l | 1001 +
cextern/wcslib/C/wcsunits.c | 225 +
cextern/wcslib/C/wcsunits.h | 411 +
cextern/wcslib/C/wcsutil.c | 383 +
cextern/wcslib/C/wcsutil.h | 385 +
cextern/wcslib/C/wcsutrn.l | 337 +
cextern/wcslib/CHANGES | 2577 ++
cextern/wcslib/COPYING | 674 +
cextern/wcslib/COPYING.LESSER | 165 +
cextern/wcslib/GNUmakefile | 205 +
cextern/wcslib/INSTALL | 336 +
cextern/wcslib/README | 44 +
cextern/wcslib/THANKS | 94 +
cextern/wcslib/VALIDATION | 520 +
cextern/wcslib/config/config.guess | 1519 +
cextern/wcslib/config/config.sub | 1766 +
cextern/wcslib/config/elisp-comp | 60 +
cextern/wcslib/config/install-sh | 295 +
cextern/wcslib/config/mdate-sh | 133 +
cextern/wcslib/config/missing | 360 +
cextern/wcslib/config/mkinstalldirs | 137 +
cextern/wcslib/config/move-if-change | 13 +
cextern/wcslib/configure | 9765 +++++
cextern/wcslib/configure.ac | 517 +
cextern/wcslib/flavours | 174 +
cextern/wcslib/makedefs.in | 240 +
cextern/wcslib/wcsconfig.h.in | 21 +
cextern/wcslib/wcsconfig_f77.h.in | 21 +
cextern/wcslib/wcsconfig_tests.h.in | 18 +
cextern/wcslib/wcsconfig_utils.h.in | 35 +
cextern/wcslib/wcslib.pc.in | 11 +
docs/Makefile | 133 +
docs/_pkgtemplate.rst | 96 +
docs/_static/astropy_banner.svg | 263 +
docs/_static/astropy_banner_96.png | Bin 0 -> 26836 bytes
docs/_static/astropy_logo.pdf | Bin 0 -> 5384 bytes
docs/_static/timer_prediction_pow10.png | Bin 0 -> 28453 bytes
docs/_templates/autosummary/base.rst | 1 +
docs/_templates/autosummary/class.rst | 1 +
docs/_templates/autosummary/module.rst | 1 +
docs/analytic_functions/index.rst | 138 +
docs/changelog.rst | 7 +
docs/conf.py | 250 +
docs/config/config_0_4_transition.rst | 327 +
docs/config/index.rst | 368 +
docs/constants/index.rst | 127 +
docs/convolution/images/astropy.png | Bin 0 -> 9916 bytes
docs/convolution/images/original.png | Bin 0 -> 10448 bytes
docs/convolution/images/scipy.png | Bin 0 -> 10058 bytes
docs/convolution/index.rst | 414 +
docs/convolution/kernels.rst | 338 +
docs/convolution/non_normalized_kernels.rst | 105 +
docs/convolution/using.rst | 94 +
docs/coordinates/angles.rst | 167 +
docs/coordinates/definitions.rst | 43 +
docs/coordinates/formatting.rst | 35 +
docs/coordinates/frames.rst | 421 +
docs/coordinates/galactocentric.rst | 120 +
docs/coordinates/index.rst | 406 +
docs/coordinates/inplace.rst | 43 +
docs/coordinates/matchsep.rst | 241 +
docs/coordinates/references.txt | 6 +
docs/coordinates/remote_methods.rst | 65 +
docs/coordinates/representations.rst | 596 +
docs/coordinates/skycoord.rst | 947 +
docs/coordinates/solarsystem.rst | 120 +
docs/coordinates/transforming.rst | 120 +
docs/coordinates/velocities.rst | 363 +
docs/cosmology/index.rst | 478 +
docs/credits.rst | 266 +
docs/development/astropy-package-template.rst | 678 +
docs/development/building.rst | 120 +
docs/development/ccython.rst | 109 +
docs/development/codeguide.rst | 791 +
docs/development/codeguide_emacs.rst | 207 +
docs/development/docguide.rst | 138 +
docs/development/docrules.rst | 556 +
docs/development/releasing.rst | 794 +
docs/development/scripts.rst | 57 +
docs/development/testguide.rst | 1092 +
docs/development/vision.rst | 112 +
.../workflow/additional_git_topics.rst | 413 +
docs/development/workflow/branch_dropdown.png | Bin 0 -> 39966 bytes
docs/development/workflow/command_history.rst | 30 +
docs/development/workflow/command_history.sh | 112 +
.../workflow/command_history_with_output.sh | 457 +
.../workflow/development_workflow.rst | 540 +
docs/development/workflow/forking_button.png | Bin 0 -> 9495 bytes
.../workflow/get_devel_version.rst | 329 +
.../workflow/git_edit_workflow_examples.rst | 562 +
docs/development/workflow/git_install.rst | 63 +
docs/development/workflow/git_links.inc | 62 +
docs/development/workflow/git_resources.rst | 54 +
docs/development/workflow/known_projects.inc | 40 +
docs/development/workflow/links.inc | 4 +
.../workflow/maintainer_workflow.rst | 259 +
docs/development/workflow/milestone.png | Bin 0 -> 23069 bytes
docs/development/workflow/patches.rst | 112 +
docs/development/workflow/pull_button.png | Bin 0 -> 15528 bytes
docs/development/workflow/terminal_cast.rst | 17 +
docs/development/workflow/this_project.inc | 2 +
docs/development/workflow/virtual_pythons.rst | 189 +
.../workflow/virtualenv_detail.rst | 160 +
.../workflow/worked_example_switch_branch.png | Bin 0 -> 34439 bytes
docs/getting_started.rst | 7 +
docs/importing_astropy.rst | 68 +
docs/index.rst | 170 +
docs/install.rst | 410 +
docs/io/ascii/base_classes.rst | 21 +
docs/io/ascii/extension_classes.rst | 31 +
docs/io/ascii/fast_ascii_io.rst | 181 +
docs/io/ascii/fixed_width_gallery.rst | 440 +
docs/io/ascii/index.rst | 282 +
docs/io/ascii/read.rst | 571 +
docs/io/ascii/references.txt | 4 +
docs/io/ascii/toc.txt | 9 +
docs/io/ascii/write.rst | 373 +
docs/io/fits/api/cards.rst | 13 +
docs/io/fits/api/diff.rst | 47 +
docs/io/fits/api/files.rst | 48 +
docs/io/fits/api/hdulists.rst | 14 +
docs/io/fits/api/hdus.rst | 43 +
docs/io/fits/api/headers.rst | 13 +
docs/io/fits/api/images.rst | 30 +
docs/io/fits/api/tables.rst | 63 +
docs/io/fits/api/verification.rst | 72 +
docs/io/fits/appendix/faq.rst | 752 +
docs/io/fits/appendix/header_transition.rst | 427 +
docs/io/fits/appendix/history.rst | 3242 ++
docs/io/fits/index.rst | 895 +
docs/io/fits/usage/headers.rst | 376 +
docs/io/fits/usage/image.rst | 230 +
docs/io/fits/usage/misc.rst | 42 +
docs/io/fits/usage/scripts.rst | 35 +
docs/io/fits/usage/table.rst | 370 +
docs/io/fits/usage/unfamiliar.rst | 544 +
docs/io/fits/usage/verification.rst | 355 +
docs/io/misc.rst | 18 +
docs/io/registry.rst | 125 +
docs/io/unified.rst | 319 +
docs/io/votable/.gitignore | 2 +
docs/io/votable/api_exceptions.rst | 41 +
docs/io/votable/index.rst | 453 +
docs/io/votable/references.txt | 23 +
docs/known_issues.rst | 359 +
docs/license.rst | 17 +
docs/logging.rst | 155 +
docs/make.bat | 170 +
docs/modeling/algorithms.rst | 61 +
docs/modeling/bounding-boxes.rst | 158 +
docs/modeling/compound-models.rst | 1428 +
docs/modeling/fitting.rst | 206 +
docs/modeling/index.rst | 383 +
docs/modeling/links.inc | 1 +
docs/modeling/models.rst | 369 +
docs/modeling/new.rst | 467 +
docs/modeling/parameters.rst | 170 +
docs/modeling/units.rst | 317 +
docs/nddata/ccddata.rst | 195 +
docs/nddata/decorator.rst | 60 +
docs/nddata/index.rst | 439 +
docs/nddata/mixins/index.rst | 9 +
docs/nddata/mixins/ndarithmetic.rst | 398 +
docs/nddata/mixins/ndio.rst | 13 +
docs/nddata/mixins/ndslicing.rst | 141 +
docs/nddata/nddata.rst | 342 +
docs/nddata/subclassing.rst | 557 +
docs/nddata/utils.rst | 348 +
docs/nitpick-exceptions | 87 +
docs/overview.rst | 7 +
docs/samp/advanced_embed_samp_hub.rst | 134 +
docs/samp/example_clients.rst | 128 +
docs/samp/example_hub.rst | 51 +
docs/samp/example_table_image.rst | 274 +
docs/samp/index.rst | 79 +
docs/samp/references.txt | 5 +
docs/stability.rst | 333 +
docs/stats/circ.rst | 15 +
docs/stats/index.rst | 140 +
docs/stats/lombscargle.rst | 641 +
docs/stats/ripley.rst | 74 +
docs/stats/robust.rst | 156 +
docs/table/access_table.rst | 752 +
docs/table/construct_table.rst | 1079 +
docs/table/implementation_details.rst | 40 +
docs/table/index.rst | 394 +
docs/table/indexing.rst | 228 +
docs/table/io.rst | 65 +
docs/table/masking.rst | 199 +
docs/table/mixin_columns.rst | 369 +
docs/table/modify_table.rst | 310 +
docs/table/operations.rst | 939 +
docs/table/pandas.rst | 64 +
docs/table/references.txt | 7 +
docs/table/table_architecture.png | Bin 0 -> 28771 bytes
docs/table/table_repr_html.png | Bin 0 -> 12979 bytes
docs/table/table_row.png | Bin 0 -> 41362 bytes
docs/table/table_show_in_nb.png | Bin 0 -> 23540 bytes
docs/testhelpers.rst | 76 +
docs/time/index.rst | 1146 +
docs/time/references.txt | 4 +
docs/time/time_scale_conversion.odg | Bin 0 -> 12434 bytes
docs/time/time_scale_conversion.png | Bin 0 -> 6773 bytes
docs/units/combining_and_defining.rst | 80 +
docs/units/conversion.rst | 47 +
docs/units/decomposing_and_composing.rst | 106 +
docs/units/equivalencies.rst | 432 +
docs/units/format.rst | 238 +
docs/units/index.rst | 220 +
docs/units/logarithmic_units.rst | 221 +
docs/units/quantity.rst | 486 +
docs/units/standard_units.rst | 231 +
docs/utils/iers.rst | 212 +
docs/utils/index.rst | 109 +
docs/utils/numpy.rst | 54 +
docs/visualization/histogram.rst | 164 +
docs/visualization/index.rst | 108 +
docs/visualization/lupton_rgb.rst | 75 +
docs/visualization/normalization.rst | 238 +
.../wcsaxes/controlling_axes.rst | 54 +
docs/visualization/wcsaxes/custom_frames.rst | 136 +
.../visualization/wcsaxes/images_contours.rst | 53 +
docs/visualization/wcsaxes/index.rst | 106 +
.../wcsaxes/initializing_axes.rst | 115 +
.../wcsaxes/overlaying_coordinate_systems.rst | 61 +
docs/visualization/wcsaxes/overlays.rst | 273 +
.../wcsaxes/slicing_datacubes.rst | 111 +
.../wcsaxes/ticks_labels_grid.rst | 349 +
docs/vo/conesearch/client.rst | 730 +
.../images/astropy_vo_flowchart.png | Bin 0 -> 20057 bytes
.../images/client_predict_search_n.png | Bin 0 -> 35668 bytes
.../images/client_predict_search_t.png | Bin 0 -> 28522 bytes
.../vo/conesearch/images/validator_html_1.png | Bin 0 -> 28152 bytes
.../vo/conesearch/images/validator_html_2.png | Bin 0 -> 56370 bytes
.../vo/conesearch/images/validator_html_3.png | Bin 0 -> 45038 bytes
.../vo/conesearch/images/validator_html_4.png | Bin 0 -> 65404 bytes
docs/vo/conesearch/index.rst | 206 +
docs/vo/conesearch/validator.rst | 368 +
docs/vo/index.rst | 39 +
docs/warnings.rst | 74 +
docs/wcs/examples/from_file.py | 44 +
docs/wcs/examples/programmatic.py | 44 +
docs/wcs/history.rst | 90 +
docs/wcs/index.rst | 283 +
docs/wcs/note_sip.rst | 86 +
docs/wcs/references.rst | 7 +
docs/wcs/references.txt | 5 +
docs/wcs/relax.rst | 430 +
docs/whatsnew/0.1.rst | 26 +
docs/whatsnew/0.2.rst | 9 +
docs/whatsnew/0.3.rst | 9 +
docs/whatsnew/0.4.rst | 225 +
docs/whatsnew/1.0.rst | 369 +
docs/whatsnew/1.1.rst | 326 +
docs/whatsnew/1.2.rst | 295 +
docs/whatsnew/1.3.rst | 274 +
docs/whatsnew/2.0.rst | 386 +
docs/whatsnew/index.rst | 16 +
examples/README.txt | 14 +
examples/coordinates/README.txt | 6 +
.../coordinates/plot_galactocentric-frame.py | 191 +
examples/coordinates/plot_obs-planning.py | 155 +
.../coordinates/plot_sgr-coordinate-frame.py | 253 +
examples/coordinates/rv-to-gsr.py | 105 +
examples/io/Hs-2009-14-a-web.jpg | Bin 0 -> 23490 bytes
examples/io/README.txt | 6 +
examples/io/create-mef.py | 54 +
examples/io/fits-tables.py | 67 +
examples/io/modify-fits-header.py | 86 +
examples/io/plot_fits-image.py | 61 +
examples/io/skip_create-large-fits.py | 112 +
examples/io/split-jpeg-to-fits.py | 82 +
examples/template/example-template.py | 101 +
ez_setup.py | 414 +
licenses/AURA_LICENSE.rst | 29 +
licenses/CONFIGOBJ_LICENSE.rst | 32 +
licenses/DATATABLES_LICENSE.rst | 29 +
licenses/ERFA.rst | 53 +
licenses/EXPAT_LICENSE.rst | 22 +
licenses/FUTURES_LICENSE.rst | 21 +
licenses/JQUERY_LICENSE.rst | 21 +
licenses/NUMPY_LICENSE.rst | 30 +
licenses/PLY_LICENSE.rst | 30 +
licenses/PYFITS.rst | 29 +
licenses/PYTEST_LICENSE.rst | 18 +
licenses/README.rst | 6 +
licenses/SIX_LICENSE.rst | 18 +
licenses/SPHINXEXT_LICENSES.rst | 80 +
licenses/SYMPY.rst | 28 +
licenses/WCSLIB_LICENSE.rst | 165 +
pip-requirements | 1 +
pip-requirements-dev | 19 +
pip-requirements-doc | 6 +
setup.cfg | 34 +
setup.py | 108 +
static/wininst_background.bmp | Bin 0 -> 158742 bytes
1896 files changed, 955430 insertions(+)
create mode 100644 .astropy-root
create mode 100644 CHANGES.rst
create mode 100644 LICENSE.rst
create mode 100644 PKG-INFO
create mode 100644 README.rst
create mode 100644 ah_bootstrap.py
create mode 100644 astropy/__init__.py
create mode 100644 astropy/_compiler.c
create mode 100644 astropy/_erfa/__init__.py
create mode 100644 astropy/_erfa/core.c
create mode 100644 astropy/_erfa/core.c.templ
create mode 100644 astropy/_erfa/core.py
create mode 100644 astropy/_erfa/core.py.templ
create mode 100644 astropy/_erfa/erfa_generator.py
create mode 100644 astropy/_erfa/setup_package.py
create mode 100644 astropy/_erfa/tests/__init__.py
create mode 100644 astropy/_erfa/tests/test_erfa.py
create mode 100644 astropy/analytic_functions/__init__.py
create mode 100644 astropy/analytic_functions/blackbody.py
create mode 100644 astropy/analytic_functions/tests/__init__.py
create mode 100644 astropy/analytic_functions/tests/test_blackbody.py
create mode 100644 astropy/astropy.cfg
create mode 100644 astropy/config/__init__.py
create mode 100644 astropy/config/affiliated.py
create mode 100644 astropy/config/configuration.py
create mode 100644 astropy/config/paths.py
create mode 100644 astropy/config/setup_package.py
create mode 100644 astropy/config/tests/__init__.py
create mode 100644 astropy/config/tests/data/alias.cfg
create mode 100644 astropy/config/tests/data/astropy.0.3.cfg
astropy/config/tests/data/astropy.0.3.cfg create mode 100644 astropy/config/tests/data/astropy.0.3.windows.cfg create mode 100644 astropy/config/tests/data/deprecated.cfg create mode 100644 astropy/config/tests/data/empty.cfg create mode 100644 astropy/config/tests/data/not_empty.cfg create mode 100644 astropy/config/tests/test_configs.py create mode 100644 astropy/conftest.py create mode 100644 astropy/constants/__init__.py create mode 100644 astropy/constants/astropyconst13.py create mode 100644 astropy/constants/astropyconst20.py create mode 100644 astropy/constants/cgs.py create mode 100644 astropy/constants/codata2010.py create mode 100644 astropy/constants/codata2014.py create mode 100644 astropy/constants/constant.py create mode 100644 astropy/constants/iau2012.py create mode 100644 astropy/constants/iau2015.py create mode 100644 astropy/constants/setup_package.py create mode 100644 astropy/constants/si.py create mode 100644 astropy/constants/tests/__init__.py create mode 100644 astropy/constants/tests/test_constant.py create mode 100644 astropy/constants/tests/test_pickle.py create mode 100644 astropy/constants/tests/test_prior_version.py create mode 100644 astropy/convolution/__init__.py create mode 100644 astropy/convolution/boundary_extend.c create mode 100644 astropy/convolution/boundary_extend.pyx create mode 100644 astropy/convolution/boundary_fill.c create mode 100644 astropy/convolution/boundary_fill.pyx create mode 100644 astropy/convolution/boundary_none.c create mode 100644 astropy/convolution/boundary_none.pyx create mode 100644 astropy/convolution/boundary_wrap.c create mode 100644 astropy/convolution/boundary_wrap.pyx create mode 100644 astropy/convolution/convolve.py create mode 100644 astropy/convolution/core.py create mode 100644 astropy/convolution/kernels.py create mode 100644 astropy/convolution/setup_package.py create mode 100644 astropy/convolution/tests/__init__.py create mode 100644 astropy/convolution/tests/test_convolve.py create mode 100644 astropy/convolution/tests/test_convolve_fft.py create mode 100644 astropy/convolution/tests/test_convolve_kernels.py create mode 100644 astropy/convolution/tests/test_convolve_models.py create mode 100644 astropy/convolution/tests/test_convolve_nddata.py create mode 100644 astropy/convolution/tests/test_convolve_speeds.py create mode 100644 astropy/convolution/tests/test_discretize.py create mode 100644 astropy/convolution/tests/test_kernel_class.py create mode 100644 astropy/convolution/tests/test_pickle.py create mode 100644 astropy/convolution/utils.py create mode 100644 astropy/coordinates/__init__.py create mode 100644 astropy/coordinates/angle_lextab.py create mode 100644 astropy/coordinates/angle_parsetab.py create mode 100644 astropy/coordinates/angle_utilities.py create mode 100644 astropy/coordinates/angles.py create mode 100644 astropy/coordinates/attributes.py create mode 100644 astropy/coordinates/baseframe.py create mode 100644 astropy/coordinates/builtin_frames/__init__.py create mode 100644 astropy/coordinates/builtin_frames/altaz.py create mode 100644 astropy/coordinates/builtin_frames/baseradec.py create mode 100644 astropy/coordinates/builtin_frames/cirs.py create mode 100644 astropy/coordinates/builtin_frames/cirs_observed_transforms.py create mode 100644 astropy/coordinates/builtin_frames/ecliptic.py create mode 100644 astropy/coordinates/builtin_frames/ecliptic_transforms.py create mode 100644 astropy/coordinates/builtin_frames/fk4.py create mode 100644 
create mode 100644 astropy/coordinates/builtin_frames/fk5.py
create mode 100644 astropy/coordinates/builtin_frames/galactic.py
create mode 100644 astropy/coordinates/builtin_frames/galactic_transforms.py
create mode 100644 astropy/coordinates/builtin_frames/galactocentric.py
create mode 100644 astropy/coordinates/builtin_frames/gcrs.py
create mode 100644 astropy/coordinates/builtin_frames/hcrs.py
create mode 100644 astropy/coordinates/builtin_frames/icrs.py
create mode 100644 astropy/coordinates/builtin_frames/icrs_cirs_transforms.py
create mode 100644 astropy/coordinates/builtin_frames/icrs_fk5_transforms.py
create mode 100644 astropy/coordinates/builtin_frames/intermediate_rotation_transforms.py
create mode 100644 astropy/coordinates/builtin_frames/itrs.py
create mode 100644 astropy/coordinates/builtin_frames/lsr.py
create mode 100644 astropy/coordinates/builtin_frames/skyoffset.py
create mode 100644 astropy/coordinates/builtin_frames/supergalactic.py
create mode 100644 astropy/coordinates/builtin_frames/supergalactic_transforms.py
create mode 100644 astropy/coordinates/builtin_frames/utils.py
create mode 100644 astropy/coordinates/calculation.py
create mode 100644 astropy/coordinates/data/constellation_data_roman87.dat
create mode 100644 astropy/coordinates/data/constellation_names.dat
create mode 100644 astropy/coordinates/data/sites.json
create mode 100644 astropy/coordinates/distances.py
create mode 100644 astropy/coordinates/earth.py
create mode 100644 astropy/coordinates/earth_orientation.py
create mode 100644 astropy/coordinates/errors.py
create mode 100644 astropy/coordinates/funcs.py
create mode 100644 astropy/coordinates/matching.py
create mode 100644 astropy/coordinates/matrix_utilities.py
create mode 100644 astropy/coordinates/name_resolve.py
create mode 100644 astropy/coordinates/orbital_elements.py
create mode 100644 astropy/coordinates/representation.py
create mode 100644 astropy/coordinates/setup_package.py
create mode 100644 astropy/coordinates/sites.py
create mode 100644 astropy/coordinates/sky_coordinate.py
create mode 100644 astropy/coordinates/solar_system.py
create mode 100644 astropy/coordinates/tests/__init__.py
create mode 100644 astropy/coordinates/tests/accuracy/__init__.py
create mode 100644 astropy/coordinates/tests/accuracy/fk4_no_e_fk4.csv
create mode 100644 astropy/coordinates/tests/accuracy/fk4_no_e_fk5.csv
create mode 100644 astropy/coordinates/tests/accuracy/galactic_fk4.csv
create mode 100644 astropy/coordinates/tests/accuracy/generate_ref_ast.py
create mode 100644 astropy/coordinates/tests/accuracy/icrs_fk5.csv
create mode 100644 astropy/coordinates/tests/accuracy/test_altaz_icrs.py
create mode 100644 astropy/coordinates/tests/accuracy/test_ecliptic.py
create mode 100644 astropy/coordinates/tests/accuracy/test_fk4_no_e_fk4.py
create mode 100644 astropy/coordinates/tests/accuracy/test_fk4_no_e_fk5.py
create mode 100644 astropy/coordinates/tests/accuracy/test_galactic_fk4.py
create mode 100644 astropy/coordinates/tests/accuracy/test_icrs_fk5.py
create mode 100644 astropy/coordinates/tests/test_angles.py
create mode 100644 astropy/coordinates/tests/test_angular_separation.py
create mode 100644 astropy/coordinates/tests/test_api_ape5.py
create mode 100644 astropy/coordinates/tests/test_arrays.py
create mode 100644 astropy/coordinates/tests/test_atc_replacements.py
create mode 100644 astropy/coordinates/tests/test_celestial_transformations.py
create mode 100644 astropy/coordinates/tests/test_distance.py
create mode 100644 astropy/coordinates/tests/test_earth.py
create mode 100644 astropy/coordinates/tests/test_finite_difference_velocities.py
create mode 100644 astropy/coordinates/tests/test_formatting.py
create mode 100644 astropy/coordinates/tests/test_frames.py
create mode 100644 astropy/coordinates/tests/test_frames_with_velocity.py
create mode 100644 astropy/coordinates/tests/test_funcs.py
create mode 100644 astropy/coordinates/tests/test_iau_fullstack.py
create mode 100644 astropy/coordinates/tests/test_intermediate_transformations.py
create mode 100644 astropy/coordinates/tests/test_matching.py
create mode 100644 astropy/coordinates/tests/test_matrix_utilities.py
create mode 100644 astropy/coordinates/tests/test_name_resolve.py
create mode 100644 astropy/coordinates/tests/test_pickle.py
create mode 100644 astropy/coordinates/tests/test_regression.py
create mode 100644 astropy/coordinates/tests/test_representation.py
create mode 100644 astropy/coordinates/tests/test_representation_arithmetic.py
create mode 100644 astropy/coordinates/tests/test_representation_methods.py
create mode 100644 astropy/coordinates/tests/test_shape_manipulation.py
create mode 100644 astropy/coordinates/tests/test_sites.py
create mode 100644 astropy/coordinates/tests/test_sky_coord.py
create mode 100644 astropy/coordinates/tests/test_skyoffset_transformations.py
create mode 100644 astropy/coordinates/tests/test_solar_system.py
create mode 100644 astropy/coordinates/tests/test_transformations.py
create mode 100644 astropy/coordinates/tests/test_unit_representation.py
create mode 100644 astropy/coordinates/tests/test_velocity_corrs.py
create mode 100644 astropy/coordinates/tests/utils.py
create mode 100644 astropy/coordinates/transformations.py
create mode 100644 astropy/cosmology/__init__.py
create mode 100644 astropy/cosmology/core.py
create mode 100644 astropy/cosmology/funcs.py
create mode 100644 astropy/cosmology/parameters.py
create mode 100644 astropy/cosmology/scalar_inv_efuncs.c
create mode 100644 astropy/cosmology/scalar_inv_efuncs.pyx
create mode 100644 astropy/cosmology/setup_package.py
create mode 100644 astropy/cosmology/tests/__init__.py
create mode 100644 astropy/cosmology/tests/test_cosmology.py
create mode 100644 astropy/cosmology/tests/test_pickle.py
create mode 100644 astropy/cython_version.py
create mode 100644 astropy/extern/__init__.py
create mode 100644 astropy/extern/bundled/__init__.py
create mode 100644 astropy/extern/bundled/six.py
create mode 100644 astropy/extern/configobj/__init__.py
create mode 100755 astropy/extern/configobj/configobj.py
create mode 100755 astropy/extern/configobj/validate.py
create mode 100644 astropy/extern/css/jquery.dataTables.css
create mode 100644 astropy/extern/js/jquery-3.1.1.js
create mode 100644 astropy/extern/js/jquery-3.1.1.min.js
create mode 100644 astropy/extern/js/jquery.dataTables.js
create mode 100644 astropy/extern/js/jquery.dataTables.min.js
create mode 100644 astropy/extern/plugins/__init__.py
create mode 100644 astropy/extern/plugins/pytest_doctestplus/__init__.py
create mode 100644 astropy/extern/plugins/pytest_doctestplus/output_checker.py
create mode 100644 astropy/extern/plugins/pytest_doctestplus/plugin.py
create mode 100644 astropy/extern/plugins/pytest_openfiles/__init__.py
create mode 100644 astropy/extern/plugins/pytest_openfiles/plugin.py
create mode 100644 astropy/extern/plugins/pytest_remotedata/__init__.py
create mode 100644 astropy/extern/plugins/pytest_remotedata/disable_internet.py
create mode 100644 astropy/extern/plugins/pytest_remotedata/plugin.py
create mode 100644 astropy/extern/ply/__init__.py
create mode 100644 astropy/extern/ply/cpp.py
create mode 100644 astropy/extern/ply/ctokens.py
create mode 100644 astropy/extern/ply/lex.py
create mode 100644 astropy/extern/ply/yacc.py
create mode 100644 astropy/extern/setup_package.py
create mode 100644 astropy/extern/six.py
create mode 100644 astropy/io/__init__.py
create mode 100644 astropy/io/ascii/__init__.py
create mode 100644 astropy/io/ascii/basic.py
create mode 100644 astropy/io/ascii/cds.py
create mode 100644 astropy/io/ascii/connect.py
create mode 100644 astropy/io/ascii/core.py
create mode 100644 astropy/io/ascii/cparser.c
create mode 100644 astropy/io/ascii/cparser.pyx
create mode 100644 astropy/io/ascii/daophot.py
create mode 100644 astropy/io/ascii/ecsv.py
create mode 100644 astropy/io/ascii/fastbasic.py
create mode 100644 astropy/io/ascii/fixedwidth.py
create mode 100644 astropy/io/ascii/html.py
create mode 100644 astropy/io/ascii/ipac.py
create mode 100644 astropy/io/ascii/latex.py
create mode 100644 astropy/io/ascii/misc.py
create mode 100644 astropy/io/ascii/rst.py
create mode 100644 astropy/io/ascii/setup_package.py
create mode 100644 astropy/io/ascii/sextractor.py
create mode 100644 astropy/io/ascii/src/tokenizer.c
create mode 100644 astropy/io/ascii/src/tokenizer.h
create mode 100644 astropy/io/ascii/tests/__init__.py
create mode 100644 astropy/io/ascii/tests/common.py
create mode 100644 astropy/io/ascii/tests/t/apostrophe.rdb
create mode 100644 astropy/io/ascii/tests/t/apostrophe.tab
create mode 100644 astropy/io/ascii/tests/t/bad.txt
create mode 100644 astropy/io/ascii/tests/t/bars_at_ends.txt
create mode 100644 astropy/io/ascii/tests/t/cds.dat
create mode 100644 astropy/io/ascii/tests/t/cds/description/ReadMe
create mode 100644 astropy/io/ascii/tests/t/cds/description/table.dat
create mode 100644 astropy/io/ascii/tests/t/cds/glob/ReadMe
create mode 100644 astropy/io/ascii/tests/t/cds/glob/lmxbrefs.dat
create mode 100644 astropy/io/ascii/tests/t/cds/multi/ReadMe
create mode 100644 astropy/io/ascii/tests/t/cds/multi/lhs2065.dat
create mode 100644 astropy/io/ascii/tests/t/cds/multi/lp944-20.dat
create mode 100644 astropy/io/ascii/tests/t/cds2.dat
create mode 100644 astropy/io/ascii/tests/t/cds_malformed.dat
create mode 100644 astropy/io/ascii/tests/t/commented_header.dat
create mode 100644 astropy/io/ascii/tests/t/commented_header2.dat
create mode 100644 astropy/io/ascii/tests/t/continuation.dat
create mode 100644 astropy/io/ascii/tests/t/daophot.dat
create mode 100644 astropy/io/ascii/tests/t/daophot.dat.gz
create mode 100644 astropy/io/ascii/tests/t/daophot2.dat
create mode 100644 astropy/io/ascii/tests/t/daophot3.dat
create mode 100644 astropy/io/ascii/tests/t/daophot4.dat
create mode 100644 astropy/io/ascii/tests/t/fill_values.txt
create mode 100644 astropy/io/ascii/tests/t/fixed_width_2_line.txt
create mode 100644 astropy/io/ascii/tests/t/html.html
create mode 100644 astropy/io/ascii/tests/t/html2.html
create mode 100644 astropy/io/ascii/tests/t/ipac.dat
create mode 100644 astropy/io/ascii/tests/t/ipac.dat.bz2
create mode 100644 astropy/io/ascii/tests/t/ipac.dat.xz
create mode 100644 astropy/io/ascii/tests/t/latex1.tex
create mode 100644 astropy/io/ascii/tests/t/latex1.tex.gz
create mode 100644 astropy/io/ascii/tests/t/latex2.tex
create mode 100644 astropy/io/ascii/tests/t/latex3.tex
create mode 100644 astropy/io/ascii/tests/t/nls1_stackinfo.dbout
create mode 100644 astropy/io/ascii/tests/t/no_data_cds.dat
create mode 100644 astropy/io/ascii/tests/t/no_data_daophot.dat
create mode 100644 astropy/io/ascii/tests/t/no_data_ipac.dat
create mode 100644 astropy/io/ascii/tests/t/no_data_sextractor.dat
create mode 100644 astropy/io/ascii/tests/t/no_data_with_header.dat
create mode 100644 astropy/io/ascii/tests/t/no_data_without_header.dat
create mode 100644 astropy/io/ascii/tests/t/sextractor.dat
create mode 100644 astropy/io/ascii/tests/t/sextractor2.dat
create mode 100644 astropy/io/ascii/tests/t/sextractor3.dat
create mode 100644 astropy/io/ascii/tests/t/short.rdb
create mode 100644 astropy/io/ascii/tests/t/short.rdb.bz2
create mode 100644 astropy/io/ascii/tests/t/short.rdb.gz
create mode 100644 astropy/io/ascii/tests/t/short.rdb.xz
create mode 100644 astropy/io/ascii/tests/t/short.tab
create mode 100644 astropy/io/ascii/tests/t/simple.txt
create mode 100644 astropy/io/ascii/tests/t/simple2.txt
create mode 100644 astropy/io/ascii/tests/t/simple3.txt
create mode 100644 astropy/io/ascii/tests/t/simple4.txt
create mode 100644 astropy/io/ascii/tests/t/simple5.txt
create mode 100644 astropy/io/ascii/tests/t/simple_csv.csv
create mode 100644 astropy/io/ascii/tests/t/simple_csv_missing.csv
create mode 100644 astropy/io/ascii/tests/t/space_delim_blank_lines.txt
create mode 100644 astropy/io/ascii/tests/t/space_delim_no_header.dat
create mode 100644 astropy/io/ascii/tests/t/space_delim_no_names.dat
create mode 100644 astropy/io/ascii/tests/t/test4.dat
create mode 100644 astropy/io/ascii/tests/t/test5.dat
create mode 100644 astropy/io/ascii/tests/t/vizier/ReadMe
create mode 100644 astropy/io/ascii/tests/t/vizier/table1.dat
create mode 100644 astropy/io/ascii/tests/t/vizier/table5.dat
create mode 100644 astropy/io/ascii/tests/t/vots_spec.dat
create mode 100644 astropy/io/ascii/tests/t/whitespace.dat
create mode 100644 astropy/io/ascii/tests/test_c_reader.py
create mode 100644 astropy/io/ascii/tests/test_cds_header_from_readme.py
create mode 100644 astropy/io/ascii/tests/test_compressed.py
create mode 100644 astropy/io/ascii/tests/test_connect.py
create mode 100644 astropy/io/ascii/tests/test_ecsv.py
create mode 100644 astropy/io/ascii/tests/test_fixedwidth.py
create mode 100644 astropy/io/ascii/tests/test_html.py
create mode 100644 astropy/io/ascii/tests/test_ipac_definitions.py
create mode 100644 astropy/io/ascii/tests/test_read.py
create mode 100644 astropy/io/ascii/tests/test_rst.py
create mode 100644 astropy/io/ascii/tests/test_types.py
create mode 100644 astropy/io/ascii/tests/test_write.py
create mode 100644 astropy/io/ascii/ui.py
create mode 100644 astropy/io/fits/__init__.py
create mode 100644 astropy/io/fits/_numpy_hacks.py
create mode 100644 astropy/io/fits/card.py
create mode 100644 astropy/io/fits/column.py
create mode 100644 astropy/io/fits/connect.py
create mode 100644 astropy/io/fits/convenience.py
create mode 100644 astropy/io/fits/diff.py
create mode 100644 astropy/io/fits/file.py
create mode 100644 astropy/io/fits/fitsrec.py
create mode 100644 astropy/io/fits/hdu/__init__.py
create mode 100644 astropy/io/fits/hdu/base.py
create mode 100644 astropy/io/fits/hdu/compressed.py
create mode 100644 astropy/io/fits/hdu/groups.py
create mode 100644 astropy/io/fits/hdu/hdulist.py
create mode 100644 astropy/io/fits/hdu/image.py
create mode 100644 astropy/io/fits/hdu/nonstandard.py
create mode 100644 astropy/io/fits/hdu/streaming.py
create mode 100644 astropy/io/fits/hdu/table.py
create mode 100644 astropy/io/fits/header.py
create mode 100644 astropy/io/fits/py3compat.py
create mode 100644 astropy/io/fits/scripts/__init__.py
create mode 100644 astropy/io/fits/scripts/fitscheck.py
create mode 100644 astropy/io/fits/scripts/fitsdiff.py
create mode 100644 astropy/io/fits/scripts/fitsheader.py
create mode 100644 astropy/io/fits/scripts/fitsinfo.py
create mode 100644 astropy/io/fits/setup_package.py
create mode 100644 astropy/io/fits/src/compressionmodule.c
create mode 100644 astropy/io/fits/src/compressionmodule.h
create mode 100644 astropy/io/fits/tests/__init__.py
create mode 100644 astropy/io/fits/tests/cfitsio_verify.c
create mode 100644 astropy/io/fits/tests/data/arange.fits
create mode 100644 astropy/io/fits/tests/data/ascii.fits
create mode 100644 astropy/io/fits/tests/data/blank.fits
create mode 100644 astropy/io/fits/tests/data/btable.fits
create mode 100644 astropy/io/fits/tests/data/checksum.fits
create mode 100644 astropy/io/fits/tests/data/comp.fits
create mode 100644 astropy/io/fits/tests/data/compressed_float_bzero.fits
create mode 100644 astropy/io/fits/tests/data/compressed_image.fits
create mode 100644 astropy/io/fits/tests/data/fixed-1890.fits
create mode 100644 astropy/io/fits/tests/data/group.fits
create mode 100644 astropy/io/fits/tests/data/history_header.fits
create mode 100644 astropy/io/fits/tests/data/memtest.fits
create mode 100644 astropy/io/fits/tests/data/o4sp040b0_raw.fits
create mode 100644 astropy/io/fits/tests/data/random_groups.fits
create mode 100644 astropy/io/fits/tests/data/scale.fits
create mode 100644 astropy/io/fits/tests/data/stddata.fits
create mode 100644 astropy/io/fits/tests/data/table.fits
create mode 100644 astropy/io/fits/tests/data/tb.fits
create mode 100644 astropy/io/fits/tests/data/tdim.fits
create mode 100644 astropy/io/fits/tests/data/test0.fits
create mode 100644 astropy/io/fits/tests/data/variable_length_table.fits
create mode 100644 astropy/io/fits/tests/data/zerowidth.fits
create mode 100644 astropy/io/fits/tests/test_checksum.py
create mode 100644 astropy/io/fits/tests/test_connect.py
create mode 100644 astropy/io/fits/tests/test_convenience.py
create mode 100644 astropy/io/fits/tests/test_core.py
create mode 100644 astropy/io/fits/tests/test_diff.py
create mode 100644 astropy/io/fits/tests/test_division.py
create mode 100644 astropy/io/fits/tests/test_fitsdiff.py
create mode 100644 astropy/io/fits/tests/test_fitsheader.py
create mode 100644 astropy/io/fits/tests/test_fitsinfo.py
create mode 100644 astropy/io/fits/tests/test_groups.py
create mode 100644 astropy/io/fits/tests/test_hdulist.py
create mode 100644 astropy/io/fits/tests/test_header.py
create mode 100644 astropy/io/fits/tests/test_image.py
create mode 100644 astropy/io/fits/tests/test_nonstandard.py
create mode 100644 astropy/io/fits/tests/test_structured.py
create mode 100644 astropy/io/fits/tests/test_table.py
create mode 100644 astropy/io/fits/tests/test_uint.py
create mode 100644 astropy/io/fits/tests/test_util.py
create mode 100644 astropy/io/fits/util.py
create mode 100644 astropy/io/fits/verify.py
create mode 100644 astropy/io/misc/__init__.py
create mode 100644 astropy/io/misc/connect.py
create mode 100644 astropy/io/misc/hdf5.py
create mode 100644 astropy/io/misc/pickle_helpers.py
create mode 100644 astropy/io/misc/tests/__init__.py
create mode 100644 astropy/io/misc/tests/test_hdf5.py
create mode 100644 astropy/io/misc/tests/test_pickle_helpers.py
create mode 100644 astropy/io/misc/tests/test_yaml.py
create mode 100644 astropy/io/misc/yaml.py
create mode 100644 astropy/io/registry.py
create mode 100644 astropy/io/setup_package.py
create mode 100644 astropy/io/tests/__init__.py
create mode 100644 astropy/io/tests/test_registry.py
create mode 100644 astropy/io/votable/__init__.py
create mode 100644 astropy/io/votable/connect.py
create mode 100644 astropy/io/votable/converters.py
create mode 100644 astropy/io/votable/data/VOTable.dtd
create mode 100644 astropy/io/votable/data/VOTable.v1.1.xsd
create mode 100644 astropy/io/votable/data/VOTable.v1.2.xsd
create mode 100644 astropy/io/votable/data/VOTable.v1.3.xsd
create mode 100644 astropy/io/votable/data/ucd1p-words.txt
create mode 100644 astropy/io/votable/exceptions.py
create mode 100755 astropy/io/votable/setup_package.py
create mode 100644 astropy/io/votable/src/tablewriter.c
create mode 100644 astropy/io/votable/table.py
create mode 100644 astropy/io/votable/tests/__init__.py
create mode 100644 astropy/io/votable/tests/converter_test.py
create mode 100644 astropy/io/votable/tests/data/custom_datatype.xml
create mode 100644 astropy/io/votable/tests/data/empty_table.xml
create mode 100644 astropy/io/votable/tests/data/gemini.xml
create mode 100644 astropy/io/votable/tests/data/irsa-nph-error.xml
create mode 100644 astropy/io/votable/tests/data/irsa-nph-m31.xml
create mode 100644 astropy/io/votable/tests/data/names.xml
create mode 100644 astropy/io/votable/tests/data/no_resource.txt
create mode 100644 astropy/io/votable/tests/data/no_resource.xml
create mode 100644 astropy/io/votable/tests/data/nonstandard_units.xml
create mode 100644 astropy/io/votable/tests/data/regression.bin.tabledata.truth.1.1.xml
create mode 100644 astropy/io/votable/tests/data/regression.bin.tabledata.truth.1.3.xml
create mode 100644 astropy/io/votable/tests/data/regression.xml
create mode 100644 astropy/io/votable/tests/data/tb.fits
create mode 100644 astropy/io/votable/tests/data/too_many_columns.xml.gz
create mode 100644 astropy/io/votable/tests/data/validation.txt
create mode 100644 astropy/io/votable/tests/exception_test.py
create mode 100644 astropy/io/votable/tests/table_test.py
create mode 100644 astropy/io/votable/tests/tree_test.py
create mode 100644 astropy/io/votable/tests/ucd_test.py
create mode 100644 astropy/io/votable/tests/util_test.py
create mode 100644 astropy/io/votable/tests/vo_test.py
create mode 100644 astropy/io/votable/tree.py
create mode 100644 astropy/io/votable/ucd.py
create mode 100644 astropy/io/votable/util.py
create mode 100644 astropy/io/votable/validator/__init__.py
create mode 100644 astropy/io/votable/validator/html.py
create mode 100644 astropy/io/votable/validator/main.py
create mode 100644 astropy/io/votable/validator/result.py
create mode 100644 astropy/io/votable/validator/urls/cone.big.dat.gz
create mode 100644 astropy/io/votable/validator/urls/cone.broken.dat.gz
create mode 100644 astropy/io/votable/validator/urls/cone.good.dat.gz
create mode 100644 astropy/io/votable/validator/urls/cone.incorrect.dat.gz
create mode 100644 astropy/io/votable/volint.py
create mode 100644 astropy/io/votable/xmlutil.py
create mode 100644 astropy/logger.py
create mode 100644 astropy/modeling/__init__.py
create mode 100644 astropy/modeling/blackbody.py
create mode 100644 astropy/modeling/core.py
create mode 100644 astropy/modeling/fitting.py
create mode 100644 astropy/modeling/functional_models.py
create mode 100644 astropy/modeling/mappings.py
create mode 100644 astropy/modeling/models.py
create mode 100644 astropy/modeling/optimizers.py
create mode 100644 astropy/modeling/parameters.py
create mode 100644 astropy/modeling/polynomial.py
create mode 100644 astropy/modeling/powerlaws.py
create mode 100644 astropy/modeling/projections.py
create mode 100644 astropy/modeling/rotations.py
create mode 100644 astropy/modeling/setup_package.py
create mode 100644 astropy/modeling/src/projections.c
create mode 100644 astropy/modeling/src/projections.c.templ
create mode 100644 astropy/modeling/src/wcsconfig.h
create mode 100644 astropy/modeling/statistic.py
create mode 100644 astropy/modeling/tabular.py
create mode 100644 astropy/modeling/tests/__init__.py
create mode 100644 astropy/modeling/tests/data/1904-66_AZP.fits
create mode 100644 astropy/modeling/tests/data/__init__.py
create mode 100644 astropy/modeling/tests/data/hst_sip.hdr
create mode 100644 astropy/modeling/tests/data/idcompspec.fits
create mode 100644 astropy/modeling/tests/data/irac_sip.hdr
create mode 100644 astropy/modeling/tests/example_models.py
create mode 100644 astropy/modeling/tests/irafutil.py
create mode 100644 astropy/modeling/tests/test_blackbody.py
create mode 100644 astropy/modeling/tests/test_compound.py
create mode 100644 astropy/modeling/tests/test_constraints.py
create mode 100644 astropy/modeling/tests/test_core.py
create mode 100644 astropy/modeling/tests/test_fitters.py
create mode 100644 astropy/modeling/tests/test_functional_models.py
create mode 100644 astropy/modeling/tests/test_input.py
create mode 100644 astropy/modeling/tests/test_mappings.py
create mode 100644 astropy/modeling/tests/test_models.py
create mode 100644 astropy/modeling/tests/test_models_quantities.py
create mode 100644 astropy/modeling/tests/test_parameters.py
create mode 100644 astropy/modeling/tests/test_polynomial.py
create mode 100644 astropy/modeling/tests/test_projections.py
create mode 100644 astropy/modeling/tests/test_quantities_evaluation.py
create mode 100644 astropy/modeling/tests/test_quantities_fitting.py
create mode 100644 astropy/modeling/tests/test_quantities_model.py
create mode 100644 astropy/modeling/tests/test_quantities_parameters.py
create mode 100644 astropy/modeling/tests/test_quantities_rotations.py
create mode 100644 astropy/modeling/tests/test_rotations.py
create mode 100644 astropy/modeling/tests/test_utils.py
create mode 100644 astropy/modeling/tests/utils.py
create mode 100644 astropy/modeling/utils.py
create mode 100644 astropy/nddata/__init__.py
create mode 100644 astropy/nddata/ccddata.py
create mode 100644 astropy/nddata/compat.py
create mode 100644 astropy/nddata/decorators.py
create mode 100644 astropy/nddata/flag_collection.py
create mode 100644 astropy/nddata/mixins/__init__.py
create mode 100644 astropy/nddata/mixins/ndarithmetic.py
create mode 100644 astropy/nddata/mixins/ndio.py
create mode 100644 astropy/nddata/mixins/ndslicing.py
create mode 100644 astropy/nddata/mixins/tests/__init__.py
create mode 100644 astropy/nddata/mixins/tests/test_ndarithmetic.py
create mode 100644 astropy/nddata/mixins/tests/test_ndio.py
create mode 100644 astropy/nddata/mixins/tests/test_ndslicing.py
create mode 100644 astropy/nddata/nddata.py
create mode 100644 astropy/nddata/nddata_base.py
create mode 100644 astropy/nddata/nddata_withmixins.py
create mode 100644 astropy/nddata/nduncertainty.py
create mode 100644 astropy/nddata/setup_package.py
create mode 100644 astropy/nddata/tests/__init__.py
create mode 100644 astropy/nddata/tests/data/sip-wcs.fits
create mode 100644 astropy/nddata/tests/test_ccddata.py
create mode 100644 astropy/nddata/tests/test_compat.py
create mode 100644 astropy/nddata/tests/test_decorators.py
create mode 100644 astropy/nddata/tests/test_flag_collection.py
create mode 100644 astropy/nddata/tests/test_nddata.py
create mode 100644 astropy/nddata/tests/test_nddata_base.py
create mode 100644 astropy/nddata/tests/test_nduncertainty.py
create mode 100644 astropy/nddata/tests/test_utils.py
create mode 100644 astropy/nddata/utils.py
create mode 100644 astropy/samp/__init__.py
create mode 100644 astropy/samp/client.py
create mode 100644 astropy/samp/constants.py
create mode 100644 astropy/samp/data/astropy_icon.png
create mode 100644 astropy/samp/data/clientaccesspolicy.xml
create mode 100644 astropy/samp/data/crossdomain.xml
create mode 100644 astropy/samp/errors.py
create mode 100644 astropy/samp/hub.py
create mode 100644 astropy/samp/hub_proxy.py
create mode 100644 astropy/samp/hub_script.py
create mode 100644 astropy/samp/integrated_client.py
create mode 100644 astropy/samp/lockfile_helpers.py
create mode 100644 astropy/samp/setup_package.py
create mode 100644 astropy/samp/standard_profile.py
create mode 100644 astropy/samp/tests/__init__.py
create mode 100644 astropy/samp/tests/test_client.py
create mode 100644 astropy/samp/tests/test_errors.py
create mode 100644 astropy/samp/tests/test_helpers.py
create mode 100644 astropy/samp/tests/test_hub.py
create mode 100644 astropy/samp/tests/test_hub_proxy.py
create mode 100644 astropy/samp/tests/test_hub_script.py
create mode 100644 astropy/samp/tests/test_standard_profile.py
create mode 100644 astropy/samp/tests/test_web_profile.py
create mode 100644 astropy/samp/tests/web_profile_test_helpers.py
create mode 100644 astropy/samp/utils.py
create mode 100644 astropy/samp/web_profile.py
create mode 100644 astropy/setup_package.py
create mode 100644 astropy/stats/__init__.py
create mode 100644 astropy/stats/bayesian_blocks.py
create mode 100644 astropy/stats/biweight.py
create mode 100644 astropy/stats/circstats.py
create mode 100644 astropy/stats/funcs.py
create mode 100644 astropy/stats/histogram.py
create mode 100644 astropy/stats/info_theory.py
create mode 100644 astropy/stats/jackknife.py
create mode 100644 astropy/stats/lombscargle/__init__.py
create mode 100644 astropy/stats/lombscargle/core.py
create mode 100644 astropy/stats/lombscargle/implementations/__init__.py
create mode 100644 astropy/stats/lombscargle/implementations/chi2_impl.py
create mode 100644 astropy/stats/lombscargle/implementations/cython_impl.c
create mode 100644 astropy/stats/lombscargle/implementations/cython_impl.pyx
create mode 100644 astropy/stats/lombscargle/implementations/fast_impl.py
create mode 100644 astropy/stats/lombscargle/implementations/fastchi2_impl.py
create mode 100644 astropy/stats/lombscargle/implementations/main.py
create mode 100644 astropy/stats/lombscargle/implementations/mle.py
create mode 100644 astropy/stats/lombscargle/implementations/scipy_impl.py
create mode 100644 astropy/stats/lombscargle/implementations/slow_impl.py
create mode 100644 astropy/stats/lombscargle/implementations/tests/__init__.py
create mode 100644 astropy/stats/lombscargle/implementations/tests/test_mle.py
create mode 100644 astropy/stats/lombscargle/implementations/tests/test_utils.py
create mode 100644 astropy/stats/lombscargle/implementations/utils.py
create mode 100644 astropy/stats/lombscargle/tests/__init__.py
create mode 100644 astropy/stats/lombscargle/tests/test_lombscargle.py
create mode 100644 astropy/stats/setup_package.py
create mode 100644 astropy/stats/sigma_clipping.py
create mode 100644 astropy/stats/spatial.py
create mode 100644 astropy/stats/tests/__init__.py
create mode 100644 astropy/stats/tests/test_bayesian_blocks.py
create mode 100644 astropy/stats/tests/test_biweight.py
create mode 100644 astropy/stats/tests/test_circstats.py
create mode 100644 astropy/stats/tests/test_funcs.py
create mode 100644 astropy/stats/tests/test_histogram.py
create mode 100644 astropy/stats/tests/test_info_theory.py
create mode 100644 astropy/stats/tests/test_jackknife.py
create mode 100644 astropy/stats/tests/test_sigma_clipping.py
create mode 100644 astropy/stats/tests/test_spatial.py
create mode 100644 astropy/table/__init__.py
create mode 100644 astropy/table/_column_mixins.c
create mode 100644 astropy/table/_column_mixins.pyx
create mode 100644 astropy/table/_np_utils.c
create mode 100644 astropy/table/_np_utils.pyx
create mode 100644 astropy/table/bst.py
create mode 100644 astropy/table/column.py
create mode 100644 astropy/table/groups.py
create mode 100644 astropy/table/index.py
create mode 100644 astropy/table/info.py
create mode 100644 astropy/table/jsviewer.py
create mode 100644 astropy/table/meta.py
create mode 100644 astropy/table/np_utils.py
create mode 100644 astropy/table/operations.py
create mode 100644 astropy/table/pprint.py
create mode 100644 astropy/table/row.py
create mode 100644 astropy/table/serialize.py
create mode 100644 astropy/table/setup_package.py
create mode 100644 astropy/table/sorted_array.py
create mode 100644 astropy/table/table.py
create mode 100644 astropy/table/table_helpers.py
create mode 100644 astropy/table/tests/__init__.py
create mode 100644 astropy/table/tests/conftest.py
create mode 100644 astropy/table/tests/test_array.py
create mode 100644 astropy/table/tests/test_bst.py
create mode 100644 astropy/table/tests/test_column.py
create mode 100644 astropy/table/tests/test_groups.py
create mode 100644 astropy/table/tests/test_index.py
create mode 100644 astropy/table/tests/test_info.py
create mode 100644 astropy/table/tests/test_init_table.py
create mode 100644 astropy/table/tests/test_item_access.py
create mode 100644 astropy/table/tests/test_jsviewer.py
create mode 100644 astropy/table/tests/test_masked.py
create mode 100644 astropy/table/tests/test_mixin.py
create mode 100644 astropy/table/tests/test_np_utils.py
create mode 100644 astropy/table/tests/test_operations.py
create mode 100644 astropy/table/tests/test_pickle.py
create mode 100644 astropy/table/tests/test_pprint.py
create mode 100644 astropy/table/tests/test_row.py
create mode 100644 astropy/table/tests/test_subclass.py
create mode 100644 astropy/table/tests/test_table.py
create mode 100644 astropy/tests/__init__.py
create mode 100644 astropy/tests/command.py
create mode 100644 astropy/tests/coveragerc
create mode 100644 astropy/tests/disable_internet.py
create mode 100644 astropy/tests/helper.py
create mode 100644 astropy/tests/image_tests.py
create mode 100644 astropy/tests/output_checker.py
create mode 100644 astropy/tests/pytest_plugins.py
create mode 100644 astropy/tests/pytest_repeat.py
create mode 100644 astropy/tests/runner.py
create mode 100644 astropy/tests/setup_package.py
create mode 100644 astropy/tests/test_logger.py
create mode 100644 astropy/tests/tests/__init__.py
create mode 100644 astropy/tests/tests/data/open_file_detection.txt
create mode 100644 astropy/tests/tests/test_imports.py
create mode 100644 astropy/tests/tests/test_open_file_detection.py
create mode 100644 astropy/tests/tests/test_quantity_helpers.py
create mode 100644 astropy/tests/tests/test_run_tests.py
create mode 100644 astropy/tests/tests/test_runner.py
create mode 100644 astropy/tests/tests/test_skip_remote_data.py
create mode 100644 astropy/tests/tests/test_socketblocker.py
create mode 100644 astropy/time/__init__.py
create mode 100644 astropy/time/core.py
create mode 100644 astropy/time/formats.py
create mode 100644 astropy/time/setup_package.py
create mode 100644 astropy/time/tests/__init__.py
create mode 100644 astropy/time/tests/test_basic.py
create mode 100644 astropy/time/tests/test_comparisons.py
create mode 100644 astropy/time/tests/test_corrs.py
create mode 100644 astropy/time/tests/test_delta.py
create mode 100644 astropy/time/tests/test_guess.py
create mode 100644 astropy/time/tests/test_methods.py
create mode 100644 astropy/time/tests/test_pickle.py
create mode 100644 astropy/time/tests/test_precision.py
create mode 100644 astropy/time/tests/test_quantity_interaction.py
create mode 100644 astropy/time/tests/test_sidereal.py
create mode 100644 astropy/time/tests/test_ut1.py
create mode 100644 astropy/time/utils.py
create mode 100644 astropy/units/__init__.py
create mode 100644 astropy/units/astrophys.py
create mode 100644 astropy/units/cds.py
create mode 100644 astropy/units/cgs.py
create mode 100644 astropy/units/core.py
create mode 100644 astropy/units/decorators.py
create mode 100644 astropy/units/deprecated.py
create mode 100644 astropy/units/equivalencies.py
create mode 100644 astropy/units/format/__init__.py
create mode 100644 astropy/units/format/base.py
create mode 100644 astropy/units/format/cds.py
create mode 100644 astropy/units/format/cds_lextab.py
create mode 100644 astropy/units/format/cds_parsetab.py
create mode 100644 astropy/units/format/console.py
create mode 100644 astropy/units/format/fits.py
create mode 100644 astropy/units/format/generic.py
create mode 100644 astropy/units/format/generic_lextab.py
create mode 100644 astropy/units/format/generic_parsetab.py
create mode 100644 astropy/units/format/latex.py
create mode 100644 astropy/units/format/ogip.py
create mode 100644 astropy/units/format/ogip_lextab.py
create mode 100644 astropy/units/format/ogip_parsetab.py
create mode 100644 astropy/units/format/unicode_format.py
create mode 100644 astropy/units/format/utils.py
create mode 100644 astropy/units/format/vounit.py
create mode 100644 astropy/units/function/__init__.py
create mode 100644 astropy/units/function/core.py
create mode 100644 astropy/units/function/logarithmic.py
create mode 100644 astropy/units/function/magnitude_zero_points.py
create mode 100644 astropy/units/function/mixin.py
create mode 100644 astropy/units/function/units.py
create mode 100644 astropy/units/imperial.py
create mode 100644 astropy/units/physical.py
create mode 100644 astropy/units/quantity.py
create mode 100644 astropy/units/quantity_helper.py
create mode 100644 astropy/units/required_by_vounit.py
create mode 100644 astropy/units/setup_package.py
create mode 100644 astropy/units/si.py
create mode 100644 astropy/units/tests/__init__.py
create mode 100644 astropy/units/tests/py3_test_quantity_annotations.py
create mode 100644 astropy/units/tests/test_deprecated.py
create mode 100644 astropy/units/tests/test_equivalencies.py
create mode 100644 astropy/units/tests/test_format.py
create mode 100644 astropy/units/tests/test_logarithmic.py
create mode 100644 astropy/units/tests/test_physical.py
create mode 100644 astropy/units/tests/test_quantity.py
create mode 100644 astropy/units/tests/test_quantity_array_methods.py
create mode 100644 astropy/units/tests/test_quantity_decorator.py
create mode 100644 astropy/units/tests/test_quantity_non_ufuncs.py
create mode 100644 astropy/units/tests/test_quantity_ufuncs.py
create mode 100644 astropy/units/tests/test_units.py
create mode 100644 astropy/units/utils.py
create mode 100644 astropy/utils/__init__.py
create mode 100644 astropy/utils/argparse.py
create mode 100644 astropy/utils/codegen.py
create mode 100644 astropy/utils/collections.py
create mode 100644 astropy/utils/compat/__init__.py
create mode 100644 astropy/utils/compat/_funcsigs.py
create mode 100644 astropy/utils/compat/funcsigs.py
create mode 100644 astropy/utils/compat/futures/__init__.py
create mode 100644 astropy/utils/compat/futures/_base.py
create mode 100644 astropy/utils/compat/futures/process.py
create mode 100644 astropy/utils/compat/futures/thread.py
create mode 100644 astropy/utils/compat/misc.py
create mode 100644 astropy/utils/compat/numpy/__init__.py
create mode 100644 astropy/utils/compat/numpy/core/__init__.py
create mode 100644 astropy/utils/compat/numpy/core/multiarray.py
create mode 100644 astropy/utils/compat/numpy/lib/__init__.py
create mode 100644 astropy/utils/compat/numpy/lib/stride_tricks.py
create mode 100644 astropy/utils/compat/numpy/tests/__init__.py
create mode 100644 astropy/utils/compat/numpy/tests/test_broadcast_arrays.py
create mode 100644 astropy/utils/compat/numpy/tests/test_matmul.py
create mode 100644 astropy/utils/compat/numpycompat.py
create mode 100644 astropy/utils/console.py
create mode 100644 astropy/utils/data.py
create mode 100644 astropy/utils/data_info.py
create mode 100644 astropy/utils/decorators.py
create mode 100644 astropy/utils/exceptions.py
create mode 100644 astropy/utils/iers/__init__.py
create mode 100644 astropy/utils/iers/data/ReadMe.eopc04_IAU2000
create mode 100644 astropy/utils/iers/data/ReadMe.finals2000A
create mode 100644 astropy/utils/iers/data/eopc04_IAU2000.62-now
create mode 100644 astropy/utils/iers/iers.py
create mode 100644 astropy/utils/iers/tests/__init__.py
create mode 100644 astropy/utils/iers/tests/finals2000A-2016-02-30-test
create mode 100644 astropy/utils/iers/tests/finals2000A-2016-04-30-test
create mode 100644 astropy/utils/iers/tests/iers_a_excerpt
create mode 100644 astropy/utils/iers/tests/test_iers.py
create mode 100644 astropy/utils/introspection.py
create mode 100644 astropy/utils/metadata.py
create mode 100644 astropy/utils/misc.py
create mode 100644 astropy/utils/setup_package.py
create mode 100644 astropy/utils/src/compiler.c
create mode 100644 astropy/utils/state.py
create mode 100644 astropy/utils/tests/__init__.py
create mode 100644 astropy/utils/tests/data/.hidden_file.txt
create mode 100644 astropy/utils/tests/data/alias.cfg
create mode 100644 astropy/utils/tests/data/local.dat
create mode 100644 astropy/utils/tests/data/local.dat.bz2
create mode 100644 astropy/utils/tests/data/local.dat.gz
create mode 100644 astropy/utils/tests/data/local.dat.xz
create mode 100644 astropy/utils/tests/data/test_package/__init__.py
create mode 100644 astropy/utils/tests/data/test_package/data/foo.txt
create mode 100644 astropy/utils/tests/data/unicode.txt
create mode 100644 astropy/utils/tests/data/unicode.txt.bz2
create mode 100644 astropy/utils/tests/data/unicode.txt.gz
create mode 100644 astropy/utils/tests/data/unicode.txt.xz
create mode 100644 astropy/utils/tests/test_codegen.py
create mode 100644 astropy/utils/tests/test_collections.py
create mode 100644 astropy/utils/tests/test_console.py
create mode 100644 astropy/utils/tests/test_data.py
create mode 100644 astropy/utils/tests/test_data_info.py
create mode 100644 astropy/utils/tests/test_decorators.py
astropy/utils/tests/test_decorators.py create mode 100644 astropy/utils/tests/test_introspection.py create mode 100644 astropy/utils/tests/test_metadata.py create mode 100644 astropy/utils/tests/test_misc.py create mode 100644 astropy/utils/tests/test_timer.py create mode 100644 astropy/utils/tests/test_xml.py create mode 100644 astropy/utils/timer.py create mode 100644 astropy/utils/xml/__init__.py create mode 100644 astropy/utils/xml/check.py create mode 100644 astropy/utils/xml/iterparser.py create mode 100644 astropy/utils/xml/setup_package.py create mode 100644 astropy/utils/xml/src/expat_config.h create mode 100644 astropy/utils/xml/src/iterparse.c create mode 100644 astropy/utils/xml/src/iterparse.map create mode 100644 astropy/utils/xml/tests/__init__.py create mode 100644 astropy/utils/xml/tests/test_iterparse.py create mode 100644 astropy/utils/xml/unescaper.py create mode 100644 astropy/utils/xml/validate.py create mode 100644 astropy/utils/xml/writer.py create mode 100644 astropy/version.py create mode 100644 astropy/visualization/__init__.py create mode 100644 astropy/visualization/hist.py create mode 100644 astropy/visualization/interval.py create mode 100644 astropy/visualization/lupton_rgb.py create mode 100644 astropy/visualization/mpl_normalize.py create mode 100644 astropy/visualization/mpl_style.py create mode 100644 astropy/visualization/scripts/__init__.py create mode 100644 astropy/visualization/scripts/fits2bitmap.py create mode 100644 astropy/visualization/scripts/tests/__init__.py create mode 100644 astropy/visualization/scripts/tests/test_fits2bitmap.py create mode 100644 astropy/visualization/stretch.py create mode 100644 astropy/visualization/tests/__init__.py create mode 100644 astropy/visualization/tests/test_histogram.py create mode 100644 astropy/visualization/tests/test_interval.py create mode 100644 astropy/visualization/tests/test_lupton_rgb.py create mode 100644 astropy/visualization/tests/test_norm.py create mode 100644 astropy/visualization/tests/test_stretch.py create mode 100644 astropy/visualization/tests/test_units.py create mode 100644 astropy/visualization/transform.py create mode 100644 astropy/visualization/units.py create mode 100644 astropy/visualization/wcsaxes/__init__.py create mode 100644 astropy/visualization/wcsaxes/axislabels.py create mode 100644 astropy/visualization/wcsaxes/coordinate_helpers.py create mode 100644 astropy/visualization/wcsaxes/coordinate_range.py create mode 100644 astropy/visualization/wcsaxes/coordinates_map.py create mode 100644 astropy/visualization/wcsaxes/core.py create mode 100644 astropy/visualization/wcsaxes/formatter_locator.py create mode 100644 astropy/visualization/wcsaxes/frame.py create mode 100644 astropy/visualization/wcsaxes/grid_paths.py create mode 100644 astropy/visualization/wcsaxes/patches.py create mode 100644 astropy/visualization/wcsaxes/tests/__init__.py create mode 100644 astropy/visualization/wcsaxes/tests/data/2MASS_k_header create mode 100644 astropy/visualization/wcsaxes/tests/data/cube_header create mode 100644 astropy/visualization/wcsaxes/tests/data/msx_header create mode 100644 astropy/visualization/wcsaxes/tests/data/rosat_header create mode 100644 astropy/visualization/wcsaxes/tests/data/slice_header create mode 100644 astropy/visualization/wcsaxes/tests/datasets.py create mode 100644 astropy/visualization/wcsaxes/tests/setup_package.py create mode 100644 astropy/visualization/wcsaxes/tests/test_coordinate_helpers.py create mode 100644 
astropy/visualization/wcsaxes/tests/test_display_world_coordinates.py create mode 100644 astropy/visualization/wcsaxes/tests/test_formatter_locator.py create mode 100644 astropy/visualization/wcsaxes/tests/test_frame.py create mode 100644 astropy/visualization/wcsaxes/tests/test_images.py create mode 100644 astropy/visualization/wcsaxes/tests/test_misc.py create mode 100644 astropy/visualization/wcsaxes/tests/test_transform_coord_meta.py create mode 100644 astropy/visualization/wcsaxes/tests/test_transforms.py create mode 100644 astropy/visualization/wcsaxes/tests/test_utils.py create mode 100644 astropy/visualization/wcsaxes/ticklabels.py create mode 100644 astropy/visualization/wcsaxes/ticks.py create mode 100644 astropy/visualization/wcsaxes/transforms.py create mode 100644 astropy/visualization/wcsaxes/utils.py create mode 100644 astropy/vo/__init__.py create mode 100644 astropy/vo/client/__init__.py create mode 100644 astropy/vo/client/async.py create mode 100644 astropy/vo/client/conesearch.py create mode 100644 astropy/vo/client/exceptions.py create mode 100755 astropy/vo/client/setup_package.py create mode 100644 astropy/vo/client/tests/__init__.py create mode 100644 astropy/vo/client/tests/data/basic.json create mode 100644 astropy/vo/client/tests/data/conesearch_error1.xml create mode 100644 astropy/vo/client/tests/data/conesearch_error2.xml create mode 100644 astropy/vo/client/tests/data/conesearch_error3.xml create mode 100644 astropy/vo/client/tests/data/conesearch_error4.xml create mode 100644 astropy/vo/client/tests/test_conesearch.py create mode 100644 astropy/vo/client/tests/test_vos_catalog.py create mode 100644 astropy/vo/client/vos_catalog.py create mode 100644 astropy/vo/samp/__init__.py create mode 100644 astropy/vo/validator/__init__.py create mode 100644 astropy/vo/validator/data/conesearch_urls.txt create mode 100644 astropy/vo/validator/exceptions.py create mode 100644 astropy/vo/validator/inspect.py create mode 100755 astropy/vo/validator/setup_package.py create mode 100644 astropy/vo/validator/tests/__init__.py create mode 100644 astropy/vo/validator/tests/data/conesearch_error.json create mode 100644 astropy/vo/validator/tests/data/conesearch_exception.json create mode 100644 astropy/vo/validator/tests/data/conesearch_good.json create mode 100644 astropy/vo/validator/tests/data/conesearch_warn.json create mode 100644 astropy/vo/validator/tests/data/listcats1.out create mode 100644 astropy/vo/validator/tests/data/listcats2.out create mode 100644 astropy/vo/validator/tests/data/printcat.out create mode 100644 astropy/vo/validator/tests/data/tally.out create mode 100644 astropy/vo/validator/tests/data/vao_conesearch_sites_121107_subset.xml create mode 100644 astropy/vo/validator/tests/test_inpect.py create mode 100644 astropy/vo/validator/tests/test_validate.py create mode 100644 astropy/vo/validator/tstquery.py create mode 100644 astropy/vo/validator/validate.py create mode 100644 astropy/wcs/__init__.py create mode 100644 astropy/wcs/_docutil.py create mode 100644 astropy/wcs/docstrings.py create mode 100644 astropy/wcs/include/astropy_wcs/astropy_wcs.h create mode 100644 astropy/wcs/include/astropy_wcs/astropy_wcs_api.h create mode 100644 astropy/wcs/include/astropy_wcs/distortion.h create mode 100644 astropy/wcs/include/astropy_wcs/distortion_wrap.h create mode 100644 astropy/wcs/include/astropy_wcs/docstrings.h create mode 100644 astropy/wcs/include/astropy_wcs/isnan.h create mode 100644 astropy/wcs/include/astropy_wcs/pipeline.h create mode 100644 
astropy/wcs/include/astropy_wcs/pyutil.h create mode 100644 astropy/wcs/include/astropy_wcs/sip.h create mode 100644 astropy/wcs/include/astropy_wcs/sip_wrap.h create mode 100644 astropy/wcs/include/astropy_wcs/str_list_proxy.h create mode 100644 astropy/wcs/include/astropy_wcs/unit_list_proxy.h create mode 100644 astropy/wcs/include/astropy_wcs/util.h create mode 100644 astropy/wcs/include/astropy_wcs/wcsconfig.h create mode 100644 astropy/wcs/include/astropy_wcs/wcslib_tabprm_wrap.h create mode 100644 astropy/wcs/include/astropy_wcs/wcslib_units_wrap.h create mode 100644 astropy/wcs/include/astropy_wcs/wcslib_wrap.h create mode 100644 astropy/wcs/include/astropy_wcs/wcslib_wtbarr_wrap.h create mode 100644 astropy/wcs/include/astropy_wcs_api.h create mode 100644 astropy/wcs/include/wcsconfig.h create mode 100644 astropy/wcs/include/wcslib/cel.h create mode 100644 astropy/wcs/include/wcslib/lin.h create mode 100644 astropy/wcs/include/wcslib/prj.h create mode 100644 astropy/wcs/include/wcslib/spc.h create mode 100644 astropy/wcs/include/wcslib/spx.h create mode 100644 astropy/wcs/include/wcslib/tab.h create mode 100644 astropy/wcs/include/wcslib/wcs.h create mode 100644 astropy/wcs/include/wcslib/wcserr.h create mode 100644 astropy/wcs/include/wcslib/wcsmath.h create mode 100644 astropy/wcs/include/wcslib/wcsprintf.h create mode 100644 astropy/wcs/setup_package.py create mode 100644 astropy/wcs/src/astropy_wcs.c create mode 100644 astropy/wcs/src/astropy_wcs_api.c create mode 100644 astropy/wcs/src/distortion.c create mode 100644 astropy/wcs/src/distortion_wrap.c create mode 100644 astropy/wcs/src/docstrings.c create mode 100644 astropy/wcs/src/pipeline.c create mode 100644 astropy/wcs/src/pyutil.c create mode 100644 astropy/wcs/src/sip.c create mode 100644 astropy/wcs/src/sip_wrap.c create mode 100644 astropy/wcs/src/str_list_proxy.c create mode 100644 astropy/wcs/src/unit_list_proxy.c create mode 100644 astropy/wcs/src/util.c create mode 100644 astropy/wcs/src/wcslib_tabprm_wrap.c create mode 100644 astropy/wcs/src/wcslib_wrap.c create mode 100644 astropy/wcs/tests/__init__.py create mode 100644 astropy/wcs/tests/data/2wcses.hdr create mode 100644 astropy/wcs/tests/data/3d_cd.hdr create mode 100644 astropy/wcs/tests/data/defunct_keywords.hdr create mode 100644 astropy/wcs/tests/data/dist.fits create mode 100644 astropy/wcs/tests/data/dist_lookup.fits.gz create mode 100644 astropy/wcs/tests/data/header_newlines.fits create mode 100644 astropy/wcs/tests/data/invalid_header.hdr create mode 100644 astropy/wcs/tests/data/irac_sip.hdr create mode 100644 astropy/wcs/tests/data/j94f05bgq_flt.fits create mode 100644 astropy/wcs/tests/data/locale.hdr create mode 100644 astropy/wcs/tests/data/nonstandard_units.hdr create mode 100644 astropy/wcs/tests/data/outside_sky.hdr create mode 100644 astropy/wcs/tests/data/sip-broken.hdr create mode 100644 astropy/wcs/tests/data/sip.fits create mode 100644 astropy/wcs/tests/data/sip2.fits create mode 100644 astropy/wcs/tests/data/siponly.hdr create mode 100644 astropy/wcs/tests/data/sub-segfault.hdr create mode 100644 astropy/wcs/tests/data/too_many_pv.hdr create mode 100644 astropy/wcs/tests/data/tpvonly.hdr create mode 100644 astropy/wcs/tests/data/unit.hdr create mode 100644 astropy/wcs/tests/data/validate.5.0.txt create mode 100644 astropy/wcs/tests/data/validate.5.13.txt create mode 100644 astropy/wcs/tests/data/validate.fits create mode 100644 astropy/wcs/tests/data/validate.txt create mode 100644 astropy/wcs/tests/data/zpn-hole.hdr create mode 100644 
astropy/wcs/tests/extension/__init__.py create mode 100644 astropy/wcs/tests/extension/setup.py create mode 100644 astropy/wcs/tests/extension/test_extension.py create mode 100644 astropy/wcs/tests/extension/wcsapi_test.c create mode 100644 astropy/wcs/tests/maps/1904-66_AIR.hdr create mode 100644 astropy/wcs/tests/maps/1904-66_AIT.hdr create mode 100644 astropy/wcs/tests/maps/1904-66_ARC.hdr create mode 100644 astropy/wcs/tests/maps/1904-66_AZP.hdr create mode 100644 astropy/wcs/tests/maps/1904-66_BON.hdr create mode 100644 astropy/wcs/tests/maps/1904-66_CAR.hdr create mode 100644 astropy/wcs/tests/maps/1904-66_CEA.hdr create mode 100644 astropy/wcs/tests/maps/1904-66_COD.hdr create mode 100644 astropy/wcs/tests/maps/1904-66_COE.hdr create mode 100644 astropy/wcs/tests/maps/1904-66_COO.hdr create mode 100644 astropy/wcs/tests/maps/1904-66_COP.hdr create mode 100644 astropy/wcs/tests/maps/1904-66_CSC.hdr create mode 100644 astropy/wcs/tests/maps/1904-66_CYP.hdr create mode 100644 astropy/wcs/tests/maps/1904-66_HPX.hdr create mode 100644 astropy/wcs/tests/maps/1904-66_MER.hdr create mode 100644 astropy/wcs/tests/maps/1904-66_MOL.hdr create mode 100644 astropy/wcs/tests/maps/1904-66_NCP.hdr create mode 100644 astropy/wcs/tests/maps/1904-66_PAR.hdr create mode 100644 astropy/wcs/tests/maps/1904-66_PCO.hdr create mode 100644 astropy/wcs/tests/maps/1904-66_QSC.hdr create mode 100644 astropy/wcs/tests/maps/1904-66_SFL.hdr create mode 100644 astropy/wcs/tests/maps/1904-66_SIN.hdr create mode 100644 astropy/wcs/tests/maps/1904-66_STG.hdr create mode 100644 astropy/wcs/tests/maps/1904-66_SZP.hdr create mode 100644 astropy/wcs/tests/maps/1904-66_TAN.hdr create mode 100644 astropy/wcs/tests/maps/1904-66_TSC.hdr create mode 100644 astropy/wcs/tests/maps/1904-66_ZEA.hdr create mode 100644 astropy/wcs/tests/maps/1904-66_ZPN.hdr create mode 100644 astropy/wcs/tests/spectra/orion-freq-1.hdr create mode 100644 astropy/wcs/tests/spectra/orion-freq-4.hdr create mode 100644 astropy/wcs/tests/spectra/orion-velo-1.hdr create mode 100644 astropy/wcs/tests/spectra/orion-velo-4.hdr create mode 100644 astropy/wcs/tests/spectra/orion-wave-1.hdr create mode 100644 astropy/wcs/tests/spectra/orion-wave-4.hdr create mode 100644 astropy/wcs/tests/test_pickle.py create mode 100644 astropy/wcs/tests/test_profiling.py create mode 100644 astropy/wcs/tests/test_utils.py create mode 100644 astropy/wcs/tests/test_wcs.py create mode 100644 astropy/wcs/tests/test_wcsprm.py create mode 100644 astropy/wcs/utils.py create mode 100644 astropy/wcs/wcs.py create mode 100644 astropy/wcs/wcslint.py create mode 100644 astropy_helpers/CHANGES.rst create mode 100644 astropy_helpers/LICENSE.rst create mode 100644 astropy_helpers/README.rst create mode 100644 astropy_helpers/ah_bootstrap.py create mode 100644 astropy_helpers/astropy_helpers.egg-info/PKG-INFO create mode 100644 astropy_helpers/astropy_helpers.egg-info/SOURCES.txt create mode 100644 astropy_helpers/astropy_helpers.egg-info/dependency_links.txt create mode 100644 astropy_helpers/astropy_helpers.egg-info/not-zip-safe create mode 100644 astropy_helpers/astropy_helpers.egg-info/top_level.txt create mode 100644 astropy_helpers/astropy_helpers/__init__.py create mode 100644 astropy_helpers/astropy_helpers/commands/__init__.py create mode 100644 astropy_helpers/astropy_helpers/commands/_dummy.py create mode 100644 astropy_helpers/astropy_helpers/commands/_test_compat.py create mode 100644 astropy_helpers/astropy_helpers/commands/build_ext.py create mode 100644 
astropy_helpers/astropy_helpers/commands/build_py.py create mode 100644 astropy_helpers/astropy_helpers/commands/build_sphinx.py create mode 100644 astropy_helpers/astropy_helpers/commands/install.py create mode 100644 astropy_helpers/astropy_helpers/commands/install_lib.py create mode 100644 astropy_helpers/astropy_helpers/commands/register.py create mode 100644 astropy_helpers/astropy_helpers/commands/setup_package.py create mode 100644 astropy_helpers/astropy_helpers/commands/src/compiler.c create mode 100644 astropy_helpers/astropy_helpers/commands/test.py create mode 100644 astropy_helpers/astropy_helpers/compat/__init__.py create mode 100644 astropy_helpers/astropy_helpers/distutils_helpers.py create mode 100644 astropy_helpers/astropy_helpers/extern/__init__.py create mode 100644 astropy_helpers/astropy_helpers/extern/automodapi/__init__.py create mode 100644 astropy_helpers/astropy_helpers/extern/automodapi/autodoc_enhancements.py create mode 100644 astropy_helpers/astropy_helpers/extern/automodapi/automodapi.py create mode 100644 astropy_helpers/astropy_helpers/extern/automodapi/automodsumm.py create mode 100644 astropy_helpers/astropy_helpers/extern/automodapi/smart_resolver.py create mode 100644 astropy_helpers/astropy_helpers/extern/automodapi/templates/autosummary_core/base.rst create mode 100644 astropy_helpers/astropy_helpers/extern/automodapi/templates/autosummary_core/class.rst create mode 100644 astropy_helpers/astropy_helpers/extern/automodapi/templates/autosummary_core/module.rst create mode 100644 astropy_helpers/astropy_helpers/extern/automodapi/utils.py create mode 100644 astropy_helpers/astropy_helpers/extern/numpydoc/__init__.py create mode 100644 astropy_helpers/astropy_helpers/extern/numpydoc/docscrape.py create mode 100644 astropy_helpers/astropy_helpers/extern/numpydoc/docscrape_sphinx.py create mode 100644 astropy_helpers/astropy_helpers/extern/numpydoc/numpydoc.py create mode 100644 astropy_helpers/astropy_helpers/extern/numpydoc/templates/numpydoc_docstring.rst create mode 100644 astropy_helpers/astropy_helpers/extern/setup_package.py create mode 100644 astropy_helpers/astropy_helpers/git_helpers.py create mode 100644 astropy_helpers/astropy_helpers/openmp_helpers.py create mode 100644 astropy_helpers/astropy_helpers/setup_helpers.py create mode 100644 astropy_helpers/astropy_helpers/sphinx/__init__.py create mode 100644 astropy_helpers/astropy_helpers/sphinx/conf.py create mode 100644 astropy_helpers/astropy_helpers/sphinx/ext/__init__.py create mode 100644 astropy_helpers/astropy_helpers/sphinx/ext/changelog_links.py create mode 100644 astropy_helpers/astropy_helpers/sphinx/ext/doctest.py create mode 100644 astropy_helpers/astropy_helpers/sphinx/ext/edit_on_github.py create mode 100644 astropy_helpers/astropy_helpers/sphinx/ext/tests/__init__.py create mode 100644 astropy_helpers/astropy_helpers/sphinx/ext/tocdepthfix.py create mode 100644 astropy_helpers/astropy_helpers/sphinx/local/python2_local_links.inv create mode 100644 astropy_helpers/astropy_helpers/sphinx/local/python2_local_links.txt create mode 100644 astropy_helpers/astropy_helpers/sphinx/local/python3_local_links.inv create mode 100644 astropy_helpers/astropy_helpers/sphinx/local/python3_local_links.txt create mode 100644 astropy_helpers/astropy_helpers/sphinx/setup_package.py create mode 100644 astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/globaltoc.html create mode 100644 astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/layout.html create mode 100644 
astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/localtoc.html create mode 100644 astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/searchbox.html create mode 100644 astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/astropy_linkout.svg create mode 100644 astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/astropy_linkout_20.png create mode 100644 astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/astropy_logo.ico create mode 100644 astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/astropy_logo.svg create mode 100644 astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/astropy_logo_32.png create mode 100644 astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/bootstrap-astropy.css create mode 100644 astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/copybutton.js create mode 100644 astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/sidebar.js create mode 100644 astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/theme.conf create mode 100644 astropy_helpers/astropy_helpers/test_helpers.py create mode 100644 astropy_helpers/astropy_helpers/utils.py create mode 100644 astropy_helpers/astropy_helpers/version.py create mode 100644 astropy_helpers/astropy_helpers/version_helpers.py create mode 100644 astropy_helpers/ez_setup.py create mode 100644 astropy_helpers/licenses/LICENSE_ASTROSCRAPPY.rst create mode 100644 astropy_helpers/licenses/LICENSE_COPYBUTTON.rst create mode 100644 astropy_helpers/licenses/LICENSE_NUMPYDOC.rst create mode 100644 cextern/.gitignore create mode 100644 cextern/README.rst create mode 100644 cextern/cfitsio/License.txt create mode 100644 cextern/cfitsio/adler32.c create mode 100644 cextern/cfitsio/buffers.c create mode 100644 cextern/cfitsio/cfileio.c create mode 100644 cextern/cfitsio/changes.txt create mode 100644 cextern/cfitsio/checksum.c create mode 100644 cextern/cfitsio/crc32.c create mode 100644 cextern/cfitsio/crc32.h create mode 100644 cextern/cfitsio/deflate.c create mode 100644 cextern/cfitsio/deflate.h create mode 100644 cextern/cfitsio/drvrfile.c create mode 100644 cextern/cfitsio/drvrgsiftp.c create mode 100644 cextern/cfitsio/drvrgsiftp.h create mode 100644 cextern/cfitsio/drvrmem.c create mode 100644 cextern/cfitsio/drvrnet.c create mode 100644 cextern/cfitsio/drvrsmem.c create mode 100644 cextern/cfitsio/drvrsmem.h create mode 100644 cextern/cfitsio/editcol.c create mode 100644 cextern/cfitsio/edithdu.c create mode 100644 cextern/cfitsio/eval.l create mode 100644 cextern/cfitsio/eval.y create mode 100644 cextern/cfitsio/eval_defs.h create mode 100644 cextern/cfitsio/eval_f.c create mode 100644 cextern/cfitsio/eval_l.c create mode 100644 cextern/cfitsio/eval_tab.h create mode 100644 cextern/cfitsio/eval_y.c create mode 100644 cextern/cfitsio/fits_hcompress.c create mode 100644 cextern/cfitsio/fits_hdecompress.c create mode 100644 cextern/cfitsio/fitscore.c create mode 100644 cextern/cfitsio/fitsio.h create mode 100644 cextern/cfitsio/fitsio2.h create mode 100644 cextern/cfitsio/getcol.c create mode 100644 cextern/cfitsio/getcolb.c create mode 100644 cextern/cfitsio/getcold.c create mode 100644 cextern/cfitsio/getcole.c create mode 100644 cextern/cfitsio/getcoli.c create mode 100644 cextern/cfitsio/getcolj.c create mode 100644 cextern/cfitsio/getcolk.c create mode 100644 cextern/cfitsio/getcoll.c create mode 100644 cextern/cfitsio/getcols.c create mode 100644 
cextern/cfitsio/getcolsb.c create mode 100644 cextern/cfitsio/getcolui.c create mode 100644 cextern/cfitsio/getcoluj.c create mode 100644 cextern/cfitsio/getcoluk.c create mode 100644 cextern/cfitsio/getkey.c create mode 100644 cextern/cfitsio/group.c create mode 100644 cextern/cfitsio/group.h create mode 100644 cextern/cfitsio/grparser.c create mode 100644 cextern/cfitsio/grparser.h create mode 100644 cextern/cfitsio/histo.c create mode 100644 cextern/cfitsio/imcompress.c create mode 100644 cextern/cfitsio/infback.c create mode 100644 cextern/cfitsio/inffast.c create mode 100644 cextern/cfitsio/inffast.h create mode 100644 cextern/cfitsio/inffixed.h create mode 100644 cextern/cfitsio/inflate.c create mode 100644 cextern/cfitsio/inflate.h create mode 100644 cextern/cfitsio/inftrees.c create mode 100644 cextern/cfitsio/inftrees.h create mode 100644 cextern/cfitsio/iraffits.c create mode 100644 cextern/cfitsio/longnam.h create mode 100644 cextern/cfitsio/modkey.c create mode 100644 cextern/cfitsio/pliocomp.c create mode 100644 cextern/cfitsio/putcol.c create mode 100644 cextern/cfitsio/putcolb.c create mode 100644 cextern/cfitsio/putcold.c create mode 100644 cextern/cfitsio/putcole.c create mode 100644 cextern/cfitsio/putcoli.c create mode 100644 cextern/cfitsio/putcolj.c create mode 100644 cextern/cfitsio/putcolk.c create mode 100644 cextern/cfitsio/putcoll.c create mode 100644 cextern/cfitsio/putcols.c create mode 100644 cextern/cfitsio/putcolsb.c create mode 100644 cextern/cfitsio/putcolu.c create mode 100644 cextern/cfitsio/putcolui.c create mode 100644 cextern/cfitsio/putcoluj.c create mode 100644 cextern/cfitsio/putcoluk.c create mode 100644 cextern/cfitsio/putkey.c create mode 100644 cextern/cfitsio/quantize.c create mode 100644 cextern/cfitsio/region.c create mode 100644 cextern/cfitsio/region.h create mode 100644 cextern/cfitsio/ricecomp.c create mode 100644 cextern/cfitsio/scalnull.c create mode 100644 cextern/cfitsio/swapproc.c create mode 100644 cextern/cfitsio/trees.c create mode 100644 cextern/cfitsio/trees.h create mode 100644 cextern/cfitsio/uncompr.c create mode 100644 cextern/cfitsio/wcssub.c create mode 100644 cextern/cfitsio/wcsutil.c create mode 100644 cextern/cfitsio/zcompress.c create mode 100644 cextern/cfitsio/zconf.h create mode 100644 cextern/cfitsio/zlib.h create mode 100644 cextern/cfitsio/zuncompress.c create mode 100644 cextern/cfitsio/zutil.c create mode 100644 cextern/cfitsio/zutil.h create mode 100644 cextern/erfa/README.rst create mode 100644 cextern/erfa/a2af.c create mode 100644 cextern/erfa/a2tf.c create mode 100644 cextern/erfa/ab.c create mode 100644 cextern/erfa/af2a.c create mode 100644 cextern/erfa/anp.c create mode 100644 cextern/erfa/anpm.c create mode 100644 cextern/erfa/apcg.c create mode 100644 cextern/erfa/apcg13.c create mode 100644 cextern/erfa/apci.c create mode 100644 cextern/erfa/apci13.c create mode 100644 cextern/erfa/apco.c create mode 100644 cextern/erfa/apco13.c create mode 100644 cextern/erfa/apcs.c create mode 100644 cextern/erfa/apcs13.c create mode 100644 cextern/erfa/aper.c create mode 100644 cextern/erfa/aper13.c create mode 100644 cextern/erfa/apio.c create mode 100644 cextern/erfa/apio13.c create mode 100644 cextern/erfa/atci13.c create mode 100644 cextern/erfa/atciq.c create mode 100644 cextern/erfa/atciqn.c create mode 100644 cextern/erfa/atciqz.c create mode 100644 cextern/erfa/atco13.c create mode 100644 cextern/erfa/atic13.c create mode 100644 cextern/erfa/aticq.c create mode 100644 cextern/erfa/aticqn.c create mode 
100644 cextern/erfa/atio13.c create mode 100644 cextern/erfa/atioq.c create mode 100644 cextern/erfa/atoc13.c create mode 100644 cextern/erfa/atoi13.c create mode 100644 cextern/erfa/atoiq.c create mode 100644 cextern/erfa/bi00.c create mode 100644 cextern/erfa/bp00.c create mode 100644 cextern/erfa/bp06.c create mode 100644 cextern/erfa/bpn2xy.c create mode 100644 cextern/erfa/c2i00a.c create mode 100644 cextern/erfa/c2i00b.c create mode 100644 cextern/erfa/c2i06a.c create mode 100644 cextern/erfa/c2ibpn.c create mode 100644 cextern/erfa/c2ixy.c create mode 100644 cextern/erfa/c2ixys.c create mode 100644 cextern/erfa/c2s.c create mode 100644 cextern/erfa/c2t00a.c create mode 100644 cextern/erfa/c2t00b.c create mode 100644 cextern/erfa/c2t06a.c create mode 100644 cextern/erfa/c2tcio.c create mode 100644 cextern/erfa/c2teqx.c create mode 100644 cextern/erfa/c2tpe.c create mode 100644 cextern/erfa/c2txy.c create mode 100644 cextern/erfa/cal2jd.c create mode 100644 cextern/erfa/cp.c create mode 100644 cextern/erfa/cpv.c create mode 100644 cextern/erfa/cr.c create mode 100644 cextern/erfa/d2dtf.c create mode 100644 cextern/erfa/d2tf.c create mode 100644 cextern/erfa/dat.c create mode 100644 cextern/erfa/dtdb.c create mode 100644 cextern/erfa/dtf2d.c create mode 100644 cextern/erfa/eceq06.c create mode 100644 cextern/erfa/ecm06.c create mode 100644 cextern/erfa/ee00.c create mode 100644 cextern/erfa/ee00a.c create mode 100644 cextern/erfa/ee00b.c create mode 100644 cextern/erfa/ee06a.c create mode 100644 cextern/erfa/eect00.c create mode 100644 cextern/erfa/eform.c create mode 100644 cextern/erfa/eo06a.c create mode 100644 cextern/erfa/eors.c create mode 100644 cextern/erfa/epb.c create mode 100644 cextern/erfa/epb2jd.c create mode 100644 cextern/erfa/epj.c create mode 100644 cextern/erfa/epj2jd.c create mode 100644 cextern/erfa/epv00.c create mode 100644 cextern/erfa/eqec06.c create mode 100644 cextern/erfa/eqeq94.c create mode 100644 cextern/erfa/era00.c create mode 100644 cextern/erfa/erfa.h create mode 100644 cextern/erfa/erfaextra.h create mode 100644 cextern/erfa/erfam.h create mode 100644 cextern/erfa/erfaversion.c create mode 100644 cextern/erfa/fad03.c create mode 100644 cextern/erfa/fae03.c create mode 100644 cextern/erfa/faf03.c create mode 100644 cextern/erfa/faju03.c create mode 100644 cextern/erfa/fal03.c create mode 100644 cextern/erfa/falp03.c create mode 100644 cextern/erfa/fama03.c create mode 100644 cextern/erfa/fame03.c create mode 100644 cextern/erfa/fane03.c create mode 100644 cextern/erfa/faom03.c create mode 100644 cextern/erfa/fapa03.c create mode 100644 cextern/erfa/fasa03.c create mode 100644 cextern/erfa/faur03.c create mode 100644 cextern/erfa/fave03.c create mode 100644 cextern/erfa/fk52h.c create mode 100644 cextern/erfa/fk5hip.c create mode 100644 cextern/erfa/fk5hz.c create mode 100644 cextern/erfa/fw2m.c create mode 100644 cextern/erfa/fw2xy.c create mode 100644 cextern/erfa/g2icrs.c create mode 100644 cextern/erfa/gc2gd.c create mode 100644 cextern/erfa/gc2gde.c create mode 100644 cextern/erfa/gd2gc.c create mode 100644 cextern/erfa/gd2gce.c create mode 100644 cextern/erfa/gmst00.c create mode 100644 cextern/erfa/gmst06.c create mode 100644 cextern/erfa/gmst82.c create mode 100644 cextern/erfa/gst00a.c create mode 100644 cextern/erfa/gst00b.c create mode 100644 cextern/erfa/gst06.c create mode 100644 cextern/erfa/gst06a.c create mode 100644 cextern/erfa/gst94.c create mode 100644 cextern/erfa/h2fk5.c create mode 100644 cextern/erfa/hfk5z.c create mode 100644 
cextern/erfa/icrs2g.c create mode 100644 cextern/erfa/ir.c create mode 100644 cextern/erfa/jd2cal.c create mode 100644 cextern/erfa/jdcalf.c create mode 100644 cextern/erfa/ld.c create mode 100644 cextern/erfa/ldn.c create mode 100644 cextern/erfa/ldsun.c create mode 100644 cextern/erfa/lteceq.c create mode 100644 cextern/erfa/ltecm.c create mode 100644 cextern/erfa/lteqec.c create mode 100644 cextern/erfa/ltp.c create mode 100644 cextern/erfa/ltpb.c create mode 100644 cextern/erfa/ltpecl.c create mode 100644 cextern/erfa/ltpequ.c create mode 100644 cextern/erfa/num00a.c create mode 100644 cextern/erfa/num00b.c create mode 100644 cextern/erfa/num06a.c create mode 100644 cextern/erfa/numat.c create mode 100644 cextern/erfa/nut00a.c create mode 100644 cextern/erfa/nut00b.c create mode 100644 cextern/erfa/nut06a.c create mode 100644 cextern/erfa/nut80.c create mode 100644 cextern/erfa/nutm80.c create mode 100644 cextern/erfa/obl06.c create mode 100644 cextern/erfa/obl80.c create mode 100644 cextern/erfa/p06e.c create mode 100644 cextern/erfa/p2pv.c create mode 100644 cextern/erfa/p2s.c create mode 100644 cextern/erfa/pap.c create mode 100644 cextern/erfa/pas.c create mode 100644 cextern/erfa/pb06.c create mode 100644 cextern/erfa/pdp.c create mode 100644 cextern/erfa/pfw06.c create mode 100644 cextern/erfa/plan94.c create mode 100644 cextern/erfa/pm.c create mode 100644 cextern/erfa/pmat00.c create mode 100644 cextern/erfa/pmat06.c create mode 100644 cextern/erfa/pmat76.c create mode 100644 cextern/erfa/pmp.c create mode 100644 cextern/erfa/pmpx.c create mode 100644 cextern/erfa/pmsafe.c create mode 100644 cextern/erfa/pn.c create mode 100644 cextern/erfa/pn00.c create mode 100644 cextern/erfa/pn00a.c create mode 100644 cextern/erfa/pn00b.c create mode 100644 cextern/erfa/pn06.c create mode 100644 cextern/erfa/pn06a.c create mode 100644 cextern/erfa/pnm00a.c create mode 100644 cextern/erfa/pnm00b.c create mode 100644 cextern/erfa/pnm06a.c create mode 100644 cextern/erfa/pnm80.c create mode 100644 cextern/erfa/pom00.c create mode 100644 cextern/erfa/ppp.c create mode 100644 cextern/erfa/ppsp.c create mode 100644 cextern/erfa/pr00.c create mode 100644 cextern/erfa/prec76.c create mode 100644 cextern/erfa/pv2p.c create mode 100644 cextern/erfa/pv2s.c create mode 100644 cextern/erfa/pvdpv.c create mode 100644 cextern/erfa/pvm.c create mode 100644 cextern/erfa/pvmpv.c create mode 100644 cextern/erfa/pvppv.c create mode 100644 cextern/erfa/pvstar.c create mode 100644 cextern/erfa/pvtob.c create mode 100644 cextern/erfa/pvu.c create mode 100644 cextern/erfa/pvup.c create mode 100644 cextern/erfa/pvxpv.c create mode 100644 cextern/erfa/pxp.c create mode 100644 cextern/erfa/refco.c create mode 100644 cextern/erfa/rm2v.c create mode 100644 cextern/erfa/rv2m.c create mode 100644 cextern/erfa/rx.c create mode 100644 cextern/erfa/rxp.c create mode 100644 cextern/erfa/rxpv.c create mode 100644 cextern/erfa/rxr.c create mode 100644 cextern/erfa/ry.c create mode 100644 cextern/erfa/rz.c create mode 100644 cextern/erfa/s00.c create mode 100644 cextern/erfa/s00a.c create mode 100644 cextern/erfa/s00b.c create mode 100644 cextern/erfa/s06.c create mode 100644 cextern/erfa/s06a.c create mode 100644 cextern/erfa/s2c.c create mode 100644 cextern/erfa/s2p.c create mode 100644 cextern/erfa/s2pv.c create mode 100644 cextern/erfa/s2xpv.c create mode 100644 cextern/erfa/sepp.c create mode 100644 cextern/erfa/seps.c create mode 100644 cextern/erfa/sp00.c create mode 100644 cextern/erfa/starpm.c create mode 100644 
cextern/erfa/starpv.c create mode 100644 cextern/erfa/sxp.c create mode 100644 cextern/erfa/sxpv.c create mode 100644 cextern/erfa/taitt.c create mode 100644 cextern/erfa/taiut1.c create mode 100644 cextern/erfa/taiutc.c create mode 100644 cextern/erfa/tcbtdb.c create mode 100644 cextern/erfa/tcgtt.c create mode 100644 cextern/erfa/tdbtcb.c create mode 100644 cextern/erfa/tdbtt.c create mode 100644 cextern/erfa/tf2a.c create mode 100644 cextern/erfa/tf2d.c create mode 100644 cextern/erfa/tr.c create mode 100644 cextern/erfa/trxp.c create mode 100644 cextern/erfa/trxpv.c create mode 100644 cextern/erfa/tttai.c create mode 100644 cextern/erfa/tttcg.c create mode 100644 cextern/erfa/tttdb.c create mode 100644 cextern/erfa/ttut1.c create mode 100644 cextern/erfa/ut1tai.c create mode 100644 cextern/erfa/ut1tt.c create mode 100644 cextern/erfa/ut1utc.c create mode 100644 cextern/erfa/utctai.c create mode 100644 cextern/erfa/utcut1.c create mode 100644 cextern/erfa/xy06.c create mode 100644 cextern/erfa/xys00a.c create mode 100644 cextern/erfa/xys00b.c create mode 100644 cextern/erfa/xys06a.c create mode 100644 cextern/erfa/zp.c create mode 100644 cextern/erfa/zpv.c create mode 100644 cextern/erfa/zr.c create mode 100755 cextern/expat/CMake.README create mode 100755 cextern/expat/CMakeLists.txt create mode 100755 cextern/expat/COPYING create mode 100755 cextern/expat/Changes create mode 100755 cextern/expat/ConfigureChecks.cmake create mode 100755 cextern/expat/MANIFEST create mode 100755 cextern/expat/Makefile.in create mode 100755 cextern/expat/README create mode 100644 cextern/expat/aclocal.m4 create mode 100755 cextern/expat/amiga/Makefile create mode 100755 cextern/expat/amiga/README.txt create mode 100755 cextern/expat/amiga/expat.xml create mode 100644 cextern/expat/amiga/expat_68k.c create mode 100755 cextern/expat/amiga/expat_68k.h create mode 100755 cextern/expat/amiga/expat_68k_handler_stubs.c create mode 100644 cextern/expat/amiga/expat_base.h create mode 100755 cextern/expat/amiga/expat_lib.c create mode 100755 cextern/expat/amiga/expat_vectors.c create mode 100755 cextern/expat/amiga/include/inline4/expat.h create mode 100755 cextern/expat/amiga/include/interfaces/expat.h create mode 100755 cextern/expat/amiga/include/libraries/expat.h create mode 100755 cextern/expat/amiga/include/proto/expat.h create mode 100755 cextern/expat/amiga/launch.c create mode 100755 cextern/expat/amiga/stdlib.c create mode 100755 cextern/expat/bcb5/README.txt create mode 100755 cextern/expat/bcb5/all_projects.bpg create mode 100755 cextern/expat/bcb5/elements.bpf create mode 100755 cextern/expat/bcb5/elements.bpr create mode 100755 cextern/expat/bcb5/elements.mak create mode 100755 cextern/expat/bcb5/expat.bpf create mode 100755 cextern/expat/bcb5/expat.bpr create mode 100755 cextern/expat/bcb5/expat.mak create mode 100755 cextern/expat/bcb5/expat_static.bpf create mode 100755 cextern/expat/bcb5/expat_static.bpr create mode 100755 cextern/expat/bcb5/expat_static.mak create mode 100755 cextern/expat/bcb5/expatw.bpf create mode 100755 cextern/expat/bcb5/expatw.bpr create mode 100755 cextern/expat/bcb5/expatw.mak create mode 100755 cextern/expat/bcb5/expatw_static.bpf create mode 100755 cextern/expat/bcb5/expatw_static.bpr create mode 100755 cextern/expat/bcb5/expatw_static.mak create mode 100755 cextern/expat/bcb5/libexpat_mtd.def create mode 100755 cextern/expat/bcb5/libexpatw_mtd.def create mode 100755 cextern/expat/bcb5/makefile.mak create mode 100755 cextern/expat/bcb5/outline.bpf create mode 100755 
cextern/expat/bcb5/outline.bpr create mode 100755 cextern/expat/bcb5/outline.mak create mode 100755 cextern/expat/bcb5/setup.bat create mode 100755 cextern/expat/bcb5/xmlwf.bpf create mode 100755 cextern/expat/bcb5/xmlwf.bpr create mode 100755 cextern/expat/bcb5/xmlwf.mak create mode 100755 cextern/expat/configure create mode 100755 cextern/expat/configure.in create mode 100755 cextern/expat/conftools/PrintPath create mode 100755 cextern/expat/conftools/ac_c_bigendian_cross.m4 create mode 100755 cextern/expat/conftools/expat.m4 create mode 100755 cextern/expat/conftools/get-version.sh create mode 100755 cextern/expat/conftools/install-sh create mode 100755 cextern/expat/conftools/ltmain.sh create mode 100755 cextern/expat/doc/expat.png create mode 100755 cextern/expat/doc/reference.html create mode 100755 cextern/expat/doc/style.css create mode 100755 cextern/expat/doc/valid-xhtml10.png create mode 100755 cextern/expat/doc/xmlwf.1 create mode 100755 cextern/expat/doc/xmlwf.sgml create mode 100755 cextern/expat/examples/elements.c create mode 100755 cextern/expat/examples/elements.dsp create mode 100755 cextern/expat/examples/outline.c create mode 100755 cextern/expat/examples/outline.dsp create mode 100755 cextern/expat/expat.dsw create mode 100644 cextern/expat/expat.pc.in create mode 100755 cextern/expat/expat_config.h.cmake create mode 100755 cextern/expat/expat_config.h.in create mode 100755 cextern/expat/lib/Makefile.MPW create mode 100755 cextern/expat/lib/amigaconfig.h create mode 100755 cextern/expat/lib/ascii.h create mode 100755 cextern/expat/lib/asciitab.h create mode 100755 cextern/expat/lib/expat.dsp create mode 100755 cextern/expat/lib/expat.h create mode 100755 cextern/expat/lib/expat_external.h create mode 100755 cextern/expat/lib/expat_static.dsp create mode 100755 cextern/expat/lib/expatw.dsp create mode 100755 cextern/expat/lib/expatw_static.dsp create mode 100755 cextern/expat/lib/iasciitab.h create mode 100755 cextern/expat/lib/internal.h create mode 100755 cextern/expat/lib/latin1tab.h create mode 100755 cextern/expat/lib/libexpat.def create mode 100755 cextern/expat/lib/libexpatw.def create mode 100755 cextern/expat/lib/macconfig.h create mode 100755 cextern/expat/lib/nametab.h create mode 100755 cextern/expat/lib/utf8tab.h create mode 100755 cextern/expat/lib/winconfig.h create mode 100755 cextern/expat/lib/xmlparse.c create mode 100755 cextern/expat/lib/xmlrole.c create mode 100755 cextern/expat/lib/xmlrole.h create mode 100755 cextern/expat/lib/xmltok.c create mode 100755 cextern/expat/lib/xmltok.h create mode 100755 cextern/expat/lib/xmltok_impl.c create mode 100755 cextern/expat/lib/xmltok_impl.h create mode 100755 cextern/expat/lib/xmltok_ns.c create mode 100644 cextern/expat/m4/libtool.m4 create mode 100644 cextern/expat/m4/ltoptions.m4 create mode 100644 cextern/expat/m4/ltsugar.m4 create mode 100644 cextern/expat/m4/ltversion.m4 create mode 100644 cextern/expat/m4/lt~obsolete.m4 create mode 100755 cextern/expat/tests/README.txt create mode 100755 cextern/expat/tests/benchmark/README.txt create mode 100755 cextern/expat/tests/benchmark/benchmark.c create mode 100755 cextern/expat/tests/benchmark/benchmark.dsp create mode 100755 cextern/expat/tests/benchmark/benchmark.dsw create mode 100755 cextern/expat/tests/chardata.c create mode 100755 cextern/expat/tests/chardata.h create mode 100755 cextern/expat/tests/minicheck.c create mode 100755 cextern/expat/tests/minicheck.h create mode 100755 cextern/expat/tests/runtests.c create mode 100755 
cextern/expat/tests/runtestspp.cpp create mode 100755 cextern/expat/tests/xmltest.sh create mode 100755 cextern/expat/vms/README.vms create mode 100755 cextern/expat/vms/descrip.mms create mode 100755 cextern/expat/vms/expat_config.h create mode 100755 cextern/expat/win32/MANIFEST.txt create mode 100755 cextern/expat/win32/README.txt create mode 100755 cextern/expat/win32/expat.iss create mode 100755 cextern/expat/xmlwf/codepage.c create mode 100755 cextern/expat/xmlwf/codepage.h create mode 100755 cextern/expat/xmlwf/ct.c create mode 100755 cextern/expat/xmlwf/filemap.h create mode 100755 cextern/expat/xmlwf/readfilemap.c create mode 100755 cextern/expat/xmlwf/unixfilemap.c create mode 100755 cextern/expat/xmlwf/win32filemap.c create mode 100755 cextern/expat/xmlwf/xmlfile.c create mode 100755 cextern/expat/xmlwf/xmlfile.h create mode 100755 cextern/expat/xmlwf/xmlmime.c create mode 100755 cextern/expat/xmlwf/xmlmime.h create mode 100755 cextern/expat/xmlwf/xmltchar.h create mode 100755 cextern/expat/xmlwf/xmlurl.h create mode 100755 cextern/expat/xmlwf/xmlwf.c create mode 100755 cextern/expat/xmlwf/xmlwf.dsp create mode 100755 cextern/expat/xmlwf/xmlwin32url.cxx create mode 100755 cextern/trim_wcslib.sh create mode 100644 cextern/wcslib/C/GNUmakefile create mode 100644 cextern/wcslib/C/cel.c create mode 100644 cextern/wcslib/C/cel.h create mode 100644 cextern/wcslib/C/dis.c create mode 100644 cextern/wcslib/C/dis.h create mode 100644 cextern/wcslib/C/fitshdr.h create mode 100644 cextern/wcslib/C/fitshdr.l create mode 100644 cextern/wcslib/C/flexed/README create mode 100644 cextern/wcslib/C/flexed/fitshdr.c create mode 100644 cextern/wcslib/C/flexed/wcsbth.c create mode 100644 cextern/wcslib/C/flexed/wcspih.c create mode 100644 cextern/wcslib/C/flexed/wcsulex.c create mode 100644 cextern/wcslib/C/flexed/wcsutrn.c create mode 100644 cextern/wcslib/C/getwcstab.c create mode 100644 cextern/wcslib/C/getwcstab.h create mode 100644 cextern/wcslib/C/lin.c create mode 100644 cextern/wcslib/C/lin.h create mode 100644 cextern/wcslib/C/log.c create mode 100644 cextern/wcslib/C/log.h create mode 100644 cextern/wcslib/C/prj.c create mode 100644 cextern/wcslib/C/prj.h create mode 100644 cextern/wcslib/C/spc.c create mode 100644 cextern/wcslib/C/spc.h create mode 100644 cextern/wcslib/C/sph.c create mode 100644 cextern/wcslib/C/sph.h create mode 100644 cextern/wcslib/C/spx.c create mode 100644 cextern/wcslib/C/spx.h create mode 100644 cextern/wcslib/C/tab.c create mode 100644 cextern/wcslib/C/tab.h create mode 100644 cextern/wcslib/C/wcs.c create mode 100644 cextern/wcslib/C/wcs.h create mode 100644 cextern/wcslib/C/wcsbth.l create mode 100644 cextern/wcslib/C/wcserr.c create mode 100644 cextern/wcslib/C/wcserr.h create mode 100644 cextern/wcslib/C/wcsfix.c create mode 100644 cextern/wcslib/C/wcsfix.h create mode 100644 cextern/wcslib/C/wcshdr.c create mode 100644 cextern/wcslib/C/wcshdr.h create mode 100644 cextern/wcslib/C/wcslib.h create mode 100644 cextern/wcslib/C/wcsmath.h create mode 100644 cextern/wcslib/C/wcspih.l create mode 100644 cextern/wcslib/C/wcsprintf.c create mode 100644 cextern/wcslib/C/wcsprintf.h create mode 100644 cextern/wcslib/C/wcstrig.c create mode 100644 cextern/wcslib/C/wcstrig.h create mode 100644 cextern/wcslib/C/wcsulex.l create mode 100644 cextern/wcslib/C/wcsunits.c create mode 100644 cextern/wcslib/C/wcsunits.h create mode 100644 cextern/wcslib/C/wcsutil.c create mode 100644 cextern/wcslib/C/wcsutil.h create mode 100644 cextern/wcslib/C/wcsutrn.l create mode 100644 
cextern/wcslib/CHANGES create mode 100644 cextern/wcslib/COPYING create mode 100644 cextern/wcslib/COPYING.LESSER create mode 100644 cextern/wcslib/GNUmakefile create mode 100644 cextern/wcslib/INSTALL create mode 100644 cextern/wcslib/README create mode 100644 cextern/wcslib/THANKS create mode 100644 cextern/wcslib/VALIDATION create mode 100755 cextern/wcslib/config/config.guess create mode 100755 cextern/wcslib/config/config.sub create mode 100755 cextern/wcslib/config/elisp-comp create mode 100755 cextern/wcslib/config/install-sh create mode 100755 cextern/wcslib/config/mdate-sh create mode 100755 cextern/wcslib/config/missing create mode 100755 cextern/wcslib/config/mkinstalldirs create mode 100755 cextern/wcslib/config/move-if-change create mode 100755 cextern/wcslib/configure create mode 100644 cextern/wcslib/configure.ac create mode 100644 cextern/wcslib/flavours create mode 100644 cextern/wcslib/makedefs.in create mode 100644 cextern/wcslib/wcsconfig.h.in create mode 100644 cextern/wcslib/wcsconfig_f77.h.in create mode 100644 cextern/wcslib/wcsconfig_tests.h.in create mode 100644 cextern/wcslib/wcsconfig_utils.h.in create mode 100644 cextern/wcslib/wcslib.pc.in create mode 100644 docs/Makefile create mode 100644 docs/_pkgtemplate.rst create mode 100644 docs/_static/astropy_banner.svg create mode 100644 docs/_static/astropy_banner_96.png create mode 100644 docs/_static/astropy_logo.pdf create mode 100644 docs/_static/timer_prediction_pow10.png create mode 100644 docs/_templates/autosummary/base.rst create mode 100644 docs/_templates/autosummary/class.rst create mode 100644 docs/_templates/autosummary/module.rst create mode 100644 docs/analytic_functions/index.rst create mode 100644 docs/changelog.rst create mode 100644 docs/conf.py create mode 100644 docs/config/config_0_4_transition.rst create mode 100644 docs/config/index.rst create mode 100644 docs/constants/index.rst create mode 100644 docs/convolution/images/astropy.png create mode 100644 docs/convolution/images/original.png create mode 100644 docs/convolution/images/scipy.png create mode 100644 docs/convolution/index.rst create mode 100644 docs/convolution/kernels.rst create mode 100644 docs/convolution/non_normalized_kernels.rst create mode 100644 docs/convolution/using.rst create mode 100644 docs/coordinates/angles.rst create mode 100644 docs/coordinates/definitions.rst create mode 100644 docs/coordinates/formatting.rst create mode 100644 docs/coordinates/frames.rst create mode 100644 docs/coordinates/galactocentric.rst create mode 100644 docs/coordinates/index.rst create mode 100644 docs/coordinates/inplace.rst create mode 100644 docs/coordinates/matchsep.rst create mode 100644 docs/coordinates/references.txt create mode 100644 docs/coordinates/remote_methods.rst create mode 100644 docs/coordinates/representations.rst create mode 100644 docs/coordinates/skycoord.rst create mode 100644 docs/coordinates/solarsystem.rst create mode 100644 docs/coordinates/transforming.rst create mode 100644 docs/coordinates/velocities.rst create mode 100644 docs/cosmology/index.rst create mode 100644 docs/credits.rst create mode 100644 docs/development/astropy-package-template.rst create mode 100644 docs/development/building.rst create mode 100644 docs/development/ccython.rst create mode 100644 docs/development/codeguide.rst create mode 100644 docs/development/codeguide_emacs.rst create mode 100644 docs/development/docguide.rst create mode 100644 docs/development/docrules.rst create mode 100644 docs/development/releasing.rst create mode 100644 
docs/development/scripts.rst create mode 100644 docs/development/testguide.rst create mode 100644 docs/development/vision.rst create mode 100644 docs/development/workflow/additional_git_topics.rst create mode 100644 docs/development/workflow/branch_dropdown.png create mode 100644 docs/development/workflow/command_history.rst create mode 100644 docs/development/workflow/command_history.sh create mode 100644 docs/development/workflow/command_history_with_output.sh create mode 100644 docs/development/workflow/development_workflow.rst create mode 100644 docs/development/workflow/forking_button.png create mode 100644 docs/development/workflow/get_devel_version.rst create mode 100644 docs/development/workflow/git_edit_workflow_examples.rst create mode 100644 docs/development/workflow/git_install.rst create mode 100644 docs/development/workflow/git_links.inc create mode 100644 docs/development/workflow/git_resources.rst create mode 100644 docs/development/workflow/known_projects.inc create mode 100644 docs/development/workflow/links.inc create mode 100644 docs/development/workflow/maintainer_workflow.rst create mode 100644 docs/development/workflow/milestone.png create mode 100644 docs/development/workflow/patches.rst create mode 100644 docs/development/workflow/pull_button.png create mode 100644 docs/development/workflow/terminal_cast.rst create mode 100644 docs/development/workflow/this_project.inc create mode 100644 docs/development/workflow/virtual_pythons.rst create mode 100644 docs/development/workflow/virtualenv_detail.rst create mode 100644 docs/development/workflow/worked_example_switch_branch.png create mode 100644 docs/getting_started.rst create mode 100644 docs/importing_astropy.rst create mode 100644 docs/index.rst create mode 100644 docs/install.rst create mode 100644 docs/io/ascii/base_classes.rst create mode 100644 docs/io/ascii/extension_classes.rst create mode 100644 docs/io/ascii/fast_ascii_io.rst create mode 100644 docs/io/ascii/fixed_width_gallery.rst create mode 100644 docs/io/ascii/index.rst create mode 100644 docs/io/ascii/read.rst create mode 100644 docs/io/ascii/references.txt create mode 100644 docs/io/ascii/toc.txt create mode 100644 docs/io/ascii/write.rst create mode 100644 docs/io/fits/api/cards.rst create mode 100644 docs/io/fits/api/diff.rst create mode 100644 docs/io/fits/api/files.rst create mode 100644 docs/io/fits/api/hdulists.rst create mode 100644 docs/io/fits/api/hdus.rst create mode 100644 docs/io/fits/api/headers.rst create mode 100644 docs/io/fits/api/images.rst create mode 100644 docs/io/fits/api/tables.rst create mode 100644 docs/io/fits/api/verification.rst create mode 100644 docs/io/fits/appendix/faq.rst create mode 100644 docs/io/fits/appendix/header_transition.rst create mode 100644 docs/io/fits/appendix/history.rst create mode 100644 docs/io/fits/index.rst create mode 100644 docs/io/fits/usage/headers.rst create mode 100644 docs/io/fits/usage/image.rst create mode 100644 docs/io/fits/usage/misc.rst create mode 100644 docs/io/fits/usage/scripts.rst create mode 100644 docs/io/fits/usage/table.rst create mode 100644 docs/io/fits/usage/unfamiliar.rst create mode 100644 docs/io/fits/usage/verification.rst create mode 100644 docs/io/misc.rst create mode 100644 docs/io/registry.rst create mode 100644 docs/io/unified.rst create mode 100644 docs/io/votable/.gitignore create mode 100644 docs/io/votable/api_exceptions.rst create mode 100644 docs/io/votable/index.rst create mode 100644 docs/io/votable/references.txt create mode 100644 docs/known_issues.rst 
create mode 100644 docs/license.rst create mode 100644 docs/logging.rst create mode 100644 docs/make.bat create mode 100644 docs/modeling/algorithms.rst create mode 100644 docs/modeling/bounding-boxes.rst create mode 100644 docs/modeling/compound-models.rst create mode 100644 docs/modeling/fitting.rst create mode 100644 docs/modeling/index.rst create mode 100644 docs/modeling/links.inc create mode 100644 docs/modeling/models.rst create mode 100644 docs/modeling/new.rst create mode 100644 docs/modeling/parameters.rst create mode 100644 docs/modeling/units.rst create mode 100644 docs/nddata/ccddata.rst create mode 100644 docs/nddata/decorator.rst create mode 100644 docs/nddata/index.rst create mode 100644 docs/nddata/mixins/index.rst create mode 100644 docs/nddata/mixins/ndarithmetic.rst create mode 100644 docs/nddata/mixins/ndio.rst create mode 100644 docs/nddata/mixins/ndslicing.rst create mode 100644 docs/nddata/nddata.rst create mode 100644 docs/nddata/subclassing.rst create mode 100644 docs/nddata/utils.rst create mode 100644 docs/nitpick-exceptions create mode 100644 docs/overview.rst create mode 100644 docs/samp/advanced_embed_samp_hub.rst create mode 100644 docs/samp/example_clients.rst create mode 100644 docs/samp/example_hub.rst create mode 100644 docs/samp/example_table_image.rst create mode 100644 docs/samp/index.rst create mode 100644 docs/samp/references.txt create mode 100644 docs/stability.rst create mode 100644 docs/stats/circ.rst create mode 100644 docs/stats/index.rst create mode 100644 docs/stats/lombscargle.rst create mode 100644 docs/stats/ripley.rst create mode 100644 docs/stats/robust.rst create mode 100644 docs/table/access_table.rst create mode 100644 docs/table/construct_table.rst create mode 100644 docs/table/implementation_details.rst create mode 100644 docs/table/index.rst create mode 100644 docs/table/indexing.rst create mode 100644 docs/table/io.rst create mode 100644 docs/table/masking.rst create mode 100644 docs/table/mixin_columns.rst create mode 100644 docs/table/modify_table.rst create mode 100644 docs/table/operations.rst create mode 100644 docs/table/pandas.rst create mode 100644 docs/table/references.txt create mode 100644 docs/table/table_architecture.png create mode 100644 docs/table/table_repr_html.png create mode 100644 docs/table/table_row.png create mode 100644 docs/table/table_show_in_nb.png create mode 100644 docs/testhelpers.rst create mode 100644 docs/time/index.rst create mode 100644 docs/time/references.txt create mode 100644 docs/time/time_scale_conversion.odg create mode 100644 docs/time/time_scale_conversion.png create mode 100644 docs/units/combining_and_defining.rst create mode 100644 docs/units/conversion.rst create mode 100644 docs/units/decomposing_and_composing.rst create mode 100644 docs/units/equivalencies.rst create mode 100644 docs/units/format.rst create mode 100644 docs/units/index.rst create mode 100644 docs/units/logarithmic_units.rst create mode 100644 docs/units/quantity.rst create mode 100644 docs/units/standard_units.rst create mode 100644 docs/utils/iers.rst create mode 100644 docs/utils/index.rst create mode 100644 docs/utils/numpy.rst create mode 100644 docs/visualization/histogram.rst create mode 100644 docs/visualization/index.rst create mode 100644 docs/visualization/lupton_rgb.rst create mode 100644 docs/visualization/normalization.rst create mode 100644 docs/visualization/wcsaxes/controlling_axes.rst create mode 100644 docs/visualization/wcsaxes/custom_frames.rst create mode 100644 
docs/visualization/wcsaxes/images_contours.rst create mode 100644 docs/visualization/wcsaxes/index.rst create mode 100644 docs/visualization/wcsaxes/initializing_axes.rst create mode 100644 docs/visualization/wcsaxes/overlaying_coordinate_systems.rst create mode 100644 docs/visualization/wcsaxes/overlays.rst create mode 100644 docs/visualization/wcsaxes/slicing_datacubes.rst create mode 100644 docs/visualization/wcsaxes/ticks_labels_grid.rst create mode 100644 docs/vo/conesearch/client.rst create mode 100644 docs/vo/conesearch/images/astropy_vo_flowchart.png create mode 100644 docs/vo/conesearch/images/client_predict_search_n.png create mode 100644 docs/vo/conesearch/images/client_predict_search_t.png create mode 100644 docs/vo/conesearch/images/validator_html_1.png create mode 100644 docs/vo/conesearch/images/validator_html_2.png create mode 100644 docs/vo/conesearch/images/validator_html_3.png create mode 100644 docs/vo/conesearch/images/validator_html_4.png create mode 100644 docs/vo/conesearch/index.rst create mode 100644 docs/vo/conesearch/validator.rst create mode 100644 docs/vo/index.rst create mode 100644 docs/warnings.rst create mode 100644 docs/wcs/examples/from_file.py create mode 100644 docs/wcs/examples/programmatic.py create mode 100644 docs/wcs/history.rst create mode 100644 docs/wcs/index.rst create mode 100644 docs/wcs/note_sip.rst create mode 100644 docs/wcs/references.rst create mode 100644 docs/wcs/references.txt create mode 100644 docs/wcs/relax.rst create mode 100644 docs/whatsnew/0.1.rst create mode 100644 docs/whatsnew/0.2.rst create mode 100644 docs/whatsnew/0.3.rst create mode 100644 docs/whatsnew/0.4.rst create mode 100644 docs/whatsnew/1.0.rst create mode 100644 docs/whatsnew/1.1.rst create mode 100644 docs/whatsnew/1.2.rst create mode 100644 docs/whatsnew/1.3.rst create mode 100644 docs/whatsnew/2.0.rst create mode 100644 docs/whatsnew/index.rst create mode 100644 examples/README.txt create mode 100644 examples/coordinates/README.txt create mode 100644 examples/coordinates/plot_galactocentric-frame.py create mode 100644 examples/coordinates/plot_obs-planning.py create mode 100644 examples/coordinates/plot_sgr-coordinate-frame.py create mode 100644 examples/coordinates/rv-to-gsr.py create mode 100644 examples/io/Hs-2009-14-a-web.jpg create mode 100644 examples/io/README.txt create mode 100644 examples/io/create-mef.py create mode 100644 examples/io/fits-tables.py create mode 100644 examples/io/modify-fits-header.py create mode 100644 examples/io/plot_fits-image.py create mode 100644 examples/io/skip_create-large-fits.py create mode 100644 examples/io/split-jpeg-to-fits.py create mode 100644 examples/template/example-template.py create mode 100644 ez_setup.py create mode 100644 licenses/AURA_LICENSE.rst create mode 100644 licenses/CONFIGOBJ_LICENSE.rst create mode 100644 licenses/DATATABLES_LICENSE.rst create mode 100644 licenses/ERFA.rst create mode 100644 licenses/EXPAT_LICENSE.rst create mode 100644 licenses/FUTURES_LICENSE.rst create mode 100644 licenses/JQUERY_LICENSE.rst create mode 100644 licenses/NUMPY_LICENSE.rst create mode 100644 licenses/PLY_LICENSE.rst create mode 100644 licenses/PYFITS.rst create mode 100644 licenses/PYTEST_LICENSE.rst create mode 100644 licenses/README.rst create mode 100644 licenses/SIX_LICENSE.rst create mode 100644 licenses/SPHINXEXT_LICENSES.rst create mode 100644 licenses/SYMPY.rst create mode 100644 licenses/WCSLIB_LICENSE.rst create mode 100644 pip-requirements create mode 100644 pip-requirements-dev create mode 100644 
pip-requirements-doc create mode 100644 setup.cfg create mode 100755 setup.py create mode 100644 static/wininst_background.bmp
diff --git a/.astropy-root b/.astropy-root
new file mode 100644
index 0000000..e69de29
diff --git a/CHANGES.rst b/CHANGES.rst
new file mode 100644
index 0000000..b720a4a
--- /dev/null
+++ b/CHANGES.rst
@@ -0,0 +1,7984 @@
+2.0.3 (2017-12-13)
+==================
+
+Bug Fixes
+---------
+
+astropy.coordinates
+^^^^^^^^^^^^^^^^^^^
+
+- Ecliptic frame classes now support the attributes ``v_x``, ``v_y``, ``v_z``
+  when used with a Cartesian representation. [#6569]
+
+- Added a nicer error message when accidentally calling ``frame.representation``
+  instead of ``frame.data`` in the context of methods that use ``._apply()``.
+  [#6561]
+
+- Creating a new ``SkyCoord`` from a list of multiple ``SkyCoord`` objects now
+  yields the correct type of frame, and no longer fails outright for
+  non-equatorial frames. [#6612]
+
+- Improved accuracy of velocity calculation in ``EarthLocation.get_gcrs_posvel``.
+  [#6699]
+
+- Improved accuracy of radial velocity corrections in
+  ``SkyCoord.radial_velocity_correction`` (see the sketch below). [#6861]
+
+- The precision of ecliptic frames is now much better, after removing the
+  nutation from the rotation and fixing the computation of the position of the
+  Sun. [#6508]
+
+astropy.extern
+^^^^^^^^^^^^^^
+
+- Version 0.2.1 of ``pytest-astropy`` is included as an external package.
+  [#6918]
+
+astropy.io.fits
+^^^^^^^^^^^^^^^
+
+- Fix writing the result of ``fitsdiff`` to file with ``--output-file``. [#6621]
+
+- Fix a minor bug where ``FITS_rec`` instances could not be indexed with tuples
+  and other sequences that end up with a scalar. [#6955, #6966]
+
+astropy.io.misc
+^^^^^^^^^^^^^^^
+
+- Fix ``ImportError`` when ``hdf5`` is imported first in a fresh Python
+  interpreter in Python 3. [#6604, #6610]
+
+astropy.nddata
+^^^^^^^^^^^^^^
+
+- Suppress errors during WCS creation in ``CCDData.read()``. [#6500]
+
+- Fixed a problem with ``CCDData.read`` when the extension wasn't given and the
+  primary HDU contained no ``data`` but another HDU did. In that case the
+  headers were not correctly combined. [#6489]
+
+astropy.stats
+^^^^^^^^^^^^^
+
+- Fixed an issue where the biweight statistics functions would
+  sometimes cause runtime underflow/overflow errors for float32 input
+  arrays. [#6905]
+
+astropy.table
+^^^^^^^^^^^^^
+
+- Fixed a problem when printing a table after a column was deleted and
+  garbage-collected, if the format-function caching mechanism happened
+  to re-use the same cache key. [#6714]
+
+- Fixed a problem when comparing a unicode masked column (on the left side) to
+  a bytes masked column (on the right side). [#6899]
+
+- Fixed a problem when comparing bytes and unicode masked columns where the
+  unicode column had masked entries. [#6899]
+
+astropy.tests
+^^^^^^^^^^^^^
+
+- Fixed a bug that caused tests for RST files not to be run on certain
+  platforms. [#6555, #6608]
+
+- Fixed a bug that caused the doctestplus plugin to not work nicely with the
+  hypothesis package. [#6605, #6609]
+
+- Fixed a bug that meant that the data.astropy.org mirror could not be used
+  when using ``--remote-data=astropy``. [#6724]
+
+- Support compatibility with new ``pytest-astropy`` plugins. [#6918]
+
+- When testing, astropy (or the package being tested) is now installed to
+  a temporary directory instead of copying the build. This allows
+  entry points to work correctly. [#6890]
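+
+As a quick, hedged illustration of the ``SkyCoord.radial_velocity_correction``
+fix noted under astropy.coordinates above: a minimal sketch in which the
+target, observing site, and epoch are made-up illustrative values, not taken
+from this changelog::
+
+    from astropy import units as u
+    from astropy.coordinates import EarthLocation, SkyCoord
+    from astropy.time import Time
+
+    # Hypothetical target and observing setup, for illustration only.
+    target = SkyCoord(ra=10.68 * u.deg, dec=41.27 * u.deg, frame='icrs')
+    site = EarthLocation.from_geodetic(lon=-155.47 * u.deg,
+                                       lat=19.82 * u.deg,
+                                       height=4160 * u.m)
+    t = Time('2017-12-13T08:00:00')
+
+    # Barycentric correction to add to an observed radial velocity.
+    v_corr = target.radial_velocity_correction('barycentric',
+                                               obstime=t, location=site)
+    print(v_corr.to(u.km / u.s))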
astropy.utils +^^^^^^^^^^^^^ + +- ``download_files_in_parallel`` now respects the given ``timeout`` value. + [#6658] + +- Fixed bugs in remote data handling and also in IERS unit test related to path + URL, and URI normalization on Windows. [#6651] + +- Fixed a bug that caused ``get_pkg_data_fileobj`` to not work correctly when + used with non-local data from inside packages. [#6724] + +- Make sure ``get_pkg_data_fileobj`` fails if the URL cannot be read, and + correctly falls back on the mirror if necessary. [#6767] + +- Fix the ``finddiff`` option in ``find_current_module`` to properly deal + with submodules. [#6767] + +- Fixed ``pyreadline`` import in ``utils.console.isatty`` for older IPython + versions on Windows. [#6800] + +astropy.visualization +^^^^^^^^^^^^^^^^^^^^^ + +- Fixed the vertical orientation of the ``fits2bitmap`` output bitmap + image to match that of the FITS image. [#6844, #6969] + +- Added a workaround for a bug in matplotlib so that the ``fits2bitmap`` + script generates the correct output file type. [#6969] + + +Other Changes and Additions +--------------------------- + +- No longer require LaTeX to build the documentation locally and + use mathjax instead. [#6701] + +- Fixed broken links in the documentation. [#6745] + +- Ensured that all tests use the Astropy data mirror if needed. [#6767] + + +2.0.2 (2017-09-08) +================== + +Bug Fixes +--------- + +astropy.coordinates +^^^^^^^^^^^^^^^^^^^ + +- Ensure transformations via ICRS also work for coordinates that use cartesian + representations. [#6440] + +- Fixed a bug that was preventing ``SkyCoord`` objects made from lists of other + coordinate objects from being written out to ECSV files. [#6448] + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- Support the ``GZIP_2`` FITS image compression algorithm as claimed + in docs. [#6486] + +astropy.io.votable +^^^^^^^^^^^^^^^^^^ + +- Fixed a bug that wrote out VO table as version 1.2 instead of 1.3. [#6521] + +astropy.table +^^^^^^^^^^^^^ + +- Fix a bug when combining unicode columns via join or vstack. The character + width of the output column was a factor of 4 larger than needed. [#6459] + +astropy.tests +^^^^^^^^^^^^^ + +- Fixed running the test suite using --parallel. [#6415] + +- Added error handling for attempting to run tests in parallel without having + the ``pytest-xdist`` package installed. [#6416] + +- Fixed issue running doctests with pytest>=3.2. [#6423, #6430] + +- Fixed issue caused by antivirus software in response to malformed compressed + files used for testing. [#6522] + +- Updated top-level config file to properly ignore top-level directories. + [#6449] + +astropy.units +^^^^^^^^^^^^^ + +- Quantity._repr_latex_ now respects precision option from numpy + printoptions. [#6412] + +astropy.utils +^^^^^^^^^^^^^ + +- For the ``deprecated_renamed_argument`` decorator, refer to the deprecation's + caller instead of ``astropy.utils.decorators``, to make it easier to find + where the deprecation warnings come from. [#6422] + + +2.0.1 (2017-07-30) +================== + +Bug Fixes +--------- + +astropy.constants +^^^^^^^^^^^^^^^^^ + +- Fixed Earth radius to be the IAU2015 value for the equatorial radius. + The polar value had erroneously been used in 2.0. [#6400]
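The fix above can be verified against the versioned constant modules (a minimal sketch; it assumes the ``iau2012``/``iau2015`` sub-modules described under the 2.0 notes below)::

    from astropy import constants as const
    from astropy.constants import iau2012, iau2015

    print(iau2015.R_earth)  # IAU 2015 equatorial value, the default since 2.0.1
    print(iau2012.R_earth)  # the older IAU 2012 value, for comparison
    print(const.R_earth)    # current default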
astropy.coordinates +^^^^^^^^^^^^^^^^^^^ + +- Added old frame attribute classes back to top-level namespace of + ``astropy.coordinates``. [#6357] + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- Scaling an image always uses user-supplied values when given. Added + defaults for scaling when bscale/bzero are not present (float images). + Fixed a small bug in when to reset ``_orig_bscale``. [#5955] + +astropy.modeling +^^^^^^^^^^^^^^^^ + +- Fixed a bug in initializing compound models with units. [#6398] + +astropy.nddata +^^^^^^^^^^^^^^ + +- Updated CCDData.read() to be more flexible with inputs; it no longer tries + to delete keywords that are missing from the header. [#6388] + +astropy.tests +^^^^^^^^^^^^^ +- Fixed the test command that is run from ``setuptools`` to allow it to + gracefully handle keyboard interrupts and pass them on to the ``pytest`` + subprocess. This prompts ``pytest`` to tear down and display useful traceback + and test information. [#6369] + +astropy.visualization +^^^^^^^^^^^^^^^^^^^^^ + +- Ticks and tick labels are now drawn in front of, rather than behind, + gridlines in WCS axes. This improves legibility in situations where + tick labels may be on the interior of the axes frame, such as the right + ascension axis of an all-sky Aitoff or Mollweide projection. [#6361] + +astropy.wcs +^^^^^^^^^^^ + +- Fixed a missing wcskey part in ``_read_sip_kw`` that caused an error when + reading a SIP WCS from a header that contains only ``CRPIX1n``/``CRPIX2n`` + keywords and no default ``CRPIX1``/``CRPIX2`` keywords. [#6372] + + +2.0 (2017-07-07) +================ + +New Features +------------ + +astropy.constants +^^^^^^^^^^^^^^^^^ + +- Constants are now organized into version modules, with physical CODATA + constants in the ``codata2010`` and ``codata2014`` sub-modules, + and astronomical constants defined by the IAU in the ``iau2012`` and + ``iau2015`` sub-modules. The default constants in ``astropy.constants`` + in Astropy 2.0 have been updated from ``iau2012`` to ``iau2015`` and + from ``codata2010`` to ``codata2014``. The constants for 1.3 can be + accessed in the ``astropyconst13`` sub-module and the constants for 2.0 + (the default in ``astropy.constants``) can also be accessed in the + ``astropyconst20`` sub-module. [#6083] + +- The GM mass parameters recommended by IAU 2015 Resolution B 3 have been + added as ``GM_sun``, ``GM_jup``, and ``GM_earth``, for the Sun, + Jupiter and the Earth. [#6083] + +astropy.convolution +^^^^^^^^^^^^^^^^^^^ + +- Major change in convolution behavior and keyword arguments. Additional + details are in the API section. [#5782] + +- Convolution with un-normalized and un-normalizable kernels is now possible. + [#5782] + +- Add a new argument, ``normalization_rtol``, to ``convolve_fft``, allowing + the user to specify the relative error tolerance in the normalization of + the convolution kernel. [#5649, #5177] + +- Models can now be convolved using ``convolve`` or ``convolve_fft``, + which generates a regular compound model. [#6015] + +astropy.coordinates +^^^^^^^^^^^^^^^^^^^ + +- Frame attributes set on ``SkyCoord`` are now always validated, and any + ndarray-like operation (like slicing) will also be done on those. [#5751] + +- Caching of all possible frame attributes was implemented. This greatly + speeds up many ``SkyCoord`` operations. [#5703, #5751] + +- A class hierarchy was added to allow the representation layer to store + differentials (i.e., finite derivatives) of coordinates. This is intended + to enable support for velocities in coordinate frames. [#5871]
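A minimal sketch of what the differential machinery above enables (it assumes the ``differentials`` keyword of ``CartesianRepresentation`` described further below; the numbers are arbitrary)::

    import astropy.units as u
    from astropy.coordinates import (CartesianDifferential,
                                     CartesianRepresentation)

    vel = CartesianDifferential(10., 20., 30., unit=u.km / u.s)
    pos = CartesianRepresentation(1., 2., 3., unit=u.kpc, differentials=vel)
    print(pos.differentials['s'])  # derivatives are stored per unit time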
- ``replicate_without_data`` and ``replicate`` methods were added to + coordinate frames that allow copying an existing frame object with various + reference or copy behaviors and possibly overriding frame attributes. [#6182] + +- The representation class instances can now contain differential objects. + This is primarily useful for internal operations that will provide support + for transforming velocity components in coordinate frames. [#6169] + +- ``EarthLocation.to_geodetic()`` (and ``EarthLocation.geodetic``) now return + namedtuples instead of regular tuples. [#6237] + +- ``EarthLocation`` now has ``lat`` and ``lon`` properties (equivalent to, but + preferred over, the previous ``latitude`` and ``longitude``). [#6237] + +- Added a ``radial_velocity_correction`` method to ``SkyCoord`` to compute + barycentric and heliocentric velocity corrections. [#5752] + +- Added a new ``AffineTransform`` class for coordinate frame transformations. + This class supports matrix operations with vector offsets in position or + any differential quantities (so far, only velocity is supported). The + matrix transform classes now subclass from the base affine transform. + [#6218] + +- Frame objects now have experimental support for velocity components. Most + frames default to accepting proper motion components and radial velocity, + and the velocities transform correctly for any transformation that uses + one of the ``AffineTransform``-type transformations. For other + transformations a finite-difference velocity transformation is available, + although it is not as numerically stable as those that use + ``AffineTransform``-type transformations. [#6219, #6226] + +astropy.io.ascii +^^^^^^^^^^^^^^^^ + +- Allow specifying the encoding in ``ascii.read``, only for Python 3 and with + the pure-Python readers. [#5448] + +- Writing LaTeX tables with only a ``tabular`` environment is now possible by + setting ``latexdict['tabletype']`` to ``None``. [#6205] + +- Allow ECSV format to support reading and writing mixin columns like + ``Time``, ``SkyCoord``, ``Latitude``, and ``EarthLocation``. [#6181] + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- Check available disk space before writing out a file. [#5550, #4065] + +- Change behavior to warn about units that are not FITS-compliant when + writing a FITS file but not when reading. [#5675] + +- Added absolute tolerance parameter when comparing FITS files. [#4729] + +- New convenience function ``printdiff`` to print out diff reports. [#5759] + +- Allow instantiating a ``BinTableHDU`` directly from a ``Table`` object. + [#6139] + +astropy.io.misc +^^^^^^^^^^^^^^^ + +- YAML representer now also accepts numpy types. [#6077] + +astropy.io.registry +^^^^^^^^^^^^^^^^^^^ + +- New functions to unregister readers, writers, and identifiers. [#6217] + +astropy.modeling +^^^^^^^^^^^^^^^^ + +- Added ``SmoothlyBrokenPowerLaw1D`` model. [#5656] + +- Add ``n_submodels`` shared method to single and compound models, which + allows users to get the number of components of a given single (compound) + model. [#5747] + +- Added a ``name`` setter for instances of ``_CompoundModel``. [#5741] + +- Added FWHM properties to Gaussian and Moffat models. [#6027] + +- Added support for evaluating models and setting the results for inputs + outside the bounding_box to a user-specified ``fill_value``. This + is controlled by a new optional boolean keyword ``with_bounding_box``. [#6081]
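For example (a sketch; the bounding-box limits here are chosen arbitrarily)::

    import numpy as np
    from astropy.modeling.models import Gaussian1D

    g = Gaussian1D(amplitude=1., mean=0., stddev=0.2)
    g.bounding_box = (-0.5, 0.5)
    x = np.linspace(-1., 1., 5)
    print(g(x, with_bounding_box=True))                 # NaN outside the box
    print(g(x, with_bounding_box=True, fill_value=0.))  # custom fill value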
- Added infrastructure support for units on parameters and during + model evaluation and fitting, added support for units on all + functional, power-law, polynomial, and rotation models where this + is appropriate. A new BlackBody1D model has been added. [#4855, #6183, + #6204, #6235] + +astropy.nddata +^^^^^^^^^^^^^^ + +- Added an image class, ``CCDData``. [#6173] + +astropy.stats +^^^^^^^^^^^^^ + +- Added ``biweight_midcovariance`` function. [#5777] + +- Added ``biweight_scale`` and ``biweight_midcorrelation`` + functions. [#5991] + +- ``median_absolute_deviation`` and ``mad_std`` have ``ignore_nan`` option + that will use ``np.ma.median`` with nans masked out or ``np.nanmedian`` + instead of ``np.median`` when computing the median. [#5232] + +- Implemented statistical estimators for Ripley's K Function. [#5712] + +- Added ``SigmaClip`` class. [#6206] + +- Added ``std_ddof`` keyword option to ``sigma_clipped_stats``. + [#6066, #6207] + +astropy.table +^^^^^^^^^^^^^ + +- Issue a warning when assigning a string value to a column and + the string gets truncated. This can occur because numpy string + arrays are fixed-width and silently drop characters which do not + fit within the fixed width. [#5624, #5819] + +- Added functionality to allow ``astropy.units.Quantity`` to be written + as a normal column to FITS files. [#5910] + +- Add support for Quantity columns (within a ``QTable``) in table + ``join()``, ``hstack()`` and ``vstack()`` operations. [#5841] + +- Allow unicode strings to be stored in a Table bytestring column in + Python 3 using UTF-8 encoding. Allow comparison and assignment of + Python 3 ``str`` object in a bytestring column (numpy ``'S'`` dtype). + If comparison with ``str`` instead of ``bytes`` is a problem + (and ``bytes`` is really more logical), please open an issue on GitHub. + [#5700] + +- Added functionality to allow ``astropy.units.Quantity`` to be read + from and written to a VOtable file. [#6132] + +- Added support for reading and writing a table with mixin columns like + ``Time``, ``SkyCoord``, ``Latitude``, and ``EarthLocation`` via the + ASCII ECSV format. [#6181] + +astropy.tests +^^^^^^^^^^^^^ + +- ``enable_deprecations_as_exceptions`` function now accepts additional + user-defined module imports and warning messages to ignore. [#6223, #6334] + +astropy.units +^^^^^^^^^^^^^ + +- The ``astropy.units.quantity_input`` decorator will now convert the output to + the unit specified as a return annotation under Python 3. [#5606] + +- Passing a logarithmic unit to the ``Quantity`` constructor now returns the + appropriate logarithmic quantity class if ``subok=True``. For instance, + ``Quantity(1, u.dex(u.m), subok=True)`` yields a ``Dex`` instance. [#5928] + +- The ``quantity_input`` decorator now accepts a string physical type in + addition to a unit object to specify the expected input ``Quantity``'s + physical type. For example, ``@u.quantity_input(x='angle')`` is now + functionally the same as ``@u.quantity_input(x=u.degree)``. [#3847] + +- The ``quantity_input`` decorator now also supports unit checking for + optional keyword arguments and accepts iterables of units or physical types + for specifying multiple valid equivalent inputs. For example, + ``@u.quantity_input(x=['angle', 'angular speed'])`` or + ``@u.quantity_input(x=[u.radian, u.radian/u.yr])`` would both allow either + a ``Quantity`` angle or angular speed passed in to the argument ``x``. + [#5653] + +- Added a new equivalence ``molar_mass_amu`` between g/mol and + atomic mass units. [#6040, #6113]
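For instance (a minimal sketch of the new equivalency)::

    import astropy.units as u

    m = (12. * u.g / u.mol).to(u.u, equivalencies=u.molar_mass_amu())
    print(m)  # -> 12 atomic mass units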
- ``Quantity`` has gained a new ``to_value`` method which returns the value + of the quantity in a given unit. [#6127] + +- ``Quantity`` now supports the ``@`` operator for matrix multiplication that + was introduced in Python 3.5, for all supported versions of numpy. [#6144] + +- ``Quantity`` supports the new ``__array_ufunc__`` protocol introduced in + numpy 1.13. As a result, operations that involve unit conversion will be + sped up considerably (by up to a factor of two for costly operations such + as trigonometric ones). [#2583] + +astropy.utils +^^^^^^^^^^^^^ + +- Added a new ``dataurl_mirror`` configuration item in ``astropy.utils.data`` + that is used to indicate a mirror for the astropy data server. [#5547] + +- Added a new convenience method ``get_cached_urls`` to ``astropy.utils.data`` + for getting a list of the URLs in your cache. [#6242] + +astropy.wcs +^^^^^^^^^^^ + +- Upgraded the included wcslib to version 5.16. [#6225] + + The minimum required version of wcslib is now 5.14. + + +API Changes +----------- + +astropy.analytic_functions +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- This entire sub-package is deprecated because blackbody has been moved to + ``astropy.modeling.blackbody``. [#6191] + +astropy.convolution +^^^^^^^^^^^^^^^^^^^ + +- Major change in convolution behavior and keyword arguments. + ``astropy.convolution.convolve_fft`` replaced ``interpolate_nan`` with + ``nan_treatment``, and ``astropy.convolution.convolve`` received a new + ``nan_treatment`` argument. ``astropy.convolution.convolve`` also no longer + double-interpolates over NaNs, although that is now available + as a separate ``astropy.convolution.interpolate_replace_nans`` function. See + :ref:`the backwards compatibility note ` for more + on how to get the old behavior (and why you probably don't want to). [#5782] + +astropy.coordinates +^^^^^^^^^^^^^^^^^^^ + +- The ``astropy.coordinates.Galactic`` frame previously had the cartesian + ordering 'w', 'u', 'v' (for 'x', 'y', and 'z', respectively). This was an + error and against the common convention. The 'x', 'y', and 'z' axes now + map to 'u', 'v', and 'w', following the right-handed ('u' points to + the Galactic center) convention. [#6330] + +- Removed deprecated ``angles.rotation_matrix`` and + ``angles.angle_axis``. Use the routines in + ``coordinates.matrix_utilities`` instead. [#6170] + +- ``EarthLocation.latitude`` and ``EarthLocation.longitude`` are now + deprecated in favor of ``EarthLocation.lat`` and ``EarthLocation.lon``. + The former will be removed in a future version. [#6237] + +- The ``FrameAttribute`` class and subclasses have been renamed to just contain + ``Attribute``. For example, ``QuantityFrameAttribute`` is now + ``QuantityAttribute``. [#6300] + +astropy.cosmology +^^^^^^^^^^^^^^^^^ + +- Cosmological models do not include any contribution from neutrinos or photons + by default -- that is, the default value of Tcmb0 is 0. This does not affect + built-in models (such as WMAP or Planck). [#6112] + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- Remove deprecated ``NumCode`` and ``ImgCode`` properties on FITS + ``_ImageBaseHDU``. Use module-level constants ``BITPIX2DTYPE`` and + ``DTYPE2BITPIX`` instead. [#4993] + +- ``comments`` meta key (which is ``io.ascii``'s table convention) is output + to ``COMMENT`` instead of ``COMMENTS`` header. Similarly, ``COMMENT`` + headers are read into ``comments`` meta. [#6097] + +- Remove compatibility code which forced loading all HDUs on close. The old + behavior can be used with ``lazy_load_hdus=False``. Because of this change, + trying to access the ``.data`` attribute from an HDU which is not loaded + now raises an ``IndexError`` instead of a ``ValueError``. [#6082]
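To keep the old eager-loading behavior (a sketch; ``example.fits`` is a placeholder path)::

    from astropy.io import fits

    with fits.open('example.fits', lazy_load_hdus=False) as hdul:
        hdul.info()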
- Deprecated ``clobber`` keyword; use ``overwrite``. [#6203] + +- Add EXTVER column to the output of ``HDUList.info()``. [#6124] + +astropy.modeling +^^^^^^^^^^^^^^^^ + +- Removed deprecated ``Redshift`` model; use ``RedshiftScaleFactor`` instead. [#6053] + +- Removed deprecated ``Pix2Sky_AZP.check_mu`` and ``Pix2Sky_SZP.check_mu`` + methods. [#6170] + +- Deprecated ``GaussianAbsorption1D`` model, as it can be better represented + by subtracting ``Gaussian1D`` from ``Const1D``. [#6200] + +- Added method ``sum_of_implicit_terms`` to ``Model``, needed when performing + a linear fit to a model that has built-in terms with no corresponding + parameters (primarily the ``1*x`` term of ``Shift``). [#6174] + +astropy.nddata +^^^^^^^^^^^^^^ + +- Removed deprecated usage of parameter ``propagate_uncertainties`` as a + positional keyword. [#6170] + +- Removed deprecated ``support_correlated`` attribute. [#6170] + +- Removed deprecated ``propagate_add``, ``propagate_subtract``, + ``propagate_multiply`` and ``propagate_divide`` methods. [#6170] + +astropy.stats +^^^^^^^^^^^^^ + +- Removed the deprecated ``sig`` and ``varfunc`` keywords in the + ``sigma_clip`` function. [#5715] + +- Added ``modify_sample_size`` keyword to ``biweight_midvariance`` + function. [#5991] + +astropy.table +^^^^^^^^^^^^^ + +- In Python 3, when getting an item from a bytestring Column it is now + converted to ``str``. This means comparing a single item to a ``bytes`` + object will always fail, and instead one must compare with a ``str`` + object. [#5700] + +- Removed the deprecated ``data`` property of Row. [#5729] + +- Removed the deprecated functions ``join``, ``hstack``, ``vstack`` and + ``get_groups`` from np_utils. [#5729] + +- Added ``name`` parameter to method ``astropy.table.Table.add_column`` and + ``names`` parameter to method ``astropy.table.Table.add_columns``, to + provide the flexibility to add unnamed columns, mixin objects and also to + specify explicit names. Default names will be used if not + specified. [#5996] + +- Added optional ``axis`` parameter to ``insert`` method for ``Column`` and + ``MaskedColumn`` classes. [#6092] + +astropy.units +^^^^^^^^^^^^^ + +- Moved ``units.cgs.emu`` to ``units.deprecated.emu`` due to ambiguous + definition of "emu". [#4918, #5906] + +- ``jupiterMass``, ``earthMass``, ``jupiterRad``, and ``earthRad`` no longer + have their prefixed units included in the standard units. If needed, they + can still be found in ``units.deprecated``. [#5661] + +- ``solLum``, ``solMass``, and ``solRad`` no longer have their prefixed units + included in the standard units. If needed, they can still be found in + ``units.required_by_vounit``, and are enabled by default. [#5661] + +- Removed deprecated ``Unit.get_converter``. [#6170] + +- Internally, astropy replaced use of ``.to(unit).value`` with the new + ``to_value(unit)`` method, since this is somewhat faster. Any subclasses + that overwrote ``.to`` should also overwrite ``.to_value`` (or + possibly just the private ``._to_value`` method). If you did this, + please let us know what was lacking that made this necessary! [#6137]
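In user code the new method looks like this (a minimal sketch; the ``@`` example additionally assumes Python >= 3.5, per the New Features entry above)::

    import numpy as np
    import astropy.units as u

    q = np.arange(9.).reshape(3, 3) * u.m
    print(q.to_value(u.cm))        # plain ndarray of values in cm
    print(q @ (np.ones(3) * u.s))  # matrix multiplication keeps units (m s)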
astropy.utils +^^^^^^^^^^^^^ + +- Removed the deprecated compatibility modules for Python 2.6 (``argparse``, + ``fractions``, ``gzip``, ``odict``, ``subprocess``). [#5975, #6157, #6164] + +- Removed the deprecated ``zest.releaser`` machinery. [#6282] + +astropy.visualization +^^^^^^^^^^^^^^^^^^^^^ + +- Removed the deprecated ``scale_image`` function. [#6170] + +astropy.vo +^^^^^^^^^^ + +- Cone Search now issues a deprecation warning because it has moved to + Astroquery 0.3.5 and will be removed from Astropy in a future version. + [#5558, #5904] + +- The ``astropy.vo.samp`` package has been moved to ``astropy.samp``, and no + longer supports HTTPS/SSL. [#6201, #6213] + +astropy.wcs +^^^^^^^^^^^ + +- Removed deprecated ``wcs.rotateCD``. [#6170] + + +Bug Fixes +--------- + +astropy.convolution +^^^^^^^^^^^^^^^^^^^ + +- Major change in convolution behavior and keyword arguments: + ``astropy.convolution.convolve`` was not performing normalized convolution + in earlier versions of astropy. [#5782] + +- Direct convolution previously implemented the wrong definition of + convolution. This error only affects *asymmetric* kernels. [#6267] + +astropy.coordinates +^^^^^^^^^^^^^^^^^^^ + +- The ``astropy.coordinates.Galactic`` frame had an incorrect ordering for the + 'u', 'v', and 'w' cartesian coordinates. [#6330] + +- The ``astropy.coordinates.search_around_sky``, + ``astropy.coordinates.search_around_3d``, and ``SkyCoord`` equivalent methods + now correctly yield an ``astropy.coordinates.Angle`` as the third return type + even if there are no matches (previously it returned a raw Quantity). [#6347] + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- ``comments`` meta key (which is ``io.ascii``'s table convention) is output + to ``COMMENT`` instead of ``COMMENTS`` header. Similarly, ``COMMENT`` + headers are read into ``comments`` meta. [#6097] + +- Use more sensible fix values for invalid NAXISj header values. [#5935] + +- Close file on error to avoid creating a ``ResourceWarning`` warning + about an unclosed file. [#6168, #6177] + +astropy.modeling +^^^^^^^^^^^^^^^^ + +- Creating a compound model where one of the submodels is + a compound model whose parameters were changed now uses the + updated parameters and not the parameters of the original model. [#5741] + +- Allow ``Mapping`` and ``Identity`` to be fittable. [#6018] + +- Gaussian models now impose positive ``stddev`` in fitting. [#6019] + +- OrthoPolynomialBase (Chebyshev2D / Legendre2D) models were being evaluated + incorrectly when part of a compound model (using the parameters from the + original model), which in turn caused fitting to fail as a no-op. [#6085] + +- Allow ``Ring2D`` to be defined using ``r_out``. [#6192] + +- Make ``LinearLSQFitter`` produce correct results with fixed model + parameters and allow ``Shift`` and ``Scale`` to be fitted with + ``LinearLSQFitter`` and ``LevMarLSQFitter``. [#6174] + +astropy.stats +^^^^^^^^^^^^^ + +- Allow choosing which median function is used in ``mad_std`` and + ``median_absolute_deviation``, and allow using these functions with + a multi-dimensional ``axis``. [#5835] + +- Fixed ``biweight_midvariance`` so that by default it returns a + variance that agrees with the standard definition. [#5991] + +astropy.table +^^^^^^^^^^^^^ + +- Fix a problem with vstack for bytes columns in Python 3. [#5628] + +- Fix QTable add/insert row for multidimensional Quantity.
[#6092] + +astropy.time +^^^^^^^^^^^^ + +- Fixed the initial condition of ``TimeFITS`` to allow scale, FITS scale + and FITS realization to be checked and equated properly. [#6202] + +astropy.visualization +^^^^^^^^^^^^^^^^^^^^^ + +- Fixed a bug that caused the default WCS to return coordinates offset by + one. [#6339] + +astropy.vo +^^^^^^^^^^ + +- Fixed a bug in vo.samp when stopping a hub for which a lockfile was + not created. [#6211] + + +Other Changes and Additions +--------------------------- + +- Numpy 1.7 and 1.8 are no longer supported. [#6006] + +- Python 3.3 is no longer supported. [#6020] + +- The bundled ERFA was updated to version 1.4.0. [#6239] + +- The bundled version of pytest has now been removed, but the + astropy.tests.helper.pytest import will continue to work properly. + Affiliated packages should nevertheless transition to importing pytest + directly rather than from astropy.tests.helper. This also means that + pytest is now a formal requirement for testing for both Astropy and + for affiliated packages. [#5694] + + +1.3.3 (2017-05-29) +================== + +Bug Fixes +--------- + +astropy.coordinates +^^^^^^^^^^^^^^^^^^^ + +- Fixed a bug where ``StaticMatrixTransform`` erroneously copied frame + attributes from the input coordinate to the output frame. In practice, this + didn't actually affect any transforms in Astropy but may change behavior for + users who explicitly used the ``StaticMatrixTransform`` in their own code. + [#6045] + +- Fixed ``get_icrs_coordinates`` to loop through all the URLs in case one + raises an exception. [#5864] + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- Fix table header not written out properly when ``fits.writeto()`` + convenience function is used. [#6042] + +- Fix writing out read-only arrays. [#6036] + +- Extension headers are written out properly when the ``fits.update()`` + convenience function is used. [#6058] + +- Angstrom, erg, G, and barn are no longer reported as deprecated FITS units. + [#5929] + +astropy.table +^^^^^^^^^^^^^ + +- Fix problem with Table pprint/pformat raising an exception for + non-UTF-8 compliant bytestring data. [#6117] + +astropy.units +^^^^^^^^^^^^^ + +- Allow strings 'nan' and 'inf' as Quantity inputs. [#5958] + +- Add support for ``positive`` and ``divmod`` ufuncs (new in numpy 1.13). + [#5998, #6020, #6116] + +astropy.utils +^^^^^^^^^^^^^ + +- On systems that do not have ``pkg_resources`` non-numerical additions to + version numbers like ``dev`` or ``rc1`` are stripped in ``minversion`` to + avoid a ``TypeError`` in ``distutils.version.LooseVersion``. [#5944] + +- Fix ``auto_download`` setting ignored in ``Time.ut1``. [#6033] + +astropy.visualization +^^^^^^^^^^^^^^^^^^^^^ + +- Fix bug in ManualInterval which caused the limits to be returned incorrectly + if set to zero, and fix defaults for ManualInterval in the presence of NaNs. + [#6088] + +- Get rid of warnings that occurred when slicing a cube due to the tick + locator trying to find ticks for the sliced axis. [#6104] + +- Accept normal Matplotlib keyword arguments in set_xlabel and set_ylabel + functions. [#5686, #5692, #6060] + +- Fix a bug that caused labels to be missing from frames with labels that + could change direction mid-axis, such as EllipticalFrame. Also ensure + that empty tick labels do not cause any warnings. [#6063]
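The ``ManualInterval`` fix above can be checked directly (a minimal sketch)::

    import numpy as np
    from astropy.visualization import ManualInterval

    data = np.linspace(-5., 5., 11)
    print(ManualInterval(0., 3.).get_limits(data))  # (0.0, 3.0), even with a zero limit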
1.3.2 (2017-03-30) +================== + +Bug Fixes +--------- + +astropy.coordinates +^^^^^^^^^^^^^^^^^^^ + +- Ensure that checking equivalence of ``SkyCoord`` objects works with + non-scalar attributes. [#5884, #5887] + +- Ensure that transformation to frames with multi-dimensional attributes + works as expected. [#5890, #5897] + +- Make sure all ``BaseRepresentation`` objects can be output as strings. + [#5889, #5897] + +astropy.units +^^^^^^^^^^^^^ + +- Add support for ``heaviside`` ufunc (new in numpy 1.13). [#5920] + +astropy.utils +^^^^^^^^^^^^^ + +- Fix to allow the C-based _fast_iterparse() VOTable XML parser to + realloc() its buffers instead of overflowing them. [#5824, #5869] + + +Other Changes and Additions +--------------------------- + +- File permissions are revised in the released source distribution. [#5912] + + +1.3.1 (2017-03-18) +================== + +New Features +------------ + +astropy.utils +^^^^^^^^^^^^^ + +- The ``deprecated_renamed_argument`` decorator got a new ``pending`` + parameter to suppress the deprecation warnings (see the sketch below). + [#5761] + +Bug Fixes +--------- + +astropy.coordinates +^^^^^^^^^^^^^^^^^^^ + +- Changed ``SkyCoord`` so that frame attributes which are not valid for the + current ``frame`` (but are valid for other frames) are stored on the + ``SkyCoord`` instance instead of the underlying ``frame`` instance (e.g., + setting ``relative_humidity`` on an ICRS ``SkyCoord`` instance). [#5750] + +- Ensured that ``position_angle`` and ``separation`` give correct answers for + frames with different equinox (see #5722). [#5762] + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- Fix problem with padding bytes written for BinTable columns converted + from unicode. [#5280, #5287, #5288, #5296] + +- Fix out-of-order TUNITn cards when writing tables to FITS. [#5720] + +- Recognize PrimaryHDU when non-boolean values are present for the + 'GROUPS' header keyword. [#5808] + +- Fix the insertion of new keywords in compressed image headers + (``CompImageHeader``). [#5866] + +astropy.modeling +^^^^^^^^^^^^^^^^ + +- Fixed a problem with setting ``bounding_box`` on 1D models. [#5718] + +- Fixed a broadcasting problem with weighted fitting of 2D models + with ``LevMarLSQFitter``. [#5788] + +- Fixed a problem with passing kwargs to fitters, specifically ``verblevel``. [#5815] + +- Changed FittingWithOutlierRemoval to reject on the residual to the fit. [#5831] + +astropy.stats +^^^^^^^^^^^^^ + +- Fix the psd normalization for Lomb-Scargle periodograms in the presence + of noise. [#5713] + +- Fix bug in the autofrequency range when ``minimum_frequency`` is specified + but ``maximum_frequency`` is not. [#5738] + +- Ensure that a masked array is returned when sigma clipping fully masked + data. [#5711] + +astropy.table +^^^^^^^^^^^^^ + +- Fix problem where key for caching column format function was not + sufficiently unique. [#5803] + +- Handle sorting NaNs and masked values in jsviewer. [#4052, #5572] + +- Ensure mixin columns can be added to a table using a scalar value for the + right-hand side if the type supports broadcasting. E.g., for an existing + ``QTable``, ``t['q'] = 3*u.m`` will now add a column as expected. [#5820] + +- Fixed a bug when setting/getting values from rows/columns of a table using + numpy array scalars. [#5772] + +astropy.units +^^^^^^^^^^^^^ + +- Fixed problem where IrreducibleUnits could fail to unpickle. [#5868]
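A sketch of the ``pending`` option added under New Features above (the ``write`` function here is hypothetical)::

    from astropy.utils.decorators import deprecated_renamed_argument

    @deprecated_renamed_argument('clobber', 'overwrite', since='1.3', pending=True)
    def write(data, overwrite=False):
        # hypothetical writer; ``clobber=True`` still maps to ``overwrite``
        return overwrite

    write(None, clobber=True)  # no deprecation warning while pending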
astropy.utils +^^^^^^^^^^^^^ + +- Avoid importing ``ipython`` in ``utils.console`` until it is necessary, to + prevent deprecation warnings when importing, e.g., ``Column``. [#5755] + +astropy.visualization +^^^^^^^^^^^^^^^^^^^^^ + +- Avoid importing matplotlib.pyplot when importing + astropy.visualization.wcsaxes. [#5680, #5684] + +- Ignore Numpy warnings that happen in coordinate transforms in WCSAxes. + [#5792] + +- Fix compatibility issues between WCSAxes and Matplotlib 2.x. [#5786] + +- Fix a bug that caused WCSAxes frame visual properties to not be copied + over when resetting the WCS. [#5791] + +astropy.extern +^^^^^^^^^^^^^^ + +- Fixed a bug where PLY was overwriting its generated files. [#5728] + +Other Changes and Additions +--------------------------- + +- Fixed a deprecation warning that occurred when running tests with + astropy.test(). [#5689] + +- The deprecation of the ``clobber`` argument (originally deprecated in 1.3.0) + in the ``io.fits`` write functions was changed to a "pending" deprecation + (without displaying warnings) for now. [#5761] + +- Updated bundled astropy-helpers to v1.3.1. [#5880] + + +1.3 (2016-12-22) +================ + +New Features +------------ + +astropy.convolution +^^^^^^^^^^^^^^^^^^^ + +- The ``convolve`` and ``convolve_fft`` functions now support a ``mask`` keyword, + which allows them to also support ``NDData`` objects as inputs. [#5554] + +astropy.coordinates +^^^^^^^^^^^^^^^^^^^ + +- Added an ``of_address`` classmethod to ``EarthLocation`` to enable fast creation of + ``EarthLocation`` objects given an address by querying the Google maps API. [#5154] + +- A new routine, ``get_body_barycentric_posvel``, has been added that allows + one to calculate positions as well as velocities for solar system bodies. + For JPL kernels, this roughly doubles the execution time, so if one requires + only the positions, one should use ``get_body_barycentric``. [#5231] + +- Transformations between coordinate systems can use the more accurate JPL + ephemerides. [#5273, #5436] + +- Arithmetic on representations, such as addition of two representations, + multiplication with a ``Quantity``, or calculating the norm via ``abs``, + has now become possible. Furthermore, there are new methods ``mean``, + ``sum``, ``dot``, and ``cross``. For all these, the representations are + treated as vectors in cartesian space (temporarily converting to + ``CartesianRepresentation`` if necessary). [#5301] + +- ``CartesianRepresentation`` can be initialized with plain arrays by passing + in a ``unit``. Furthermore, for input with a vector array, the coordinates + no longer have to be in the first dimension, but can be at any ``xyz_axis``. + To complement the latter, a new ``get_xyz(xyz_axis)`` method allows one to + get a vector array out along a given axis. [#5439] + +astropy.io.ascii +^^^^^^^^^^^^^^^^ + +- Files with "Fortran-style" columns (i.e. double-precision scientific notation + with a character other than "e", like ``1.495978707D+13``) can now be parsed by + the fast reader natively. [#5552]
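For instance (a minimal sketch; ``exponent_style`` selects the Fortran-style ``D`` exponents)::

    from astropy.io import ascii

    txt = "a b\n1.495978707D+13 2.0D0\n"
    t = ascii.read(txt, fast_reader={'exponent_style': 'D'})
    print(t['a'][0])  # -> 1.495978707e+13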
- Allow round-tripping masked data tables in most formats by using an + empty string ``''`` as the default representation of masked values + when writing. [#5347] + +- Allow reading HTML tables with unicode column values in Python 2.7. [#5410] + +- Check for self-consistency of ECSV header column names. [#5463] + +- Produce warnings when writing an IPAC table from an astropy table that + contains metadata not supported by the IPAC format. [#4700] + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- "Lazy" loading of HDUs now occurs - when an HDU is requested, the file is + only read up to the point where that HDU is found. This can mean a + substantial speedup when accessing files that have many HDUs. [#5065] + +astropy.io.misc +^^^^^^^^^^^^^^^ + +- Added ``io.misc.yaml`` module to support serializing core astropy objects + using the YAML protocol. [#5486] + +astropy.io.registry +^^^^^^^^^^^^^^^^^^^ + +- Added ``delay_doc_updates`` context manager to postpone the formatting of + the documentation for the ``read`` and ``write`` methods of the class to + optionally reduce the import time. [#5275] + +astropy.modeling +^^^^^^^^^^^^^^^^ + +- Added a class to combine astropy fitters and functions to remove outliers, + e.g., sigma clipping. [#4760] + +- Added a ``Tabular`` model. [#5105] + +- Added ``Hermite1D`` and ``Hermite2D`` polynomial models. [#5242] + +- Added the injection of entry points into ``astropy.modeling.fitting`` if + they inherit from the ``Fitter`` class. [#5241] + +- Added bounding box to ``Lorentz1D`` and ``MexicanHat1D`` models. [#5393] + +- Added ``Planar2D`` functional model. [#5456] + +- Updated ``Gaussian2D`` to accept no arguments (will use default x/y_stddev + and theta). [#5537] + +astropy.nddata +^^^^^^^^^^^^^^ + +- Added ``keep`` and ``**kwargs`` parameter to ``support_nddata``. [#5477] + +astropy.stats +^^^^^^^^^^^^^ + +- Added ``axis`` keyword to ``biweight_location`` and + ``biweight_midvariance``. [#5127, #5158] + +astropy.table +^^^^^^^^^^^^^ + +- Allow renaming mixin columns. [#5469] + +- Support generalized value formatting for mixin columns in tables. [#5274] + +- Support persistence of table indices when pickling and copying table. [#5468] + +astropy.tests +^^^^^^^^^^^^^ + +- Install both runtime and test dependencies when running the + ./setup.py test command. These dependencies are specified by the + install_requires and tests_require keywords via setuptools. [#5092] + +- Enable easier subclassing of the TestRunner class. [#5505] + +astropy.time +^^^^^^^^^^^^ + +- ``light_travel_time`` can now use more accurate JPL ephemerides. [#5273, #5436] + +astropy.units +^^^^^^^^^^^^^ + +- Added ``pixel_scale`` and ``plate_scale`` equivalencies. [#4987] + +- The ``spectral_density`` equivalency now supports transformations of + luminosity density. [#5151] + +- ``Quantity`` now accepts strings consisting of a number and unit such + as '10 km/s'. [#5245] + +astropy.utils +^^^^^^^^^^^^^ + +- Added a new decorator: ``deprecated_renamed_argument``. This can be used to + rename a function argument, while it still allows for the use of the older + argument name. [#5214] + +astropy.visualization +^^^^^^^^^^^^^^^^^^^^^ + +- Added a ``make_lupton_rgb`` function to generate color images from three + greyscale images, following the algorithm of Lupton et al. (2004). [#5535] + +- Added ``data`` and ``interval`` inputs to the ``ImageNormalize`` + class. [#5206] + +- Added a new ``simple_norm`` convenience function (see the sketch below). + [#5206] + +- Added a default stretch for the ``Normalization`` class. [#5206]
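A sketch of ``simple_norm`` (only a subset of its arguments is shown)::

    import numpy as np
    from astropy.visualization import simple_norm

    img = np.random.rand(16, 16)
    norm = simple_norm(img, stretch='sqrt')  # returns an ImageNormalize instance
    # e.g. plt.imshow(img, norm=norm) with matplotlib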
- Added a default ``vmin/vmax`` for the ``ManualInterval`` class. + [#5206] + +- The ``wcsaxes`` subpackage has now been integrated into astropy as + ``astropy.visualization.wcsaxes``. This allows plotting of astronomical + data/coordinate systems in Matplotlib. [#5496] + +astropy.wcs +^^^^^^^^^^^ + +- Improved ``footprint_to_file``: allow specifying the coordinate system, + using the one from ``RADESYS`` by default, and overwrite the file instead + of appending to it. [#5494] + + +API Changes +----------- + +astropy.convolution +^^^^^^^^^^^^^^^^^^^ + +- ``discretize_model`` now raises an exception if non-integer ranges are used. + Previously it had incorrect behavior but did not raise an exception. [#5538] + +astropy.coordinates +^^^^^^^^^^^^^^^^^^^ + +- ``SkyCoord``, ``ICRS``, and other coordinate objects, as well as the + underlying representations such as ``SphericalRepresentation`` and + ``CartesianRepresentation`` can now be reshaped using methods named like the + numpy ones for ``ndarray`` (``reshape``, ``swapaxes``, etc.). + [#4123, #5254, #5482] + +- The ``obsgeoloc`` and ``obsgeovel`` attributes of ``GCRS`` and + ``PrecessedGeocentric`` frames are now stored and returned as + ``CartesianRepresentation`` objects, rather than ``Quantity`` objects. + Similarly, ``EarthLocation.get_gcrs_posvel`` now returns a tuple of + ``CartesianRepresentation`` objects. [#5253] + +- ``search_around_3d`` and ``search_around_sky`` now return units + for the distance matching their input argument when no match is + found, instead of ``dimensionless_unscaled``. [#5528] + +astropy.io.ascii +^^^^^^^^^^^^^^^^ + +- ASCII writers now accept an 'overwrite' argument. + The default behavior is changed so that a warning will be + issued when overwriting an existing file unless ``overwrite=True``. + In a future version this will be changed from a warning to an + exception to prevent accidentally overwriting a file. [#5007] + +- The default representation of masked values when writing tables was + changed from ``'--'`` to the empty string ``''``. Previously any + user-supplied ``fill_values`` parameter would overwrite the class + default, but now the values are prepended to the class default. [#5347] + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- The old ``Header`` interface, deprecated since Astropy 0.1 (PyFITS 3.1), has + been removed entirely. See :ref:`header-transition-guide` for explanations + on this change and help on the transition. [#5310] + +- The following functions, classes and methods have been removed: + ``CardList``, ``Card.key``, ``Card.cardimage``, ``Card.ascardimage``, + ``create_card``, ``create_card_from_string``, ``upper_key``, + ``Header.ascard``, ``Header.rename_key``, ``Header.get_history``, + ``Header.get_comment``, ``Header.toTxtFile``, ``Header.fromTxtFile``, + ``new_table``, ``tdump``, ``tcreate``, ``BinTableHDU.tdump``, + ``BinTableHDU.tcreate``. + +- Removed ``txtfile`` argument to the ``Header`` constructor. + +- Removed usage of ``Header.update`` with ``Header.update(keyword, value, + comment)`` arguments. + +- Removed ``startColumn`` and ``endColumn`` arguments to the ``FITS_record`` + constructor. + +- The ``clobber`` argument in FITS writers has been renamed to + ``overwrite``. This change affects the following functions and + methods: ``tabledump``, ``writeto``, ``Header.tofile``, + ``Header.totextfile``, ``_BaseDiff.report``, + ``_BaseHDU.overwrite``, ``BinTableHDU.dump`` and + ``HDUList.writeto``. [#5171]
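After the rename, overwriting is spelled like this (a sketch; ``example.fits`` is a placeholder path)::

    import numpy as np
    from astropy.io import fits

    fits.writeto('example.fits', np.zeros((4, 4)), overwrite=True)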
- Added an optional ``copy`` parameter to ``fits.Header`` which controls + whether a copy is made when creating a ``Header`` from another ``Header``. + [#5005, #5326] + +astropy.io.registry +^^^^^^^^^^^^^^^^^^^ + +- ``.fts`` and ``.fts.gz`` files will be automatically identified as + ``io.fits`` files if no explicit ``format`` is given. [#5211] + +- Added an optional ``readwrite`` parameter for ``get_formats`` to filter + formats for read or write. [#5275] + +astropy.modeling +^^^^^^^^^^^^^^^^ + +- ``Gaussian2D`` now raises an error if ``theta`` is set at the same time as + ``cov_matrix`` (previously ``theta`` was silently ignored). [#5537] + +astropy.table +^^^^^^^^^^^^^ + +- Setting an existing table column (e.g. ``t['a'] = [1, 2, 3]``) now defaults + to *replacing* the column with a column corresponding to the new value + (using ``t.replace_column()``) instead of doing an in-place update. Any + existing meta-data in the column (e.g. the unit) is discarded. An + in-place update is still done when the new value is not a valid column, + e.g. ``t['a'] = 0``. To force an in-place update use the pattern + ``t['a'][:] = [1, 2, 3]``. [#5556] + +- Allow ``collections.Mapping``-like ``data`` attribute when initializing a + ``Table`` object (``dict``-like was already possible). [#5213] + +astropy.tests +^^^^^^^^^^^^^ + +- The inputs to the ``TestRunner.run_tests()`` method now must be + keyword arguments (no positional arguments). This applies to the + ``astropy.test()`` function as well. [#5505] + +astropy.utils +^^^^^^^^^^^^^ + +- Renamed ``ignored`` context manager in ``compat.misc`` to ``suppress`` + to be consistent with https://bugs.python.org/issue19266. [#5003] + +astropy.visualization +^^^^^^^^^^^^^^^^^^^^^ + +- Deprecated the ``scale_image`` function. [#5206] + +- The ``mpl_normalize`` module (containing the ``ImageNormalize`` + class) is now automatically imported with the ``visualization`` + subpackage. [#5491] + +astropy.vo +^^^^^^^^^^ + +- The ``clobber`` argument in ``VOSDatabase.to_json()`` has been + renamed to ``overwrite``. [#5171] + +astropy.wcs +^^^^^^^^^^^ + +- ``wcs.rotateCD()`` was deprecated without a replacement. [#5240] + +Bug Fixes +--------- + +astropy.coordinates +^^^^^^^^^^^^^^^^^^^ + +- Transformations between CIRS and AltAz now correctly account for the + location of the observer. [#5591] + +- GCRS frames representing a location on Earth with multiple obstimes are now + allowed. This means that the solar system routines ``get_body``, + ``get_moon`` and ``get_sun`` now work with non-scalar times and a + non-geocentric observer. [#5253] + +astropy.io.ascii +^^^^^^^^^^^^^^^^ + +- Fix issue with units or other astropy core classes stored in table meta. + [#5605] + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- Copying a ``fits.Header`` using ``copy`` or ``deepcopy`` from the ``copy`` + module will use ``Header.copy`` to ensure that modifying the copy will + not alter the original ``Header`` and vice versa. [#4990, #5323] + +- ``HDUList.info()`` no longer raises ``AttributeError`` in presence of + ``BZERO``. [#5508] + +- Avoid exceptions with numpy 1.10 and up when using scaled integer data + where ``BZERO`` has float type but integer value. [#4639, #5527] + +- Converting a header card to a string now calls ``self.verify('fix+warn')`` + instead of ``self.verify('fix')`` so headers with invalid keywords will + not raise a ``VerifyError`` on printing. [#887, #5054]
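The ``Header`` copy semantics noted above can be exercised directly (a minimal sketch)::

    import copy
    from astropy.io import fits

    h1 = fits.Header([('TELESCOP', 'SKETCH')])
    h2 = copy.deepcopy(h1)  # uses Header.copy under the hood
    h2['TELESCOP'] = 'OTHER'
    print(h1['TELESCOP'])   # unchanged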
- ``FITS_Record._convert_ascii`` now converts blank fields to 0 when a + non-blank null column value is set. [#5134, #5394] + +astropy.io.registry +^^^^^^^^^^^^^^^^^^^ + +- ``read`` now correctly raises an IOError if a file with an unknown + extension can't be found, instead of raising IORegistryError: + "Format could not be identified." [#4779] + +astropy.time +^^^^^^^^^^^^ + +- Ensure ``Time`` instances holding a single ``delta_ut1_utc`` can be copied, + flattened, etc. [#5225] + +astropy.units +^^^^^^^^^^^^^ + +- Operations involving ``Angle`` or ``Distance``, or any other + ``SpecificTypeQuantity`` instance, now also return an instance of the + same type if the instance was the second argument (if the resulting unit + is consistent with the specific type). [#5327] + +- In-place operations on ``Angle`` and ``Distance`` instances now raise an + exception if the final unit is not equivalent to radian and meter, + respectively. Similarly, views as ``Angle`` and ``Distance`` can now only + be taken from quantities with appropriate units, and views as ``Quantity`` + can only be taken from logarithmic quantities such as ``Magnitude`` if the + physical unit is dimensionless. [#5070] + +- Conversion from quantities to logarithmic units now correctly causes a + logarithmic quantity such as ``Magnitude`` to be returned. [#5183] + + +astropy.wcs +^^^^^^^^^^^ + +- SIP distortion for an alternate WCS is correctly initialized now by + looking at the "CTYPE" values matching the alternate WCS. [#5443] + +Other Changes and Additions +--------------------------- + +- The bundled ERFA was updated to version 1.3.0. This includes the + leap second planned for 2016 Dec 31. + +astropy.coordinates +^^^^^^^^^^^^^^^^^^^ + +- Initialization of ``Angle`` has been sped up for ``Quantity`` and ``Angle`` + input. [#4970] + +- The use of ``np.matrix`` instances in the transformations has been + deprecated, since this class does not allow stacks of matrices. As a + result, the semi-public functions ``angles.rotation_matrix`` and + ``angles.angle_axis`` are also deprecated, in favour of the new routines + with the same name in ``coordinates.matrix_utilities``. [#5104] + +- A new ``BaseCoordinateFrame.cache`` dictionary has been created to expose + the internal cache. This is useful when modifying representation data + in-place without using ``realize_frame``. Additionally, documentation for + in-place operations on coordinates was added. [#5575] + +- Coordinates and their representations are printed with a slightly different + format, following how numpy >= 1.12 prints structured arrays. [#5423] + +astropy.cosmology +^^^^^^^^^^^^^^^^^ + +- The default cosmological model has been changed to Planck 2015, + and the citation strings have been updated. [#5372] + +astropy.extern +^^^^^^^^^^^^^^ + +- Updated the bundled ``six`` module to version 1.10.0. [#5521] + +- Updated the astropy shipped version of ``PLY`` to version 3.9. [#5526] + +- Updated the astropy shipped version of jQuery to v3.3.1, and dataTables + to v1.10.12. [#5564] + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- Performance improvements for tables with many columns. [#4985] + +- Removed obsolete code that was previously needed to properly + implement the append mode. [#4793] + +astropy.io.registry +^^^^^^^^^^^^^^^^^^^ + +- Reduced the time spent in the ``get_formats`` function. This also reduces + the time it takes to import astropy subpackages, i.e. + ``astropy.coordinates``.
[#5262] + +astropy.units +^^^^^^^^^^^^^ + +- The functions ``add_enabled_units``, ``set_enabled_equivalencies`` and + ``add_enabled_equivalencies`` have been sped up by copying the current + ``_UnitRegistry`` instead of building it from scratch. [#5306] + +- To build the documentation, the ``build_sphinx`` command has been deprecated + in favor of ``build_docs``. [#5179] + +- The ``--remote-data`` option to ``python setup.py test`` can now take + different arguments: ``--remote-data=none`` is the same as not specifying + ``--remote-data`` (skip all tests that require the internet), + ``--remote-data=astropy`` skips all tests that need remote data except those + that require only data from data.astropy.org, and ``--remote-data=any`` is + the same as ``--remote-data`` (run all tests that use remote data). [#5506] + +- The pytest ``recwarn`` fixture has been removed from the tests in favor of + ``utils.catch_warnings``. [#5489] + +- Deprecated escape sequences in strings (Python 3.6) have been removed. [#5489] + + +1.2.2 (2016-12-22) +================== + +Bug Fixes +--------- + +astropy.io.ascii +^^^^^^^^^^^^^^^^ + +- Fix a bug where the ``fill_values`` parameter was ignored when writing a + table to HTML format. [#5379] + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- Handle unicode FITS BinTable column names on Python 2. [#5204, #4805] + +- Fix reading of float values from ASCII tables, that could be read as + float32 instead of float64 (with the E and F formats). These values are now + always read as float64. [#5362] + +- Fixed a memory leak when using the compression module. [#5399, #5464] + +- Able to insert and remove lower case HIERARCH keywords in a consistent + manner. [#5313, #5321] + +astropy.stats +^^^^^^^^^^^^^ + +- Fixed broadcasting in ``sigma_clip`` when using negative ``axis``. [#4988] + +astropy.table +^^^^^^^^^^^^^ + +- Assigning a logarithmic unit to a ``QTable`` column that did not have a + unit yet now correctly turns it into the appropriate function quantity + subclass (such as ``Magnitude`` or ``Dex``). [#5345] + +- Fix default value for ``show_row_index`` in ``Table.show_in_browser``. + [#5562] + +astropy.units +^^^^^^^^^^^^^ + +- For inverse trig functions that operate on quantities, catch any warnings + that occur from evaluating the function on the unscaled quantity value + between __array_prepare__ and __array_wrap__. [#5153] + +- Ensure ``!=`` also works for function units such as ``MagUnit``. [#5345] + +astropy.wcs +^^^^^^^^^^^ + +- Fix use of the ``relax`` keyword in ``to_header`` when used to change the + output precision. [#5164] + +- ``wcs.to_header(relax=True)`` adds a "-SIP" suffix to ``CTYPE`` when SIP + distortion is present in the WCS object. [#5239] + +- Improved log messages in ``to_header``. [#5239] + +Other Changes and Additions +--------------------------- + +- The bundled ERFA was updated to version 1.3.0. This includes the + leap second planned for 2016 Dec 31. + +astropy.stats +^^^^^^^^^^^^^ + +- ``poisson_conf_interval`` with ``'kraft-burrows-nousek'`` interval is now + faster and usable with SciPy versions < 0.14. [#5064, #5290] + + + +1.2.1 (2016-06-22) +================== + +Bug Fixes +--------- + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- Fixed a bug that caused TFIELDS to not be in the correct position in + compressed image HDU headers under certain circumstances, which created + invalid FITS files.
[#5118, #5125] + +astropy.units +^^^^^^^^^^^^^ + +- Fixed an ``ImportError`` that occurred whenever ``astropy.constants`` was + imported before ``astropy.units``. [#5030, #5121] + +- Magnitude zero points used to define ``STmag``, ``ABmag``, ``M_bol`` and + ``m_bol`` are now collected in ``astropy.units.magnitude_zero_points``. + They are not enabled as regular units by default, but can be included + using ``astropy.units.magnitude_zero_points.enable()``. This makes it + possible to round-trip magnitudes as originally intended. [#5030] + +1.2 (2016-06-19) +================ + +General +------- + +- Astropy now requires Numpy 1.7.0 or later. [#4784] + +New Features +------------ + +astropy.constants +^^^^^^^^^^^^^^^^^ + +- Add ``L_bol0``, the luminosity corresponding to absolute bolometric + magnitude zero. [#4262] + +astropy.coordinates +^^^^^^^^^^^^^^^^^^^ + +- ``CartesianRepresentation`` now includes a transform() method that can take + a 3x3 matrix to transform coordinates. [#4860] + +- Solar system and lunar ephemerides accessible via ``get_body``, + ``get_body_barycentric`` and ``get_moon`` functions. [#4890] + +- Added astrometric frames (i.e., a frame centered on a particular + point/object specified in another frame). [#4909, #4941] + +- Added ``SkyCoord.spherical_offsets_to`` method. [#4338] + +- Recent Earth rotation (IERS) data are now auto-downloaded so that AltAz + transformations for future dates now use the most accurate available + rotation values. [#4436] + +- Add support for heliocentric coordinate frames. [#4314] + +astropy.cosmology +^^^^^^^^^^^^^^^^^ + +- ``angular_diameter_distance_z1z2`` now supports the computation of + the angular diameter distance between a scalar and an array-like + argument. [#4593] The method now supports models with negative + ``Omega_k0`` (positive-curvature universes) and allows z2 < z1. [#4661] + +astropy.io.ascii +^^^^^^^^^^^^^^^^ + +- File names can now be passed as ``Path`` objects. [#4606] + +- Check that columns in ``formats`` specifier exist in the output table + when writing. [#4508, #4511] + +- Allow trailing whitespace in the IPAC header lines. [#4758] + +- Updated to filter out the default parser warning of BeautifulSoup. + [#4551] + +- Added support for reading and writing reStructuredText simple tables. + [#4812] + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- File names can now be passed as ``Path`` objects. [#4606] + +- Header allows a dictionary-like cards argument during creation. [#4663] + +- New function ``convenience.table_to_hdu`` to allow creating a FITS + HDU object directly from an astropy ``Table``. [#4778] + +- New optional arguments ``ignore_missing`` and ``remove_all`` are added + to ``astropy.io.fits.header.remove()``. [#5020] + +astropy.io.registry +^^^^^^^^^^^^^^^^^^^ + +- Added custom ``IORegistryError``. [#4833] + +astropy.io.votable +^^^^^^^^^^^^^^^^^^ + +- File names can now be passed as ``Path`` objects. [#4606] + +astropy.modeling +^^^^^^^^^^^^^^^^ + +- Added the fittable=True attribute to the Scale and Shift models with tests. [#4718] + +- Added example plots to docstrings for some built-in models. [#4008] + +astropy.nddata +^^^^^^^^^^^^^^ + +- ``UnknownUncertainty``: new subclass of ``NDUncertainty`` that can be used to + save uncertainties that cannot be used for error propagation. [#4272] + +- ``NDArithmeticMixin``: ``add``, ``subtract``, ``multiply`` and ``divide`` + can be used as classmethods but require that two operands are given. These + operands don't need to be NDData instances but they must be convertible to + NDData. This conversion is done internally. Using it on the instance does + not require (but also allows) two operands. [#4272, #4851]
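As a sketch of the classmethod form (using the ``NDDataRef`` class introduced just below)::

    import numpy as np
    from astropy.nddata import NDDataRef

    a = NDDataRef(np.array([1., 2.]))
    b = NDDataRef(np.array([3., 4.]))
    print(NDDataRef.add(a, b).data)  # classmethod form; two operands required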
- ``NDDataRef``: new subclass that implements ``NDData`` together with all + currently available mixins. This class does not implement additional + attributes, methods or a numpy.ndarray-like interface like ``NDDataArray``. + [#4797] + +astropy.stats +^^^^^^^^^^^^^ + +- Added ``axis`` keyword for ``mad_std`` function. [#4688, #4689] + +- Added Bayesian and Akaike Information Criteria. [#4716] + +- Added Bayesian upper limits for Poisson count rates. [#4622] + +- Added ``circstats``; a module for computing circular statistics. [#3705, #4472] + +- Added ``jackknife`` resampling method. [#3708, #4439] + +- Updated ``bootstrap`` to allow bootstrapping statistics with multiple + outputs. [#3601] + +- Added ``LombScargle`` class to compute Lomb-Scargle periodograms. [#4811] + +astropy.table +^^^^^^^^^^^^^ + +- ``Table.show_in_notebook`` and ``Table.show_in_browser(jsviewer=True)`` now + yield tables with an "idx" column, allowing easy identification of the index + of a row even when the table is re-sorted in the browser. [#4404] + +- Added ``AttributeError`` when trying to set mask on non-masked table. [#4637] + +- Allow using a tuple of keys in ``Table.sort``. [#4671] + +- Added ``itercols``; a way to iterate through columns of a table. [#3805, + #4888] + +- ``Table.show_in_notebook`` and the default notebook display (i.e., + ``Table._repr_html_``) now use consistent table styles which can be set + using the ``astropy.table.default_notebook_table_class`` configuration + item. [#4886] + +- Added interface to create ``Table`` directly from any table-like object + that has an ``__astropy_table__`` method. [#4885] + +astropy.tests +^^^^^^^^^^^^^ + +- Enable test runner to obtain documentation source files from directory + other than "docs". [#4748] + +astropy.time +^^^^^^^^^^^^ + +- Added caching of scale and format transformations for improved performance. + [#4422] + +- Recent Earth rotation (IERS) data are now auto-downloaded so that UT1 + transformations for future times now work out of the box. [#4436] + +- Add support for barycentric/heliocentric time corrections. [#4314] + +astropy.units +^^^^^^^^^^^^^ + +- The option to use tuples to indicate fractional powers of units, + deprecated in 0.3.1, has been removed. [#4449] + +- Added ``slug`` to imperial units. [#4670] + +- Added Earth radius (``R_earth``) and Jupiter radius (``R_jup``) to units. + [#4818] + +- Added a ``represents`` property to allow access to the definition of a + named unit (e.g., ``u.kpc.represents`` yields ``1000 pc``). [#4806] + +- Add bolometric absolute and apparent magnitudes, ``M_bol`` and ``m_bol``. + [#4262] + +astropy.utils +^^^^^^^^^^^^^ + +- A ``Path`` object can now be passed to ``get_readable_fileobj``. [#4606] + +- Implemented a generic and extensible way of merging metadata. [#4459] + +- Added ``format_doc`` decorator which allows replacing and/or formatting the + current docstring of an object. [#4242] + +- Added a new context manager ``set_locale`` to temporarily set the + current locale. [#4363] + +- Added new IERS_Auto class to auto-download recent IERS (Earth rotation) + data when required by coordinate or time transformations.
astropy.visualization +^^^^^^^^^^^^^^^^^^^^^ + +- Add zscale interval based on Numdisplay's implementation. [#4776] + +API changes +----------- + +astropy.config +^^^^^^^^^^^^^^ + +- The deprecated ``ConfigurationItem`` and ``ConfigAlias`` classes and the + ``save_config``, ``get_config_items``, and ``generate_all_config_items`` + functions have now been removed. [#2767, #4446] + +astropy.coordinates +^^^^^^^^^^^^^^^^^^^ + +- Removed compatibility layer for pre-v0.4 API. [#4447] + +- Added ``copy`` keyword-only argument to allow initialization without + copying the (possibly large) input coordinate arrays. [#4883] + +astropy.cosmology +^^^^^^^^^^^^^^^^^ + +- Improve documentation of the z validity range of cosmology objects. [#4882, #4949] + +astropy.io.ascii +^^^^^^^^^^^^^^^^ + +- Add a way to control HTML escaping when writing a table as an HTML file. [#4423] + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- Two optional boolean arguments ``ignore_missing`` and ``remove_all`` are + added to ``Header.remove``. [#5020] + +astropy.modeling +^^^^^^^^^^^^^^^^ + +- Renamed ``Redshift`` model to ``RedshiftScaleFactor``. [#3672] + +- Inputs (``coords`` and ``out``) to ``render`` function in ``Model`` are + converted to float. [#4697] + +- ``RotateNative2Celestial`` and ``RotateCelestial2Native`` are now + implemented as subclasses of ``EulerAngleRotation``. [#4881, #4940] + +astropy.nddata +^^^^^^^^^^^^^^ + +- ``NDDataBase`` does not set the private uncertainty property anymore. This + only affects you if you subclass ``NDDataBase`` directly. [#4270] + +- ``NDDataBase``: the ``uncertainty``-setter is removed. A similar one is + added in ``NDData`` so this also only affects you if you subclassed + ``NDDataBase`` directly. [#4270] + +- ``NDDataBase``: ``uncertainty``-getter returns ``None`` instead of the + private uncertainty and is now abstract. This getter is moved to + ``NDData`` so it only affects direct subclasses of ``NDDataBase``. [#4270] + +- ``NDData`` accepts Quantity-like data together with an explicitly given unit. + Previously a ValueError was raised in this case. The final instance will use the + explicitly given unit attribute, but it doesn't check whether the units are + convertible, and the data will not be scaled. [#4270] + +- ``NDData``: the given mask (explicit, or implicit if the data was masked) + will be saved by the setter. It will not be saved directly as the private + attribute. [#4879] + +- ``NDData`` accepts an additional argument ``copy`` which will copy every + parameter before it is saved as an attribute of the instance. [#4270] + +- ``NDData``: added an ``uncertainty.getter`` that returns the private + attribute. It is equivalent to the old ``NDDataBase.uncertainty``-getter. + [#4270] + +- ``NDData``: added an ``uncertainty.setter``. It is slightly modified with + respect to the old ``NDDataBase.uncertainty``-setter. The changes include: + +- if the uncertainty has no uncertainty_type an info message is printed + instead of a TypeError and the uncertainty is saved as + ``UnknownUncertainty`` unless the uncertainty is None. [#4270] + +- the requirement that the uncertainty_type of the uncertainty needs to be a + string was removed. [#4270] + +- if the uncertainty is a subclass of NDUncertainty the parent_nddata + attribute will be set so the uncertainty knows to which data it belongs. + This is also a bugfix. [#4152, #4270] + +- ``NDData``: added a ``meta``-getter, which will set and return an empty + OrderedDict if no meta was previously set. [#4509, #4469]
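A minimal sketch of the new default behaviour (the attribute name in the example is arbitrary)::

    from astropy.nddata import NDData

    nd = NDData([1.0, 2.0, 3.0])
    print(nd.meta)               # an empty OrderedDict rather than None
    nd.meta['exposure'] = 30.0   # dict-like, so it can be filled in place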
- ``NDData``: added a ``meta``-setter. It requires that the meta is + dictionary-like (it also accepts Headers or ordered dictionaries and others) + or None. [#4509, #4469, #4921] + +- ``NDArithmeticMixin``: The operand in arithmetic methods (``add``, ...) + doesn't need to be a subclass of ``NDData``. It is sufficient if it can be + converted to one. This conversion is done internally. [#4272] + +- ``NDArithmeticMixin``: The arithmetic methods allow several new arguments to + control how or if different attributes of the class will be processed during + the operation. [#4272] + +- ``NDArithmeticMixin``: Giving the parameter ``propagate_uncertainties`` as a + positional argument is deprecated and will be removed in the future. You now + need to specify it as a keyword parameter. Besides ``True`` and ``False``, + ``None`` is now also a valid value for this parameter. [#4272, #4851] + +- ``NDArithmeticMixin``: The wcs attribute of the operands is not compared and + thus raises no ValueError if they differ, unless a ``compare_wcs`` + parameter is specified. [#4272] + +- ``NDArithmeticMixin``: The arithmetic operation was split from a general + ``_arithmetic`` method into different specialized private methods to allow + subclasses more control over how the attributes are processed without + overriding ``_arithmetic``. The ``_arithmetic`` method is now used to call + these other methods. [#4272] + +- ``NDSlicingMixin``: If the attempt at slicing the mask, wcs or uncertainty + fails with a ``TypeError``, a warning is issued instead of the TypeError. [#4271] + +- ``NDUncertainty``: the ``support_correlated`` attribute is deprecated in favor of + ``supports_correlated``, which is a property. Also affects + ``StdDevUncertainty``. [#4272] + +- ``NDUncertainty``: added the ``__init__`` that was previously implemented in + ``StdDevUncertainty`` and takes an additional ``unit`` parameter. [#4272] + +- ``NDUncertainty``: added a ``unit`` property without setter that returns the + set unit or, if not set, the unit of the parent. [#4272] + +- ``NDUncertainty``: included a ``parent_nddata`` property similar to the one + previously implemented in StdDevUncertainty. [#4272] + +- ``NDUncertainty``: added an ``array`` property with setter. The setter will + convert the value to a plain numpy array if it is a list or a subclass of a + numpy array. [#4272] + +- ``NDUncertainty``: ``propagate_multiply`` and similar were removed. Before, + they were abstract properties; they were replaced by methods with the same name but + with a leading underscore. The entry point for propagation is a method + called ``propagate``. [#4272] + +- ``NDUncertainty`` and subclasses: implement a representation (``__repr__``). + [#4787] + +- ``StdDevUncertainty``: error propagation allows an explicitly given + correlation factor, which may be a scalar or an array, to be taken + into account during propagation. + This correlation must be determined manually and is not computed by the + uncertainty itself! [#4272] + +- ``StdDevUncertainty``: the ``array`` is converted to a plain numpy array + only if it's a list or a subclass of numpy.ndarray. Previously it was always + cast to a numpy array but also allowed subclasses. [#4272] + +- ``StdDevUncertainty``: setting the ``parent_nddata`` does not check whether the + shape of its array is identical to the parent's data shape. [#4272] + +- ``StdDevUncertainty``: the ``array.setter`` doesn't check whether the array has + the same shape as the parent's data. [#4272]
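A minimal sketch of propagation with an explicit correlation factor. The keyword name ``uncertainty_correlation`` is an assumption about the arithmetic-mixin API of this era; check the ``NDArithmeticMixin`` documentation for your version::

    from astropy.nddata import NDDataRef, StdDevUncertainty

    a = NDDataRef([1.0, 2.0], uncertainty=StdDevUncertainty([0.1, 0.1]))
    b = NDDataRef([3.0, 4.0], uncertainty=StdDevUncertainty([0.2, 0.2]))

    # Fully correlated operands; the factor must be supplied by the caller
    c = a.add(b, uncertainty_correlation=1)
    print(c.uncertainty.array)  # 0.3 per element, not the uncorrelated 0.224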
- ``StdDevUncertainty``: deprecated ``support_correlated`` in favor of + ``supports_correlated``. [#4272, #4828] + +- ``StdDevUncertainty``: deprecated ``propagate_add`` and similar methods in + favor of ``propagate``. [#4272, #4828] + +- Allow ``data`` to be a named argument in ``NDDataArray``. [#4626] + +astropy.table +^^^^^^^^^^^^^ + +- ``operations.unique`` now has a ``keep`` parameter, which allows + one to select whether to keep the first or last row in a set of + duplicate rows, or to remove all rows that are duplicates. [#4632] + +- ``QTable`` now behaves more consistently by making columns act as a + ``Quantity`` even if they are assigned a unit after the table is + created. [#4497, #4884] + +astropy.units +^^^^^^^^^^^^^ + +- Remove deprecated ``register`` argument for Unit classes. [#4448] + +astropy.utils +^^^^^^^^^^^^^ + +- The astropy.utils.compat.argparse module has now been deprecated. Use the + Python 'argparse' module directly instead. [#4462] + +- The astropy.utils.compat.odict module has now been deprecated. Use the + Python 'collections' module directly instead. [#4466] + +- The astropy.utils.compat.gzip module has now been deprecated. Use the + Python 'gzip' module directly instead. [#4464] + +- The deprecated ``ScienceStateAlias`` class has been removed. [#2767, #4446] + +- The astropy.utils.compat.subprocess module has now been deprecated. Use the + Python 'subprocess' module instead. [#4483] + +- The astropy.utils.xml.unescaper module now also unescapes ``'%2F'`` to + ``'/'`` and ``'&&'`` to ``'&'`` in a given URL. [#4699] + +- The astropy.utils.metadata.MetaData descriptor now has two optional + parameters: ``doc`` and ``copy``. [#4921] + +- The default IERS (Earth rotation) data is now auto-downloaded via a + new class ``IERS_Auto``. When extrapolating UT1-UTC or polar motion values + outside the available time range, the values are now clipped at the last + available value instead of being linearly extrapolated. [#4436] + +astropy.wcs +^^^^^^^^^^^ + +- WCS objects can now be initialized with an ImageHDU or + PrimaryHDU object. [#4493, #4505] + +- astropy.wcs now issues an INFO message when the header has SIP coefficients but + "-SIP" is missing from CTYPE. [#4814] + +Bug fixes +--------- + +astropy.coordinates +^^^^^^^^^^^^^^^^^^^ + +- Ameliorate a problem with ``get_sun`` not round-tripping due to + approximations in the light deflection calculation. [#4952] + +- Ensure that ``angle_utilities.position_angle`` accepts floats, as stated + in the docstring. [#3800] + +- Ensured that transformations for ``GCRS`` frames are correct for + non-geocentric observers. [#4986] + +- Fixed a problem with the ``Quantity._repr_latex_`` method causing errors + when showing an ``EarthLocation`` in a Jupyter notebook. [#4542, #5068] + +astropy.cosmology +^^^^^^^^^^^^^^^^^ + +astropy.io.ascii +^^^^^^^^^^^^^^^^ + +- Fix a problem where the fast reader (with ``use_fast_converter=False``) can + fail on non-US locales. [#4363] + +- Fix astropy.io.ascii.read handling of units for IPAC formatted files. + Columns with no unit are treated as unitless, not dimensionless. [#4867, + #4947] + +- Fix problems with header parsing in the SExtractor reader. [#4603, #4910] + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- ``GroupsHDU.is_image`` property is now set to ``False``. [#4742] + +- Ensure scaling keywords are removed from header when unsigned integer data + is converted to signed type.
[#4974, #5053] + +- Made TFORMx keyword check more flexible in test of compressed images to + enable compatibility of the test with cfitsio 3.380. [#4646, #4653] + +astropy.io.misc +^^^^^^^^^^^^^^^ + +astropy.io.votable +^^^^^^^^^^^^^^^^^^ + +- The astropy.io.votable.validator.html module is updated to handle division + by zero when generating validation report. [#4699] + +- KeyError when converting Table v1.2 numeric arrays fixed. [#4782] + +astropy.modeling +^^^^^^^^^^^^^^^^ + +- Refactored ``AiryDisk2D``, ``Sersic1D``, and ``Sersic2D`` models + to be able to combine them as classes as well as instances. [#4720] + +- Modified the "LevMarLSQFitter" class to use the weights in the + calculation of the Jacobian. [#4751] + +astropy.nddata +^^^^^^^^^^^^^^ + +- ``NDData``: giving a masked ``Quantity`` as the data argument will use the + implicitly passed mask, unit and value. [#4270] + +- ``NDData``: using a subclass implementing ``NDData`` with + ``NDArithmeticMixin`` now allows error propagation. [#4270] + +- Fixed memory leak that happened when uncertainty of ``NDDataArray`` was + set. [#4825, #4862] + +- ``StdDevUncertainty``: During error propagation the unit of the uncertainty + is taken into account. [#4272] + +- ``NDArithmeticMixin``: ``divide`` and ``multiply`` yield correct + uncertainties if only one uncertainty is set. [#4152, #4272] + +astropy.stats +^^^^^^^^^^^^^ + +- Fix ``sigma_clipped_stats`` to use the ``axis`` argument. [#4726, #4808] + +astropy.table +^^^^^^^^^^^^^ + +- Fixed bug where Tables created from existing Table objects were not + inheriting the ``primary_key`` attribute. [#4672, #4930] + +- Provide more detail in the error message when reading a table fails due to a + problem converting column string values. [#4759] + +astropy.units +^^^^^^^^^^^^^ + +- Exponentiation using a ``Quantity`` with a unit equivalent to dimensionless + as base and an ``array``-like exponent yields the correct result. [#4770] + +- Ensured that with the ``spectral_density`` equivalency one can also convert + between ``photlam`` and ``STmag``/``ABmag``. [#5017] + +astropy.utils +^^^^^^^^^^^^^ + +- The astropy.utils.compat.fractions module has now been deprecated. Use the + Python 'fractions' module directly instead. [#4463] + +- Added ``format_doc`` decorator which allows one to replace and/or format the + current docstring of an object. [#4242] + +- Attributes using the astropy.utils.metadata.MetaData descriptor are now + included in the sphinx documentation. [#4921] + +astropy.vo +^^^^^^^^^^ + +- Relaxed expected accuracy of Cone Search prediction test to reduce spurious + failures. [#4382] + +astropy.wcs +^^^^^^^^^^^ + +- astropy.wcs.to_header removes "-SIP" from CTYPE when SIP coefficients + are not written out, i.e. ``relax`` is either ``False`` or ``None``. + astropy.wcs.to_header appends "-SIP" to CTYPE when SIP coefficients + are written out, i.e. ``relax=True``. [#4814] + +- Made ``wcs.bounds_check`` call ``wcsprm_python2c``, which means it + works even if ``wcs.set`` has not been called yet. [#4957, #4966] + +- WCS objects can no longer be reverse-indexed, which was technically + permitted but incorrectly implemented previously. [#4962] + +Other Changes and Additions +--------------------------- + +- Python 2.6 is no longer supported. [#4486] + +- The bundled version of py.test has been updated to 2.8.3. [#4349] + +- Reduce Astropy's import time (``import astropy``) by almost a factor of 2.
[#4649] + +- Cython prerequisite for building changed to v0.19 in install.rst [#4705, + #4710, #4719] + +- All astropy.modeling functionality that was deprecated in Astropy 1.0 has + been removed. [#4857] + +- Added instructions for installing Astropy into CASA. [#4840] + +- Added an example gallery to the docs demonstrating short + snippets/examples. [#4734] + + +1.1.2 (2016-03-10) +================== + +New Features +------------ + +astropy.wcs +^^^^^^^^^^^ + +- The ``astropy.wcs`` module now exposes ``WCSHDO_P*`` constants that can be + used to allow more control over output precision when using the ``relax`` + keyword argument. [#4616] + +Bug Fixes +--------- + +astropy.io.ascii +^^^^^^^^^^^^^^^^ + +- Fixed handling of CDS data file when no description is given and also + included stripping out of markup for missing value from description. [#4437, #4474] + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- Fixed possible segfault during error handling in FITS tile + compression. [#4489] + +- Fixed crash on pickling of binary table columns with the 'X', 'P', or + 'Q' format. [#4514] + +- Fixed memory / reference leak that could occur when copying a ``FITS_rec`` + object (the ``.data`` for table HDUs). [#520] + +- Fixed a memory / reference leak in ``FITS_rec`` that occurred in a wide + range of cases, especially after writing FITS tables to a file, but in + other cases as well. [#4539] + +astropy.modeling +^^^^^^^^^^^^^^^^ + +- Fix a bug to allow instantiation of a modeling class having a parameter + with a custom setter that takes two parameters ``(value, model)`` [#4656] + +astropy.table +^^^^^^^^^^^^^ + +- Fixed bug when replacing a table column with a mixin column like + Quantity or Time. [#4601] + +- Disable initial ordering in jsviewer (``show_in_browser``, + ``show_in_notebook``) to respect the order from the Table. [#4628] + +astropy.units +^^^^^^^^^^^^^ + +- Fixed sphinx issues on plotting quantities. [#4527] + +astropy.utils +^^^^^^^^^^^^^ + +- Fixed latex representation of function units. [#4563] + +- The ``zest.releaser`` hooks included in Astropy are now injected locally to + Astropy, rather than being global. [#4650] + +astropy.visualization +^^^^^^^^^^^^^^^^^^^^^ + +- Fixed ``fits2bitmap`` script to allow ext flag to contain extension + names or numbers. [#4468] + +- Fixed ``fits2bitmap`` default output filename generation for + compressed FITS files. [#4468] + +- Fixed ``quantity_support`` to ensure its conversion returns ndarray + instances (needed for numpy >=1.10). [#4654] + +astropy.wcs +^^^^^^^^^^^ + +- Fixed possible exception in handling of SIP headers that was introduced in + v1.1.1. [#4492] + +- Fixed a bug that caused WCS objects with a high dynamic range of values for + certain parameters to lose precision when converted to a header. This + occurred for example in cases of spectral cubes, where a spectral axis in + Hz might have a CRVAL3 value greater than 1e10 but the spatial coordinates + would have CRVAL1/2 values 8 to 10 orders of magnitude smaller. This bug + was present in Astropy 1.1 and 1.1.1 but not 1.0.x. This has now been fixed + by ensuring that all WCS keywords are output with 14 significant figures by + default. [#4616] + +Other Changes and Additions +--------------------------- + +- Updated bundled astropy-helpers to v1.1.2. [#4678] + +- Updated bundled copy of WCSLIB to 5.14. 
[#4579] + + +1.1.1 (2016-01-08) +================== + +New Features +------------ + +astropy.io.registry +^^^^^^^^^^^^^^^^^^^ + +- Allow ``pathlib.Path`` objects (available in Python 3.4 and later) for + specifying the file name in registry read / write functions. [#4405] + +astropy.utils +^^^^^^^^^^^^^ + +- ``console.human_file_size`` now accepts quantities with byte-equivalent + units. [#4373] + +Bug Fixes +--------- + +astropy.analytic_functions +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- Fixed the blackbody functions' handling of overflows on some platforms + (Windows with MSVC, older Linux versions) with a buggy ``expm1`` function. + [#4393] + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- Fixed a bug where updates to string columns in FITS tables were not saved + on Python 3. [#4452] + +Other Changes and Additions +--------------------------- + +- Updated bundled astropy-helpers to v1.1.1. [#4413] + + +1.1 (2015-12-11) +================ + +New Features +------------ + +astropy.config +^^^^^^^^^^^^^^ + +- Added new tools ``set_temp_config`` and ``set_temp_cache`` which can be + used either as function decorators or context managers to temporarily + use alternative directories in which to read/write the Astropy config + files and download caches respectively. This is especially useful for + testing, though ``set_temp_cache`` may also be used as a way to provide + an alternative (application specific) download cache for large data files, + rather than relying on the default cache location in users' home + directories. [#3975] + +astropy.constants +^^^^^^^^^^^^^^^^^ + +- Added the Thomson scattering cross-section. [#3839] + +astropy.convolution +^^^^^^^^^^^^^^^^^^^ + +- Added Moffat2DKernel. [#3965] + +astropy.coordinates +^^^^^^^^^^^^^^^^^^^ + +- Added ``get_constellation`` function and ``SkyCoord.get_constellation`` + convenience method to determine the constellation that a coordinate + is in. [#3758] + +- Added ``PrecessedGeocentric`` frame, which is based on GCRS, but precessed + to a specific requested mean equinox. [#3758] + +- Added ``Supergalactic`` frame to support de Vaucouleurs supergalactic + coordinates. [#3892] + +- ``SphericalRepresentation`` now has a ``._unit_representation`` class attribute to specify + an equivalent UnitSphericalRepresentation. This allows subclasses of + representations to pair up correctly. [#3757] + +- Added functionality to support getting the locations of observatories by + name. See ``astropy.coordinates.EarthLocation.of_site``. [#4042] + +- Added ecliptic coordinates, including ``GeocentricTrueEcliptic``, + ``BarycentricTrueEcliptic``, and ``HeliocentricTrueEcliptic``. [#3749] + +astropy.cosmology +^^^^^^^^^^^^^^^^^ + +- Add Planck 2015 cosmology. [#3476] + +- Distance calculations are now 20-40x faster for the supplied + cosmologies due to implementing Cython scalar versions of + ``FLRW.inv_efunc``. [#4127] + +astropy.io.ascii +^^^^^^^^^^^^^^^^ + +- Automatically use ``guess=False`` when reading if the file ``format`` is + provided and the format parameters are uniquely specified. This update + also removes duplicate format guesses to improve performance. [#3418] + +- Calls to ascii.read() for fixed-width tables may now omit one of the keyword + arguments ``col_starts`` or ``col_ends``. Columns will be assumed to begin and + end immediately adjacent to each other. [#3657] + +- Add a function ``get_read_trace()`` that returns a traceback of the + attempted read formats for the last call to ``astropy.io.ascii.read``. [#3688]
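A minimal sketch of inspecting the guessing trace after a read (the inline table content is arbitrary)::

    from astropy.io import ascii

    tbl = ascii.read("a b\n1 2\n3 4")
    for entry in ascii.get_read_trace():
        print(entry)  # one dict per attempted reader/format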
- Supports LZMA decompression via ``get_readable_fileobj``. [#3667] + +- Allow the ``-`` character in SExtractor format column names. [#4168] + +- Improve DAOphot reader to read multi-aperture files. [#3535, #4207] + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- Support reading and writing bzip2 compressed files, i.e. ``.fits.bz2`` + files. [#3789] + +- Included a new command-line script called ``fitsinfo`` to display + a summary of the HDUs in one or more FITS files. [#3677] + +astropy.io.misc +^^^^^^^^^^^^^^^ + +- Support saving all meta information, description and units of tables and columns + in HDF5 files. [#4103] + +astropy.io.votable +^^^^^^^^^^^^^^^^^^ + +- A new method was added to ``astropy.io.votable.VOTable``, + ``get_info_by_id`` to conveniently find an ``INFO`` element by its + ``ID`` attribute. [#3633] + +- Instances in the votable tree now have better ``__repr__`` methods. [#3639] + +astropy.logger +^^^^^^^^^^^^^^ + +- Added log levels (e.g., DEBUG, INFO, CRITICAL) to ``astropy.log``. [#3947] + +astropy.modeling +^^^^^^^^^^^^^^^^ + +- Added a new ``Parameter.validator`` interface for setting a validation + method on individual model parameters. See the ``Parameter`` + documentation for more details. [#3910] + +- The projection classes that are named based on the 3-letter FITS + WCS projections (e.g. ``Pix2Sky_TAN``) now have aliases using + longer, more descriptive names (e.g. ``Pix2Sky_Gnomonic``). + [#3583] + +- All of the standard FITS WCS projection types have been + implemented in ``astropy.modeling.projections`` (by wrapping + WCSLIB). [#3906] + +- Added ``Sersic1D`` and ``Sersic2D`` model classes. [#3889] + +- Added the Voigt profile to existing models. [#3901] + +- Added ``bounding_box`` property and ``render_model`` function. [#3909] + +astropy.nddata +^^^^^^^^^^^^^^ + +- Added ``block_reduce`` and ``block_replicate`` functions. [#3453] + +- ``extract_array`` now offers different options to deal with array + boundaries. [#3727] + +- Added a new ``Cutout2D`` class to create postage stamp image cutouts + with optional WCS propagation. [#3823] + +astropy.stats +^^^^^^^^^^^^^ + +- Added ``sigma_lower`` and ``sigma_upper`` keywords to + ``sigma_clip`` to allow for unsymmetric clipping. [#3595] + +- Added ``cenfunc``, ``stdfunc``, and ``axis`` keywords to + ``sigma_clipped_stats``. [#3792] + +- ``sigma_clip`` automatically masks invalid input values (NaNs, Infs) before + performing the clipping. [#4051] + +- Added the ``histogram`` routine, which is similar to ``np.histogram`` but + includes several additional options for automatic determination of optimal + histogram bins. Associated helper routines include ``bayesian_blocks``, + ``friedman_bin_width``, ``scott_bin_width``, and ``knuth_bin_width``. + This functionality was ported from the astroML_ library. [#3756] + +- Added the ``bayesian_blocks`` routine, which implements a dynamic algorithm + for locating change-points in various time series. [#3756] + +- A new function ``poisson_conf_interval()`` was added to allow easy calculation + of several standard formulae for the error bars on the mean of a Poisson variable + estimated from a single sample. + +astropy.table +^^^^^^^^^^^^^ + +- ``add_column()`` and ``add_columns()`` now have a ``rename_duplicate`` + option to rename new column(s) rather than raising an exception when a name + already exists. [#3592] + +- Added ``Table.to_pandas`` and ``Table.from_pandas`` for converting to/from + pandas dataframes. [#3504]
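A minimal round-trip sketch (column names and values are arbitrary; pandas must be installed)::

    from astropy.table import Table

    t = Table({'a': [1, 2, 3], 'b': [4.0, 5.0, 6.0]})
    df = t.to_pandas()          # a pandas.DataFrame
    t2 = Table.from_pandas(df)  # back to a Table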
- Initializing a ``Table`` with ``Column`` objects no longer requires + that the column ``name`` attribute be defined. [#3781] + +- Added an ``info`` property to ``Table`` objects which provides configurable + summary information about the table and its columns. [#3731] + +- Added an ``info`` property to column classes (``Column`` or mixins). This + serves a dual function of providing configurable summary information about + the column, and acting as a manager of column attributes such as + name, format, or description. [#3731] + +- Updated table and column representation to use the ``dtype_info_name`` + function for the dtype value. Removed the default "masked=False" + from the table representation. [#3868, #3869] + +- Updated row representation to be consistent with the corresponding + table representation for that row. Added HTML representation so a + row displays nicely in the IPython notebook. + +- Added a new table indexing engine allowing for the creation of + indices on one or more columns of a table using ``add_index``. These + indices enable new functionality such as searching for rows by value + using ``loc`` and ``iloc``, as well as increased performance for + certain operations. [#3915, #4202] + +- Added capability to include a structured array or recarray in a table + as a mixin column. This allows for an approximation of nested tables. + [#3925] + +- Added ``keep_byteorder`` option to ``Table.as_array()``. See the + "API Changes" section below. [#4080] + +- Added a new method ``Table.replace_column()`` to replace an existing + column with a new data column. [#4090] + +- Added a ``tableclass`` option to ``Table.pformat()`` to allow specifying + a list of CSS classes added to the HTML table. [#4131] + +- New CSS for jsviewer table. [#2917, #2982, #4174] + +- Added a new ``Table.show_in_notebook`` method that shows an interactive view + of a Table (similar to ``Table.show_in_browser(jsviewer=True)``) in a + Python/Jupyter notebook. [#4197] + +- Added column alignment formatting for a better pprint viewing + experience. [#3644] + +astropy.tests +^^^^^^^^^^^^^ + +- Added new test config options, ``config_dir`` and ``cache_dir`` (these + can be edited in ``setup.cfg`` or as extra command-line options to + py.test) for setting the locations to use for the Astropy config files + and download caches (see also the related ``set_temp_config/cache`` + features added in ``astropy.config``). [#3975] + +astropy.time +^^^^^^^^^^^^ + +- Add support for FITS standard time strings. [#3547] + +- Allow the ``format`` attribute to be updated in place to change the + default representation of a ``Time`` object. [#3673] + +- Add support for shape manipulation (reshape, ravel, etc.). [#3224] + +- Add argmin, argmax, argsort, min, max, ptp, sort methods. [#3681] + +- Add ``Time.to_datetime`` method for converting ``Time`` objects to + timezone-aware datetimes. [#4119, #4124] + +astropy.units +^^^^^^^^^^^^^ + +- Added furlong to imperial units. [#3529] + +- Added mil to imperial units. [#3716] + +- Added stone to imperial units. [#4192] + +- Added Earth mass (``M_earth``) and Jupiter mass (``M_jup``) to units. [#3907] + +- Added support for functional units, in particular the logarithmic ones + ``Magnitude``, ``Decibel``, and ``Dex``. [#1894] + +- Quantities now work with the unit support in matplotlib. See + :ref:`plotting-quantities`. [#3981] + +- Clarified imperial mass measurements and added pound force (lbf), + kilopound (kip), and pound per square inch (psi). [#3409]
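A minimal sketch of the new imperial units (the printed value is approximate)::

    from astropy import units as u
    from astropy.units import imperial

    pressure = 3000 * imperial.psi
    print(pressure.to(u.MPa))   # roughly 20.68 MPa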
astropy.utils +^^^^^^^^^^^^^ + +- Added new ``OrderedDescriptor`` and ``OrderedDescriptorContainer`` utility + classes that make it easier to implement classes with declarative APIs, + wherein class-level attributes have an inherent "ordering" to them that is + specified by the order in which those attributes are defined in the class + declaration (by defining them using special descriptors that have + ``OrderedDescriptor`` as a base class). See the API documentation for + these classes for more details. Coordinate frames and models now use this + interface. [#3679] + +- The ``get_pkg_data_*`` functions now take an optional ``package`` argument + which allows specifying any package to read package data filenames or + content out of, as opposed to only being able to use data from the package + that the function is called from. [#4079] + +- Added function ``dtype_info_name`` to the ``data_info`` module to provide + the name of a ``dtype`` for human-readable informational purposes. [#3868] + +- Added ``classproperty`` decorator--this is to ``property`` as + ``classmethod`` is to normal instance methods. [#3982] + +- ``iers.open`` now handles network URLs, as well as local paths. [#3850] + +- The ``astropy.utils.wraps`` decorator now takes an optional + ``exclude_args`` argument not shared by the standard library ``wraps`` + decorator (as it is unique to the Astropy version's ability to copy + the wrapped function's argument signature). ``exclude_args`` allows + certain arguments on the wrapped function to be excluded from the signature + of the wrapper function. This is particularly useful when wrapping an + instance method as a function (to exclude the ``self`` argument). [#4017] + +- ``get_readable_fileobj`` can automatically decompress LZMA ('.xz') + files using the ``lzma`` module of Python 3.3+ or, when available, the + ``backports.lzma`` package on earlier versions. [#3667] + +- The ``resolve_name`` utility now accepts any number of additional + positional arguments that are automatically dotted together with the + first ``name`` argument. [#4083] + +- Added ``is_url_in_cache`` for resolving paths to cached files via URLS + and checking if files exist. [#4095] + +- Added a ``step`` argument to the ``ProgressBar.map`` method to give + users control over the update frequency of the progress bar. [#4191] + +astropy.visualization +^^^^^^^^^^^^^^^^^^^^^ + +- Added a function / context manager ``quantity_support`` for enabling + seamless plotting of ``Quantity`` instances in matplotlib. [#3981] + +- Added the ``hist`` function, which is similar to ``plt.hist`` but + includes several additional options for automatic determination of optimal + histogram bins. This functionality was ported from the astroML_ library. + [#3756] + +astropy.wcs +^^^^^^^^^^^ + +- The included version of wcslib has been upgraded to 5.10. [#3992, #4239] + + The minimum required version of wcslib in the 4.x series remains 4.24. + + The minimum required version of wcslib in the 5.x series is + 5.8. Building astropy against a wcslib 5.x prior to 5.8 + will raise an ``ImportError`` when ``astropy.wcs`` is imported. + + The wcslib changes relevant to astropy are: + +- The FITS headers returned by ``astropy.wcs.WCS.to_header`` and + ``astropy.wcs.WCS.to_header_string`` now include values with + more precision. This will result in numerical differences in + your results if you convert ``astropy.wcs.WCS`` objects to FITS + headers and use the results.
+ +- ``astropy.wcs.WCS`` now recognises the ``TPV``, ``TPD``, + ``TPU``, ``DSS``, ``TNX`` and ``ZPX`` polynomial distortions. + +- Added relaxation flags to allow ``PC0i_0ja``, ``PV0j_0ma``, and + ``PS0j_0ma`` (i.e. with leading zeroes on the index). + +- Tidied up error reporting, particularly relating to translating + status returns from lower-level functions. + +- Changed output formatting of floating point values in + ``to_header``. + +- Enhanced text representation of ``WCS`` objects. [#3604] + +- The ``astropy.tests.helper`` module is now part of the public API (and has a + documentation page). This module was in previous releases of astropy, + but was not considered part of the public API until now. [#3890] + +.. _astroML: http://astroML.org + +- There is a new function ``astropy.online_help`` to search the + astropy documentation and display the result in a web + browser. [#3642] + +API changes +----------- + +astropy.cosmology +^^^^^^^^^^^^^^^^^ + +- ``FLRW._tfunc`` and ``FLRW._xfunc`` are marked as deprecated. Users + should use the new public interfaces ``FLRW.lookback_time_integrand`` + and ``FLRW.abs_distance_integrand`` instead. [#3767] + +astropy.io.ascii +^^^^^^^^^^^^^^^^ + +- The default header line processing was made to be consistent with data line + processing in that it now ignores blank lines that may have whitespace + characters. Any code that explicitly specifies a ``header_start`` value + for parsing a file with blank lines in the header containing whitespace will + need to be updated. [#2654] + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- The ``uint`` argument to ``fits.open`` is now True by default; that is, + arrays using the FITS unsigned integer convention will be detected, and + read as unsigned integers by default. A new config option for + ``io.fits``, ``enable_uint``, can be changed to False to revert to the + original behavior of ignoring the ``uint`` convention unless it is + explicitly requested with ``uint=True``. [#3916] + +- The ``ImageHDU.NumCode`` and ``ImageHDU.ImgCode`` attributes (and same + for other classes derived from ``_ImageBaseHDU``) are deprecated. Instead, + the ``astropy.io.fits`` module-level constants ``BITPIX2DTYPE`` and + ``DTYPE2BITPIX`` can be used. [#3916] + +astropy.modeling +^^^^^^^^^^^^^^^^ + +- Note: Comparisons of model parameters with array-like values now + yields a Numpy boolean array as one would get with normal Numpy + array comparison. Previously this returned a scalar True or False, + with True only if the comparison was true for all elements compared, + which could lead to confusing circumstances. [#3912] + +- Using ``model.inverse = None`` to reset a model's inverse to its + default is deprecated. In the future this syntax will explicitly make + a model not have an inverse (even if it has a default). Instead, use + ``del model.inverse`` to reset a model's inverse to its default (if it + has a default, otherwise this just deletes any custom inverse that has + been assigned to the model and is still equivalent to setting + ``model.inverse = None``). [#4236] + +- Adds a ``model.has_user_inverse`` attribute which indicates whether or not + a user has assigned a custom inverse to ``model.inverse``. This is just + for informational purposes, for example, for software that introspects + model objects. [#4236] + +- Renamed the parameters of ``RotateNative2Celestial`` and + ``RotateCelestial2Native`` from ``phi``, ``theta``, ``psi`` to + ``lon``, ``lat`` and ``lon_pole``. 
[#3578] + +- Deprecated the ``Pix2Sky_AZP.check_mu`` and ``Sky2Pix_AZP.check_mu`` + methods (these were obscure "accidentally public" methods that were + probably not used by anyone). [#3910] + +- Added a phase parameter to the Sine1D model. [#3807] + +astropy.stats +^^^^^^^^^^^^^ + +- Renamed the ``sigma_clip`` ``sig`` keyword as ``sigma``. [#3595] + +- Changed the ``sigma_clip`` ``varfunc`` keyword to ``stdfunc``. [#3595] + +- Renamed the ``sigma_clipped_stats`` ``mask_val`` keyword to + ``mask_value``. [#3595] + +- Changed the default ``iters`` keyword value to 5 in both the + ``sigma_clip`` and ``sigma_clipped_stats`` functions. [#4067] + +astropy.table +^^^^^^^^^^^^^ + +- ``Table.as_array()`` always returns a structured array with each column in + the system's native byte order. The optional ``keep_byteorder=True`` + option will keep each column's data in its original byteorder. [#4080] + +- ``Table.simple_table()`` now creates tables with int64 and float64 types + instead of int32 and float64. [#4114] + +- An empty table can now be initialized without a ``names`` argument as long + as a valid ``dtype`` argument (with names embedded) is supplied. [#3977] + +astropy.time +^^^^^^^^^^^^ + +- The ``astropy_time`` attribute and time format has been removed from the + public interface. Existing code that instantiates a new time object using + ``format='astropy_time'`` can simply omit the ``format`` + specification. [#3857] + +astropy.units +^^^^^^^^^^^^^ + +- Single-item ``Quantity`` instances with record ``dtype`` will now have + their ``isscalar`` property return ``True``, consistent with behaviour for + numpy arrays, where ``np.void`` records are considered scalar. [#3899] + +- Three changes relating to the FITS unit format [#3993]: + +- The FITS unit format will no longer parse an arbitrary number as a + scale value. It must be a power of 10 of the form ``10^^k``, + ``10^k``, ``10+k``, ``10-k`` and ``10(k)``. [#3993] + +- Scales that are powers of 10 can be written out. Previously, any + non-1.0 scale was rejected. + +- The ``*`` character is accepted as a separator between the scale + and the units. + +- Unit formatter classes now require the ``parse`` and ``to_string`` + methods to be classmethods (and the formatter classes themselves are + assumed to be singletons that are not instantiated). As unit formatters + are mostly an internal implementation detail, this is not likely to affect + any users. [#4001] + +- CGS E&M units are now defined separately from SI E&M units, and have + distinct physical types. [#4255, #4355] + +astropy.utils +^^^^^^^^^^^^^ + +- All of the ``get_pkg_data_*`` functions take an optional ``package`` + argument as their second positional argument, so any code that previously + passed other arguments to these functions as positional arguments might + break. Use keyword argument passing instead to mitigate this. [#4079] + +- ``astropy.utils.iers`` now uses a ``QTable`` internally, which means that + the numerical columns are stored as ``Quantity``, with full support for + units. Furthermore, the ``ut1_utc`` method now returns a ``Quantity`` + instead of a float or an array (as did ``pm_xy`` already). [#3223] + +- ``astropy.utils.iers`` now throws an ``IERSRangeError``, a subclass + of ``IndexError``, rather than a raw ``IndexError``. This allows more + fine-grained catching of situations where a ``Time`` is beyond the range + of the loaded IERS tables.
[#4302] + +astropy.wcs +^^^^^^^^^^^ + +- When compiled with wcslib 5.9 or later, the FITS headers returned + by ``astropy.wcs.WCS.to_header`` and + ``astropy.wcs.WCS.to_header_string`` now include values with more + precision. This will result in numerical differences in your + results if you convert ``astropy.wcs.WCS`` objects to FITS headers + and use the results. + +- If NAXIS1 or NAXIS2 is not passed with the header object to + WCS.calc_footprint, a ValueError is raised. [#3557] + +Bug fixes +--------- + +astropy.constants +^^^^^^^^^^^^^^^^^ + +- The constants ``Ry`` and ``u`` are now properly used inside the + corresponding units. The latter have changed slightly as a result. [#4229] + +astropy.coordinates +^^^^^^^^^^^^^^^^^^^ + +- Internally, ``coordinates`` now consistently uses the appropriate time + scales for using ERFA functions. [#4302] + +astropy.io.ascii +^^^^^^^^^^^^^^^^ + +- Fix a segfault in the fast C parser when one of the column headers + is empty [#3545]. + +- Fix several bugs that prevented the fast readers from being used + when guessing the file format. Also improved the read trace + information to better understand format guessing. [#4115] + +- Fix an underlying problem that resulted in an uncaught TypeError + exception when reading a CDS-format file with guessing enabled. [#4120] + +astropy.modeling +^^^^^^^^^^^^^^^^ + +- ``Simplex`` fitter now correctly passes additional keywords arguments to + the scipy solver. [#3966] + +- The keyword ``acc`` (for accuracy) is now correctly accepted by + ``Simplex``. [#3966] + +astropy.units +^^^^^^^^^^^^^ + +- The units ``Ryd`` and ``u`` are no longer hard-coded numbers, but depend + on the appropriate values in the ``constants`` module. As a result, these + units now imply slightly different conversions. [#4229] + +Other Changes and Additions +--------------------------- + +- The ``./setup.py test`` command is now implemented in the ``astropy.tests`` + module again (previously its implementation had been moved into + astropy-helpers). However, that made it difficult to synchronize changes + to the Astropy test runner with changes to the ``./setup.py test`` UI. + astropy-helpers v1.1 and above will detect this implementation of the + ``test`` command, when present, and use it instead of the old version that + was included in astropy-helpers (most users will not notice any difference + as a result of this change). [#4020] + +- The repr for ``Table`` no longer displays ``masked=False`` since tables + are not masked by default anyway. [#3869] + +- The version of ``PLY`` that ships with astropy has been updated to 3.6. + +- WCSAxes is now required for doc builds. [#4074] + +- The migration guide from pre-v0.4 coordinates has been removed to avoid + cluttering the ``astropy.coordinates`` documentation with increasingly + irrelevant material. To see the migration guide, we recommend you simply look + to the archived documentation for previous versions, e.g. + http://docs.astropy.org/en/v1.0/coordinates/index.html#migrating-from-pre-v0-4-coordinates + [#4203] + +- In ``astropy.coordinates``, the transformations between GCRS, CIRS, + and ITRS have been adjusted to more logically reflect the order in + which they actually apply. This should not affect most coordinate + transformations, but may affect code that is especially sensitive to + machine precision effects that change when the order in which + transformations occur is changed. [#4255] + +- Astropy v1.1.0 will be the last release series to officially support + Python 2.6. 
A deprecation warning will now be issued when using Astropy + in Python 2.6 (this warning can be disabled through the usual Python warning + filtering mechanisms). [#3779] + + +1.0.13 (2017-05-29) +=================== + +Bug Fixes +--------- + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- Fix use of quantize level parameter for ``CompImageHDU``. [#6029] + +- Prevent crash when a header contains non-ASCII (e.g. UTF-8) characters, to + allow fixing the problematic cards. [#6084] + + +1.0.12 (2017-03-05) +=================== + +Bug Fixes +--------- + +astropy.convolution +^^^^^^^^^^^^^^^^^^^ + +- Fixed bug in ``discretize_integrate_2D`` in which x and y coordinates + were swapped. [#5634] + +astropy.coordinates +^^^^^^^^^^^^^^^^^^^ + +- Fixed a bug where ``get_transform`` could sometimes produce confusing errors + because of a typo in the input validation. [#5645] + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- Guard against extremely unlikely problems in compressed images, which + could lead to memory unmapping errors. [#5775] + +astropy.io.votable +^^^^^^^^^^^^^^^^^^ + +- Fixed a bug where stdlib ``realloc()`` was used instead of + ``PyMem_Realloc()``. [#5696, #4739, #2100] + +astropy.utils +^^^^^^^^^^^^^ + +- Fixed ImportError with NumPy < 1.7 and Python 3.x in + ``_register_patched_dtype_reduce``. [#5848] + + +1.0.11 (2016-12-22) +=================== + +Bug Fixes +--------- + +astropy.coordinates +^^^^^^^^^^^^^^^^^^^ + +- Initialising a ``SkyCoord`` from a list containing a single ``SkyCoord`` no longer removes + the distance from the coordinate. [#5270] + +- Fix errors in the implementation of the conversion to and from FK4 frames + without e-terms, which will have affected coordinates not on the unit + sphere (i.e., with distances). [#4293] + +- Fix a bug where, with CDS units enabled, it was no longer possible to initialize + an ``Angle``. [#5483] + +- Ensure that ``search_around_sky`` and ``search_around_3d`` return + integer type index arrays for empty (non) matches. [#4877, #5083] + +- Return an empty set of matches for ``search_around_sky`` and + ``search_around_3d`` when one or both of the input coordinate + arrays is empty. [#4875, #5083] + +astropy.io.ascii +^^^^^^^^^^^^^^^^ + +- Fix a bug with empty value at end of tab-delimited table on Windows. [#5370] + +- Fix reading of big ASCII tables (more than 2 GB) with the fast reader. + [#5319] + +- Fix segfault with FastCsv and row with too many columns. [#5534] + +- Fix problem reading an AASTex format table that does not have ``\\`` + at the end of the last table row. [#5427] + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- Removed raising of AssertionError that could occur after closing or + deleting compressed image data. [#4690, #4694, #4948] + +- Fixed bug that caused an ignored exception to be displayed under certain + conditions when terminating a script after using fits.getdata(). [#4977] + +- Fixed usage of inplace operations that were raising an exception with + recent versions of Numpy due to implicit casting. [#5250] + +astropy.io.votable +^^^^^^^^^^^^^^^^^^ + +- Fixed bug of ``Resource.__repr__()`` having undefined attributes and + variables. [#5382] + +astropy.modeling +^^^^^^^^^^^^^^^^ + +- ``CompoundModel`` now correctly inherits ``_n_models``, allowing the use of model sets. [#5358] + +astropy.units +^^^^^^^^^^^^^ + +- Fixed bug in ``Ci`` definition. [#5106] + +- Non-ASCII CDS unit strings are now correctly represented using ``str`` on + Python 2 as well. This also fixes bugs in parsing coordinates that involve + such strings.
+ [#5355] + +- Ensure ``Quantity`` supports ``np.float_power``, which is new in numpy 1.12. + [#5480] + +astropy.utils +^^^^^^^^^^^^^ + +- Fixed AttributeError when calling ``utils.misc.signal_number_to_name`` with + Python3 [#5430]. + +astropy.wcs +^^^^^^^^^^^ + +- Update the ``_naxis{x}`` attributes when calling ``WCS.slice``. [#5411] + + +Other Changes and Additions +--------------------------- + +- The bundled ERFA was updated to version 1.3.0. This includes the + leap second planned for 2016 Dec 31. [#5418] + +1.0.10 (2016-06-09) +=================== + +Bug Fixes +--------- + +astropy.coordinates +^^^^^^^^^^^^^^^^^^^ + +- ``SkyCoord`` objects created before a new frame which has frame attributes + is created no longer raise ``AttributeError`` when the new attributes are + accessed [#5021] + +- Fix some errors in the implementation of aberration for ``get_sun``. [#4979] + +astropy.io.ascii +^^^^^^^^^^^^^^^^ + +- Fix problem reading a zero-length ECSV table with a bool type column. [#5010] + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- Fix convenience functions (``getdata``, ``getheader``, ``append``, + ``update``) to close files. [#4786] + +astropy.io.votable +^^^^^^^^^^^^^^^^^^ + +- The astropy.io.votable.validator.html module is updated to handle division + by zero when generating validation report. [#4699] + +astropy.table +^^^^^^^^^^^^^ + +- Fixed a bug where ``pprint()`` sometimes raises ``UnicodeDecodeError`` + in Python 2. [#4946] + +- Fix bug when doing outer join on multi-dimensional columns. [#4060] + +- Fixed bug where Tables created from existing Table objects were not + inheriting the ``primary_key`` attribute. [#4672] + +astropy.tests +^^^^^^^^^^^^^ + +- Fix coverage reporting in Python 3. [#4822] + +astropy.units +^^^^^^^^^^^^^ + +- Duplicates between long and short names are now removed in the ``names`` + and ``aliases`` properties of units. [#5036] + +astropy.utils +^^^^^^^^^^^^^ + +- The astropy.utils.xml.unescaper module now also unescapes ``'%2F'`` to + ``'/'`` and ``'&&'`` to ``'&'`` in a given URL. [#4699] + +- Fix two problems related to the download cache: clear_download_cache() does + not work in Python 2.7 and downloading in Python 2.7 and then Python 3 + can result in an exception. [#4810] + +astropy.vo +^^^^^^^^^^ + +- Cache option now properly caches both downloaded JSON database and XML VO + tables. [#4699] + +- The astropy.vo.validator.conf.conesearch_urls listing is updated to reflect + external changes to some VizieR Cone Search services. [#4699] + +- VOSDatabase decodes byte-string to UTF-8 instead of ASCII to avoid + UnicodeDecodeError for some rare cases. Fixed a Cone Search test that is + failing as a side-effect of #4699. [#4757] + +Other Changes and Additions +--------------------------- + +- Updated ``astropy.tests`` test runner code to work with Coverage v4.0 when + generating test coverage reports. [#4176] + + +1.0.9 (2016-03-10) +================== + +New Features +------------ + +astropy.nddata +^^^^^^^^^^^^^^ + +- ``NDArithmeticMixin`` check for matching WCS now works with + ``astropy.wcs.WCS`` objects [#4499, #4503] + +Bug Fixes +--------- + +astropy.convolution +^^^^^^^^^^^^^^^^^^^ + +- Correct a bug in which ``psf_pad`` and ``fft_pad`` would be ignored [#4366] + +astropy.io.ascii +^^^^^^^^^^^^^^^^ + +- Fixed addition of new line characters after last row of data in + ascii.latex.AASTex. [#4561] + +- Fixed reading of Latex tables where the ``\tabular`` tag is in the first + line. [#4595] + +- Fix use of plain format strings with the fast writer. 
[#4517] + +- Fix bug when writing a space-delimited file when the table has empty fields. [#4417] + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- Fixed possible segfault during error handling in FITS tile + compression. [#4489] + +- Fixed crash on pickling of binary table columns with the 'X', 'P', or + 'Q' format. [#4514] + +- Fixed memory / reference leak that could occur when copying a ``FITS_rec`` + object (the ``.data`` for table HDUs). [#520] + +- Fixed a memory / reference leak in ``FITS_rec`` that occurred in a wide + range of cases, especially after writing FITS tables to a file, but in + other cases as well. [#4539] + +astropy.modeling +^^^^^^^^^^^^^^^^ + +- Fixed display of compound model expressions and components when printing + compound model instances. [#4414, #4482] + +astropy.stats +^^^^^^^^^^^^^ + +- The input for ``median_absolute_deviation`` will not be cast to a plain numpy + array when given a subclass of numpy arrays + (like ``Quantity``, ``numpy.ma.MaskedArray``, etc.). [#4658] + +- Fixed incorrect results when using ``median_absolute_deviation`` with masked + arrays. [#4658] + +astropy.utils +^^^^^^^^^^^^^ + +- The ``zest.releaser`` hooks included in Astropy are now injected locally to + Astropy, rather than being global. [#4650] + +astropy.visualization +^^^^^^^^^^^^^^^^^^^^^ + +- Fixed ``fits2bitmap`` script to allow ext flag to contain extension + names or numbers. [#4468] + +- Fixed ``fits2bitmap`` default output filename generation for + compressed FITS files. [#4468] + + +1.0.8 (2016-01-08) +================== + +Bug Fixes +--------- + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- Fixed a bug where updates to string columns in FITS tables were not saved + on Python 3. [#4452] + +astropy.units +^^^^^^^^^^^^^ + +- In-place peak-to-peak calculations now work on ``Quantity``. [#4442] + +astropy.utils +^^^^^^^^^^^^^ + +- Fixed ``find_api_page`` to work correctly on Python 3.x. [#4378, #4379] + + +1.0.7 (2015-12-04) +================== + +Bug Fixes +--------- + +astropy.coordinates +^^^^^^^^^^^^^^^^^^^ + +- Pickling of ``EarthLocation`` instances now also works on Python 2. [#4304] + +astropy.io.ascii +^^^^^^^^^^^^^^^^ + +- Fix fast writer so bytestring column output is not prefixed by 'b' in + Python 3. [#4350] + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- Fixed a regression that could cause writes of large FITS files to be + truncated. [#4307] + +- Astropy v1.0.6 included a fix (#4228) for an obscure case where the TDIM + of a table column is smaller than the repeat count of its data format. + This updates that fix in such a way that it works with Numpy 1.10 as well. + [#4266] + +astropy.table +^^^^^^^^^^^^^ + +- Fix a bug when pickling a Table with mixin columns (e.g. Time). [#4098] + +astropy.time +^^^^^^^^^^^^ + +- Fix incorrect ``value`` attribute for epoch formats like "unix" + when ``scale`` is different from the class ``epoch_scale``. [#4312] + +astropy.utils +^^^^^^^^^^^^^ + +- Fixed an issue where if ipython is installed but ipykernel is not + installed then importing astropy from the ipython console gave an + IPython.kernel deprecation warning. [#4279] + +- Fixed crash that could occur in ``ProgressBar`` when ``astropy`` is + imported in an IPython startup script. [#4274] + +Other Changes and Additions +--------------------------- + +- Updated bundled astropy-helpers to v1.0.6. [#4372] + + +1.0.6 (2015-10-22) +================== + +Bug Fixes +--------- + +astropy.analytic_functions +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- Fixed blackbody analytic functions to properly support arrays of + temperatures.
[#4251] + +astropy.coordinates +^^^^^^^^^^^^^^^^^^^ + +- Fixed errors in transformations for objects within a few AU of the + Earth. Included substantive changes to transformation machinery + that may change distances at levels ~machine precision for other + objects. [#4254] + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- ``fitsdiff`` and related functions now do a better job reporting differences + between values that are different types but have the same representation + (ex: the string '0' versus the number 0). [#4122] + +- Miscellaneous fixes for supporting Numpy 1.10. [#4228] + +- Fixed an issue where writing a column of unicode strings to a FITS table + resulted in a quadrupling of size of the column (i.e. the format of the + FITS column was 4 characters for every one in the original strings). + [#4228] + +- Added support for an obscure case (but nonetheless allowed by the FITS + standard) where a column has some TDIMn keyword, but a repeat count in + the TFORMn column greater than the number of elements implied by the + TDIMn. For example TFORMn = 100I, but TDIMn = '(5,5)'. In this case + the TDIMn implies 5x5 arrays in the column, but the TFORMn implies + a 100 element 1-D array in the column. In this case the TDIM takes + precedence, and the remaining bytes in the column are ignored. [#4228] + +astropy.io.votable +^^^^^^^^^^^^^^^^^^ + +- Fixed crash with Python compiler optimization level = 2. [#4231] + +astropy.vo +^^^^^^^^^^ + +- Fixed ``check_conesearch_sites`` with ``parallel=True`` on Python >= 3.3 + and on Windows (it was broken in both those cases for separate reasons). + [#2970] + +Other Changes and Additions +--------------------------- + +- All tests now pass against Numpy v1.10.x. This implies nominal support for + Numpy 1.10.x moving forward (but there may still be unknown issues). For + example, there is already a known performance issue with tables containing + large multi-dimensional columns--for example, tables that contain entire + images in one or more of their columns. This is a known upstream issue in + Numpy. [#4259] + + +1.0.5 (2015-10-05) +================== + +Bug Fixes +--------- + +astropy.constants +^^^^^^^^^^^^^^^^^ + +- Rename units -> unit and error -> uncertainty in the ``repr`` and ``str`` + of constants to match attribute names. [#4147] + +astropy.coordinates +^^^^^^^^^^^^^^^^^^^ + +- Fix string representation of ``SkyCoord`` objects transformed into + the ``AltAz`` frame [#4055, #4057] + +- Fix the ``search_around_sky`` function to allow ``storekdtree`` to be + ``False`` as was intended. [#4082, #4212] + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- Fix bug when extending one header (without comments) with another + (with comments). [#3967] + +- Somewhat improved resource usage for FITS data--previously a new ``mmap`` + was opened for each HDU of a FITS file accessed through an ``HDUList``. + Each ``mmap`` used up a single file descriptor, causing problems with + system resource limits for some users. Now only a single ``mmap`` is + opened, and shared for the data of all HDUs. Note: The problem still + persists with using the "convenience" functions. For example using + ``fits.getdata`` will create one ``mmap`` per HDU read this way (as + opposed to opening the file with ``fits.open`` and accessing the HDUs + through the ``HDUList`` object). [#4097] + +- Fix bug where reading a file without a newline failed with an + unrelated / unhelpful exception. [#4160] + +astropy.modeling +^^^^^^^^^^^^^^^^ + +- Cleaned up ``repr`` of models that have no parameters. 
[#4076] + +astropy.nddata +^^^^^^^^^^^^^^ + +- Initializing ``NDDataArray`` from another instance now sets ``flags`` as + expected and no longer fails when ``uncertainty`` is set [#4129]. + Initializing an ``NDData`` subclass from a parent instance + (eg. ``NDDataArray`` from ``NDData``) now sets the attributes other than + ``data`` as it should [#4130, #4137]. + +astropy.table +^^^^^^^^^^^^^ + +- Fix an issue with setting fill value when column dtype is changed. [#4088] + +- Fix bug when unpickling a bare Column where the _parent_table + attribute was not set. This impacted the Column representation. [#4099] + +- Fix issue with the web browser opening with an empty page, and ensure that + the url is correctly formatted for Windows. [#4132] + +- Fix NameError in table stack exception message. [#4213] + +astropy.utils +^^^^^^^^^^^^^ + +- ``resolve_name`` no longer causes ``sys.modules`` to be cluttered with + additional copies of modules under a package imported like + ``resolve_name('numpy')``. [#4084] + +- ``console`` was updated to support IPython 4.x and Jupyter 1.x. + This should suppress a ShimWarning that was appearing at + import of astropy with IPython 4.0 or later. [#4078] + +- Temporary downloaded files created by ``get_readable_fileobj`` when passed + a URL are now deleted immediately after the file is closed. [#4198] + +astropy.visualization +^^^^^^^^^^^^^^^^^^^^^ + +- The color for axes labels was set to white. Since white labels on white + background are hard to read, the label color has been changed to black. + [#4143] + +- ``ImageNormalize`` now automatically determines ``vmin``/``vmax`` + (via the ``autoscale_None`` method) when they have not been set + explicitly. [#4117] + +astropy.vo +^^^^^^^^^^ + +- Cone Search validation no longer crashes when the provider gives an + incomplete test query. It also ensures search radius for a test query + is not too large to avoid timeout. [#4158, #4159] + +Other Changes and Additions +--------------------------- + +- Astropy now supports Python 3.5. [#4027] + +- Updated bundled version of astropy-helpers to 1.0.5. [#4215] + +- Updated tests to support py.test 2.7, and upgraded the bundled copy of + py.test to v2.7.3. [#4027] + + +1.0.4 (2015-08-11) +================== + +New Features +------------ + +astropy.convolution +^^^^^^^^^^^^^^^^^^^ + +- Modified Cython functions to release the GIL. This enables convolution + to be parallelized effectively and gives large speedups when used with + multithreaded task schedulers such as Dask. [#3949] + +API Changes +----------- + +astropy.coordinates +^^^^^^^^^^^^^^^^^^^ + +- Some transformations for an input coordinate that's a scalar now correctly + return a scalar. This was always the intended behavior, but it may break + code that has been written to work-around this bug, so it may be viewed as + an unplanned API change [#3920, #4039] + +astropy.visualization +^^^^^^^^^^^^^^^^^^^^^ + +- The ``astropy_mpl_style`` no longer sets ``interactive`` to ``True``, but + instead leaves it at the user preference. This makes using the style + compatible with building docs with Sphinx, and other non-interactive + contexts. [#4030] + +Bug Fixes +--------- + +astropy.coordinates +^^^^^^^^^^^^^^^^^^^ + +- Fix bug where coordinate representation setting gets reset to default value + when coordinate array is indexed or sliced. [#3824] + +- Fixed confusing warning message shown when using dates outside current IERS + data. 
+ +API Changes +----------- + +astropy.coordinates +^^^^^^^^^^^^^^^^^^^ + +- Some transformations for an input coordinate that's a scalar now correctly + return a scalar.  This was always the intended behavior, but it may break + code that has been written to work around this bug, so it may be viewed as + an unplanned API change. [#3920, #4039] + +astropy.visualization +^^^^^^^^^^^^^^^^^^^^^ + +- The ``astropy_mpl_style`` no longer sets ``interactive`` to ``True``, but + instead leaves it at the user preference.  This makes using the style + compatible with building docs with Sphinx, and other non-interactive + contexts. [#4030] + +Bug Fixes +--------- + +astropy.coordinates +^^^^^^^^^^^^^^^^^^^ + +- Fix bug where coordinate representation setting gets reset to default value + when coordinate array is indexed or sliced. [#3824] + +- Fixed confusing warning message shown when using dates outside current IERS + data. [#3844] + +- ``get_sun`` now yields a scalar when the input time is a scalar (this was a + regression in v1.0.3 from v1.0.2). [#3998, #4039] + +- Fixed bug where some scalar coordinates were incorrectly being changed to + length-1 array coordinates after transforming through certain frames. + [#3920, #4039] + +- Fixed bug causing the ``separation`` methods of ``SkyCoord`` and frame + classes to fail due to infinite recursion. [#4033, #4039] + +- Passing a list of ``SkyCoord`` objects that are in + ``UnitSphericalRepresentation`` to the ``SkyCoord`` constructor now + appropriately yields a new object in ``UnitSphericalRepresentation``. + [#3938, #4039] + +astropy.cosmology +^^^^^^^^^^^^^^^^^ + +- Fixed ``wCDM`` to not ignore the ``Ob0`` parameter on initialization. [#3934] + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- Fixed crash when updating data in a random groups HDU opened in update + mode. [#3730] + +- Fixed incorrect checksum / datasum being written when re-writing a scaled + HDU (i.e. non-trivial BSCALE and/or BZERO) with + ``do_not_scale_image_data=False``. [#3883] + +- Fixed stray deprecation warning in ``BinTableHDU.copy()``. [#3798] + +- Better handling of the ``BLANK`` keyword when auto-scaling scaled image + data.  The ``BLANK`` keyword is now removed from the header after + auto-scaling is applied, and it is restored properly (with floating point + NaNs replaced by the filler value) when updating a file opened with the + ``scale_back=True`` argument.  Invalid usage of the ``BLANK`` keyword is + also better warned about during validation. [#3865] + +- Reading memmapped scaled images won't fail when + ``do_not_scale_image_data=True`` (that is, since we're just reading the raw + / physical data there is no reason mmap can't be used). [#3766] + +- Fixed a reference cycle that could sometimes cause FITS table-related + objects (``BinTableHDU``, ``ColDefs``, etc.) to hang around in memory + longer than expected. [#4012] + +astropy.modeling +^^^^^^^^^^^^^^^^ + +- Improved support for pickling of compound models, including both compound + model instances, and new compound model classes. [#3867] + +- Added missing default values for ``Ellipse2D`` parameters. [#3903] + +astropy.time +^^^^^^^^^^^^ + +- Fixed iteration of scalar ``Time`` objects so that ``iter()`` correctly + raises a ``TypeError`` on them (while still allowing ``Time`` arrays to be + iterated). [#4048] + +astropy.units +^^^^^^^^^^^^^ + +- Added frequency-equivalency check when declaring Doppler equivalencies. + [#3728] + +- Define ``floor_divide`` (``//``) for ``Quantity`` to be consistent with + ``divmod``, such that it only works where the quotient is dimensionless. + This guarantees that ``(q1 // q2) * q2 + (q1 % q2) == q1``. [#3817] + +- Fixed the documentation of supported units to correctly report support for + SI prefixes.  Previously the table of supported units incorrectly showed + several derived units as not supporting prefixes, when in fact they do. + [#3835] + +- Fix a crash when calling ``astropy.units.cds.enable()``.  This will now + "set" rather than "add" units to the active set to avoid the namespace + clash with the default units. [#3873] + +- Ensure in-place operations on ``float32`` quantities work. [#4007] + +astropy.utils +^^^^^^^^^^^^^ + +- The ``deprecated`` decorator did not correctly wrap classes that have a + custom metaclass--the metaclass could be dropped from the deprecated + version of the class.
[#3997] + +- The ``wraps`` decorator would copy the wrapped function's name to the + wrapper function even when ``'__name__'`` is excluded from the ``assigned`` + argument. [#4016] + +Misc +^^^^ + +- ``fitscheck`` no longer causes scaled image data to be rescaled when + adding checksums to existing files. [#3884] + +- Fixed an issue where running ``import astropy`` from within the source + tree did not automatically build the extension modules if the source is + from a source distribution (as opposed to a git repository). [#3932] + +- Fixed multiple instances of a bug that prevented Astropy from being used + when compiled with the ``python -OO`` flag, due to it causing all + docstrings to be stripped out. [#3923] + +- Removed source code template files that were being installed + accidentally alongside installed Python modules. [#4014] + +- Fixed a bug in the exception logging that caused a crash in the exception + handler itself on Python 3 when exceptions do not include a message. + [#4056] + + +1.0.3 (2015-06-05) +================== + +New Features +------------ + +astropy.table +^^^^^^^^^^^^^ + +- Greatly improved the speed of printing a large table to the screen when + only a few rows are being displayed. [#3796] + +astropy.time +^^^^^^^^^^^^ + +- Add support for the 2015-Jun-30 leap second. [#3794] + +API Changes +----------- + +astropy.io.ascii +^^^^^^^^^^^^^^^^ + +- Note that HTML formatted tables will not always be found with guess mode + unless it passes certain heuristics that strongly suggest the presence of + HTML in the input. Code that expects to read tables from HTML should + specify ``format='html'`` explicitly. See bug fixes below for more + details. [#3693] + +Bug Fixes +--------- + +astropy.convolution +^^^^^^^^^^^^^^^^^^^ + +- Fix issue with repeated normalizations of ``Kernels``. [#3747] + +astropy.coordinates +^^^^^^^^^^^^^^^^^^^ + +- Fixed ``get_sun`` to yield frames with the ``obstime`` set to what's passed into the function (previously it incorrectly always had J2000). [#3750] + +- Fixed ``get_sun`` to account for aberration of light. [#3750] + +- Fixed error in the GCRS->ICRS transformation that gave incorrect distances. [#3750] + +astropy.io.ascii +^^^^^^^^^^^^^^^^ + +- Remove HTML from the list of automatically-guessed formats when reading if + the file does not appear to be HTML. This was necessary to avoid a + commonly-encountered segmentation fault occurring in the libxml parser on + MacOSX. [#3693] + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- Fixes to support the upcoming Numpy 1.10. [#3419] + +astropy.modeling +^^^^^^^^^^^^^^^^ + +- Polynomials are now scaled when used in a compound model. [#3702] + +- Fixed the ``Ellipse2D`` model to be consistent with ``Disk2D`` in + how pixels are included. [#3736] + +- Fixed crash when evaluating a model that accepts no inputs. [#3772] + +astropy.testing +^^^^^^^^^^^^^^^ + +- The Astropy py.test plugins that disable unintentional internet access + in tests were also blocking use of local UNIX sockets in tests, which + prevented testing some multiprocessing code--fixed. [#3713] + +astropy.units +^^^^^^^^^^^^^ + +- Supported full SI prefixes for the barn unit ("picobarn", "femtobarn", + etc.) [#3753] + +- Fix loss of precision when multiplying non-whole-numbered powers + of units together. For example, before this change, ``(u.m ** + 1.5) ** Fraction(4, 5)`` resulted in an inaccurate floating-point + power of ``1.2000000000000002``. After this change, the exact + rational number of ``Fraction(6, 5)`` is maintained. 
[#3790] + +- Fixed printing of object ndarrays containing multiple Quantity + objects with differing / incompatible units. Note: Unit conversion errors + now cause a ``UnitConversionError`` exception to be raised.  However, this + is a subclass of the ``UnitsError`` exception used previously, so existing + code that catches ``UnitsError`` should still work. [#3778] + +Other Changes and Additions +--------------------------- + +- Added a new ``astropy.__bibtex__`` attribute which gives a citation + for Astropy in bibtex format. [#3697] + +- The bundled version of ERFA was updated to v1.2.0 to address leapsecond + updates. [#3802] + + +0.4.6 (2015-05-29) +================== + +Bug Fixes +--------- + +astropy.time +^^^^^^^^^^^^ + +- Fixed ERFA code to handle the 2015-Jun-30 leap second. [#3795] + + +1.0.2 (2015-04-16) +================== + +New Features +------------ + +astropy.modeling +^^^^^^^^^^^^^^^^ + +- Added support for polynomials with degree 0 or degree greater than 15. + [#3574, #3589] + +Bug Fixes +--------- + +astropy.config +^^^^^^^^^^^^^^ + +- The pre-astropy-0.4 configuration API has been fixed.  It was + inadvertently broken in 1.0.1. [#3627] + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- Fixed a severe memory leak that occurred when reading tile compressed + images. [#3680] + +- Fixed bug where column data could be unintentionally byte-swapped when + copying data from an existing FITS file to a new FITS table with a + TDIMn keyword for that column. [#3561] + +- The ``ColDefs.change_attrib``, ``ColDefs.change_name``, and + ``ColDefs.change_unit`` methods now work as advertised.  It is also + possible (and preferable) to update attributes directly on ``Column`` + objects (for example setting ``column.name``), and the change will be + accurately reflected in any associated table data and its FITS header. + [#3283, #1539, #2618] + +- Fixed an issue with the ``FITS_rec`` interface to FITS table data, where a + ``FITS_rec`` created by copying an existing FITS table but adding new rows + could not be sliced or masked correctly. [#3641] + +- Fixed handling of BINTABLE with TDIMn of size 1. [#3580] + +astropy.io.votable +^^^^^^^^^^^^^^^^^^ + +- Loading a ``TABLE`` element without any ``DATA`` now correctly + creates a 0-row array. [#3636] + +astropy.modeling +^^^^^^^^^^^^^^^^ + +- Added workaround to support inverses on compound models when one of the + sub-models is itself a compound model with a manually-assigned custom + inverse. [#3542] + +- Fixed instantiation of polynomial models with constraints for parameters + (constraints could still be assigned after instantiation, but not during). + [#3606] + +- Fixed fitting of 2D polynomial models with the ``LeVMarLSQFitter``. [#3606] + +astropy.table +^^^^^^^^^^^^^ + +- Ensure ``QTable`` can be pickled. [#3590] + +- Some corner cases when instantiating an ``astropy.table.Table`` + with a Numpy array are handled [#3637]. Notably: + +  - a zero-length array is the same as passing ``None``; + +  - a scalar raises a ``ValueError``; + +  - a one-dimensional array is treated as a single row of a table. + +- Ensure a ``Column`` without units is treated as an ``array``, not as a + dimensionless ``Quantity``. [#3648] + +astropy.units +^^^^^^^^^^^^^ + +- Ensure equivalencies that do more than just scale a ``Quantity`` are + properly handled also in ``ufunc`` evaluations. [#2496, #3586] + +- The LaTeX representation of the Angstrom unit has changed from + ``\overset{\circ}{A}`` to ``\mathring{A}``, which should have + better support across regular LaTeX, MathJax and matplotlib (as of + version 1.5). [#3617]
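+ +A quick way to inspect the new representation (a sketch only; the exact string may differ between versions):: + + >>> import astropy.units as u + >>> u.AA.to_string(format='latex') + '$\mathring{A}$'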
+ +astropy.vo +^^^^^^^^^^ + +- Using HTTPS/SSL for communication between SAMP hubs now works + correctly on all supported versions of Python. [#3613] + +astropy.wcs +^^^^^^^^^^^ + +- When no ``relax`` argument is passed to ``WCS.to_header()`` and + the result omits non-standard WCS keywords, a warning is + emitted. [#3652] + +Other Changes and Additions +--------------------------- + +astropy.vo +^^^^^^^^^^ + +- The number of retries for connections in ``astropy.vo.samp`` can now be + configured by a ``n_retries`` configuration option. [#3612] + +Testing +^^^^^^^ + +- Running ``astropy.test()`` from within the IPython prompt has been + provisionally re-enabled. [#3184] + + +1.0.1 (2015-03-06) +================== + +Bug Fixes +--------- + +astropy.constants +^^^^^^^^^^^^^^^^^ + +- Ensure constants can be turned into ``Quantity`` safely. [#3537, #3538] + +astropy.io.ascii +^^^^^^^^^^^^^^^^ + +- Fix a segfault in the fast C parser when one of the column headers + is empty. [#3545] + +- Fixed support for reading inf and nan values with the fast reader in + Windows.  Also fixed in the case of using ``use_fast_converter=True`` + with the fast reader. [#3525] + +- Fixed use of mmap in the fast reader on Windows. [#3525] + +- Fixed issue where commented header would treat comments defining the table + (i.e. column headers) as purely informational comments, leading to problems + when trying to round-trip the table. [#3562] + +astropy.modeling +^^^^^^^^^^^^^^^^ + +- Fixed propagation of parameter constraints ('fixed', 'bounds', 'tied') + between compound models and their components.  There may still be some + difficulty defining 'tied' constraints properly for use with compound + models, however. [#3481] + +astropy.nddata +^^^^^^^^^^^^^^ + +- Restored several properties to the compatibility class ``NDDataArray`` that + were inadvertently omitted. [#3466] + +astropy.time +^^^^^^^^^^^^ + +- ``Time`` objects now always evaluate to ``True``, except when empty. [#3530] + +Miscellaneous +------------- + +- The ERFA wrappers are now written directly in the Python/C API + rather than using Cython, for greater performance. [#3521] + +- Improved import time of astropy. [#3488] + +Other Changes and Additions +--------------------------- + +- Updated bundled astropy-helpers version to v1.0.1 to address installation + issues with some packages that depend on Astropy. [#3541] + + +1.0 (2015-02-18) +================ + +General +------- + +- Astropy now requires Numpy 1.6.0 or later. + +New Features +------------ + +astropy.analytic_functions +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- The ``astropy.analytic_functions`` subpackage was added to contain analytic + functions useful for astronomy. [#3077] + +astropy.coordinates +^^^^^^^^^^^^^^^^^^^ + +- ``astropy.coordinates`` now has a full stack of frames allowing + transformations from ICRS or other celestial systems down to Alt/Az + coordinates. [#3217] + +- ``astropy.coordinates`` now has a ``get_sun`` function that gives + the coordinates of the Sun at a specified time. [#3217] + +- ``SkyCoord`` now has ``to_pixel`` and ``from_pixel`` methods that convert + between celestial coordinates as ``SkyCoord`` objects and pixel coordinates + given an ``astropy.wcs.WCS`` object.
[#3002] + +- ``SkyCoord`` now has ``search_around_sky`` and ``search_around_3d`` + convenience methods that allow searching for all coordinates within + a certain distance of another ``SkyCoord``. [#2953] + +- ``SkyCoord`` can now accept a frame instance for the ``frame=`` keyword + argument. [#3063] + +- ``SkyCoord`` now has a ``guess_from_table`` method that can be used to + quickly create ``SkyCoord`` objects from an ``astropy.table.Table`` + object. [#2951] + +- ``astropy.coordinates`` now has a ``Galactocentric`` frame, a coordinate + frame centered on a (user specified) center of the Milky Way. [#2761, #3286] + +- ``SkyCoord`` now accepts more formats of the coordinate string when the + representation has ``ra`` and ``dec`` attributes. [#2920] + +- ``SkyCoord`` can now accept lists of ``SkyCoord`` objects, frame objects, + or representation objects and will combine them into a single object. + [#3285] + +- Frames and ``SkyCoord`` instances now have a method ``is_equivalent_frame`` + that can be used to check that two frames are equivalent (ignoring the + data). [#3330] + +- The ``__repr__`` of coordinate objects now shows scalar coordinates in the + same format as vector coordinates. [#3350, 3448] + +astropy.cosmology +^^^^^^^^^^^^^^^^^ + +- Added ``lookback_distance``, which is ``c * lookback_time``. [#3145] + +- Add baryonic matter density and dark matter only density parameters + to cosmology objects [#2757]. + +- Add a ``clone`` method to cosmology objects to allow copies + of cosmological objects to be created with the specified variables + modified [#2592]. + +- Increase default numerical precision of ``z_at_value`` following + the accurate by default, fast by explicit request model [#3074]. + +- Cosmology functions that take a single (redshift) input now + broadcast like numpy ufuncs. So, passing an arbitrarily shaped + array of inputs will produce an output of the same shape. [#3178, #3194] + +astropy.io.ascii +^^^^^^^^^^^^^^^^ + +- Simplify the way new Reader classes are defined, allowing custom behavior + entirely by overriding inherited class attributes instead of setting + instance attributes in the Reader ``__init__`` method. [#2812] + +- There is now a faster C/Cython engine available for reading and writing + simple ASCII formats like CSV. Both are enabled by default, and fast + reading will fall back on an ordinary reader in case of a parsing + failure. Their behavior can be altered with the parameter ``fast_reader`` + in ``read`` and ``fast_writer`` in ``write``. [#2716] + +- Make Latex/AASTex tables use unit attribute of Column for output. [#3064] + +- Store comment lines encountered during reading in metadata of the + output table via ``meta['comment_lines']``. [#3222] + +- Write comment lines in Table metadata during output for all basic formats, + IPAC, and fast writers. This functionality can be disabled with + ``comment=False``. [#3255] + +- Add reader / writer for the Enhanced CSV format which stores table and + column meta data, in particular data type and unit. [#2319] + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- The ``fitsdiff`` script ignores some things by default when comparing fits + files (e.g. empty header lines). This adds a ``--exact`` option where + nothing is ignored. [#2782, #3110] + +- The ``fitsheader`` script now takes a ``--keyword`` option to extract a + specific keyword from the header of a FITS file, and a ``--table`` option + to export headers into any of the data formats supported by + ``astropy.table``. 
[#2555, #2588] + +- ``Section`` now supports all advanced indexing features ``ndarray`` does + (slices with any steps, integer arrays, boolean arrays, None, Ellipsis). + It also properly returns scalars when this is appropriate. [#3148] + +astropy.io.votable +^^^^^^^^^^^^^^^^^^ + +- ``astropy.io.votable.parse`` now takes a ``datatype_mapping`` + keyword argument to map invalid datatype names to valid ones in + order to support non-compliant files. [#2675] + +astropy.modeling +^^^^^^^^^^^^^^^^ + +- Added the capability of creating new "compound" models by combining + existing models using arithmetic operators.  See the "What's New in 1.0" + page in the Astropy documentation for more details. [#3231] + +- A new ``custom_model`` decorator/factory function has been added for + converting normal functions to ``Model`` classes that can work within + the Astropy modeling framework.  This replaces the old ``custom_model_1d`` + function which is now deprecated.  The new function works the same as + the old one but is less limited in the types of models it can be used to + create. [#1763] + +- The ``Model`` and ``Fitter`` classes have ``.registry`` attributes which + provide sets of all loaded ``Model`` and ``Fitter`` classes (this is + useful for building UIs for models and fitting). [#2725] + +- A dict-like ``meta`` member was added to ``Model``.  It can be used to + store any optional information which is relevant to a project and is not + in the standard ``Model`` class. [#2189] + +- Added ``Ellipse2D`` model. [#3124] + +astropy.nddata +^^^^^^^^^^^^^^ + +- New array-related utility functions in ``astropy.nddata.utils`` for adding + and removing arrays from other arrays with different sizes/shapes. [#3201] + +- New metaclass ``NDDataBase`` for enforcing the nddata interface in + subclasses without restricting implementation of the data storage. [#2905] + +- New mixin classes ``NDSlicingMixin`` for slicing, ``NDArithmeticMixin`` + for arithmetic operations, and ``NDIOMixin`` for input/output in NDData. [#2905] + +- Added a decorator ``support_nddata`` that can be used to write functions + that can either take separate arguments or NDData objects. [#2855] + +astropy.stats +^^^^^^^^^^^^^ + +- Added ``mad_std()`` function. [#3208] + +- Added ``gaussian_fwhm_to_sigma`` and ``gaussian_sigma_to_fwhm`` + constants. [#3208] + +- New function ``sigma_clipped_stats`` which can be used to quickly get + common statistics for an array, using sigma clipping at the same time. + [#3201] + +astropy.table +^^^^^^^^^^^^^ + +- The internal implementation of the ``Table`` class has changed so that + it no longer uses numpy structured arrays as the core table data container. + [#2790, #3179] + +- Tables can now be written to an html file that includes interactive + browsing capabilities.  To write out to this format, use + ``Table.write('filename.html', format='jsviewer')``. [#2875] + +- A ``quantity`` property and ``to`` method were added to ``Table`` + columns that allow the column values to be easily converted to + ``astropy.units.Quantity`` objects. [#2950] + +- Add ``unique`` convenience method to table. [#3185] + +astropy.tests +^^^^^^^^^^^^^ + +- Added a new Quantity-aware ``assert_quantity_allclose``. [#3273] + +astropy.time +^^^^^^^^^^^^ + +- ``Time`` can now handle arbitrary array dimensions, with operations + following standard numpy broadcasting rules. [#3138] + +astropy.units +^^^^^^^^^^^^^ + +- Support for VOUnit has been updated to be compliant with version + 1.0 of the standard. [#2901] + +- Added an ``insert`` method to insert values into a ``Quantity`` object. + This is similar to the ``numpy.insert`` function. [#3049] + +- When viewed in IPython, ``Quantity`` objects with array values now render + using LaTeX and scientific notation. [#2271] + +- Added ``units.quantity_input`` decorator to validate quantity inputs to a + function for unit compatibility (see the sketch after this list). [#3072] + +- Added ``units.astronomical_unit`` as a long form for ``units.au``. [#3303]
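+ +A minimal sketch of ``quantity_input``; the function and parameter names are illustrative only, and the output shown is indicative:: + + >>> import astropy.units as u + >>> @u.quantity_input(period=u.s) + ... def frequency(period): + ...     return 1 / period + >>> frequency(2 * u.s)    # a non-time input would raise a UnitsError + <Quantity 0.5 1 / s>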
+ +astropy.utils +^^^^^^^^^^^^^ + +- Added a new decorator ``astropy.utils.wraps`` which acts as a replacement + for the standard library's ``functools.wraps``, the only difference being + that the decorated function also preserves the wrapped function's call + signature. [#2849] + +- ``astropy.utils.compat.numpy`` has been revised such that it can include + patched versions of routines from newer ``numpy`` versions.  The first + addition is a version of ``broadcast_arrays`` that can be used with + ``Quantity`` and other ``ndarray`` subclasses (using the ``subok=True`` + flag). [#2327] + +- Added ``astropy.utils.resolve_name`` which returns a member of a module + or class given the fully qualified dotted name of that object as a + string. [#3389] + +- Added ``astropy.utils.minversion`` which can be used to check minimum + version requirements of Python modules (to test for specific features and/ + or bugs and the like). [#3389] + +astropy.visualization +^^^^^^^^^^^^^^^^^^^^^ + +- Created ``astropy.visualization`` module and added functionality relating + to image normalization (i.e. stretching and scaling) as well as a new + script ``fits2bitmap`` that can produce a bitmap image from a FITS file. + [#3201] + +- Added dictionary ``astropy.visualization.mpl_style.astropy_mpl_style`` + which can be used to set a uniform plotstyle specifically for tutorials + that is improved compared to matplotlib defaults. [#2719, #2787, #3200] + +astropy.wcs +^^^^^^^^^^^ + +- ``wcslib`` has been upgraded to version 4.25.  This brings a + single new feature: + +  - ``equinox`` and ``radesys`` will now be given default values + conforming with the WCS specification if ``EQUINOXa`` and + ``RADESYSa``, respectively, are not present in the header. + +- The minimum required version of ``wcslib`` is now 4.24. [#2503] + +- Added a new function ``wcs_to_celestial_frame`` that can be used to find + the astropy.coordinates celestial frame corresponding to a particular WCS. + [#2730] + +- ``astropy.wcs.WCS.compare`` now supports a ``tolerance`` keyword argument + to allow for approximate comparison of floating-point values. [#2503] + +- Added ``pixel_scale_matrix``, ``celestial``, ``is_celestial``, and + ``has_celestial`` convenience attributes.  Added + ``proj_plane_pixel_scales``, ``proj_plane_pixel_area``, and + ``non_celestial_pixel_scales`` utility functions for retrieving WCS pixel + scale and area information. [#2832, #3304] + +- Added two functions ``pixel_to_skycoord`` and + ``skycoord_to_pixel`` that make it easy to convert between + SkyCoord objects and pixel coordinates (see the sketch after this list). + [#2885] + +- ``all_world2pix`` now uses a much more sophisticated and complete + algorithm to iteratively compute the inverse WCS transform. [#2816] + +- Add ability to use ``WCS`` object to define projections in Matplotlib, + using the ``WCSAxes`` package. [#3183] + +- Added ``is_proj_plane_distorted`` for testing if pixels are + distorted. [#3329]
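+ +A minimal sketch of the pixel-to-sky helper; the bare ``WCS`` constructed here is only a placeholder, so the resulting coordinates are not meaningful:: + + >>> from astropy.wcs import WCS + >>> from astropy.wcs.utils import pixel_to_skycoord + >>> w = WCS(naxis=2) + >>> w.wcs.ctype = ['RA---TAN', 'DEC--TAN'] + >>> coord = pixel_to_skycoord(10, 20, w)    # returns a SkyCoord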
+ +Misc +^^^^ + +- ``astropy._erfa`` was added as a new subpackage wrapping the functionality + of the ERFA library in Python.  This is primarily of use for other astropy + subpackages, but the API may be made more public in the future. [#2992] + + +API Changes +----------- + +astropy.coordinates +^^^^^^^^^^^^^^^^^^^ + +- Subclasses of ``BaseCoordinateFrame`` which define a custom ``repr`` should + be aware of the format expected in ``SkyCoord.__repr__()``, which changed in + this release. [#2704, #2882] + +- The ``CartesianPoints`` class (deprecated in v0.4) has now been removed. + [#2990] + +- The previous ``astropy.coordinates.builtin_frames`` module is now a + subpackage.  Everything that was in the + ``astropy.coordinates.builtin_frames`` module is still accessible from the + new package, but the classes are now in separate modules.  This should have + no direct impact at the user level. [#3120] + +- Support for passing a frame as a positional argument in the ``SkyCoord`` + class has now been deprecated, except in the case where a frame with data + is passed as the sole positional argument. [#3152] + +- Improved ``__repr__`` of coordinate objects representing a single + coordinate point for the sake of easier copy/pasting. [#3350] + +astropy.cosmology +^^^^^^^^^^^^^^^^^ + +- The functional interface to the cosmological routines as well as + ``set_current`` and ``get_current`` (deprecated in v0.4) have now been + removed. [#2990] + +astropy.io.ascii +^^^^^^^^^^^^^^^^ + +- Added a new argument to ``htmldict`` in the HTML reader named + ``parser``, which allows the user to specify which parser + BeautifulSoup should use as a backend. [#2815] + +- Added the ``FixedWidthTwoLine`` reader to guessing.  This allows tables + that are copied from screen output, such as that of ``print my_table``, + to be read automatically.  Discussed in #3025 and #3099. [#3109] + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- A new optional argument ``cache`` has been added to + ``astropy.io.fits.open()``.  When opening a FITS file from a URL, + ``cache`` is a boolean value specifying whether or not to save the + file locally in Astropy's download cache (``True`` by default). [#3041]
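+ +For example (a sketch only; the URL is a placeholder):: + + >>> from astropy.io import fits + >>> hdul = fits.open('http://example.com/image.fits', cache=True) + >>> fresh = fits.open('http://example.com/image.fits', cache=False)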
+ +astropy.modeling +^^^^^^^^^^^^^^^^ + +- Model classes should now specify ``inputs`` and ``outputs`` class + attributes instead of the old ``n_inputs`` and ``n_outputs``.  These + should be tuples providing human-readable *labels* for all inputs and + outputs of the model.  The length of the tuple indicates the numbers + of inputs and outputs.  See "What's New in Astropy 1.0" for more + details. [#2835] + +- It is no longer necessary to include ``__init__`` or ``__call__`` + definitions in ``Model`` subclasses if all they do is wrap the + super-method in order to provide a nice call signature to the docs. + The ``inputs`` class attribute is now used to generate a nice call + signature, so these methods should only be overridden by ``Model`` + subclasses in order to provide new functionality. [#2835] + +- Most models included in Astropy now have sensible default values for most + or all of their parameters.  Call ``help(ModelClass)`` on any model to + check what those defaults are.  Most of the time they should be + overridden, but some of them are useful (for example spatial offsets are + always set at the origin by default).  Another rule of thumb is that, where + possible, default parameters are set so that the model is a no-op, or + close to it, by default. [#2932] + +- The ``Model.inverse`` method has been changed to a *property*, so that + now accessing ``model.inverse`` on a model returns a new model that + implements that model's inverse, and *calling* ``model.inverse(...)`` on + some independent variable computes the value of the inverse (similar to what + the old ``Model.invert()`` method was meant to do). [#3024] + +- The ``Model.invert()`` method has been removed entirely (it was never + implemented and there should not be any existing code that relies on it). + [#3024] + +- ``custom_model_1d`` is deprecated in favor of the new ``custom_model`` + (see "New Features" above). [#1763] + +- The ``Model.param_dim`` property (deprecated in v0.4) has now been removed. + [#2990] + +- The ``Beta1D`` and ``Beta2D`` models have been renamed to ``Moffat1D`` and + ``Moffat2D``. [#3029] + +astropy.nddata +^^^^^^^^^^^^^^ + +- ``flags``, ``shape``, ``size``, ``dtype`` and ``ndim`` properties removed + from ``astropy.nddata.NDData``. [#2905] + +- Arithmetic operations, uncertainty propagation, slicing and automatic + conversion to a numpy array removed from ``astropy.nddata.NDData``.  The + class ``astropy.nddata.NDDataArray`` is functionally equivalent to the + old ``NDData``. [#2905] + +astropy.table +^^^^^^^^^^^^^ + +- The ``Column.units`` property (deprecated in v0.3) has now been removed. + [#2990] + +- The ``Row.data`` and ``Table._data`` attributes have been deprecated as + part of the change in the ``Table`` implementation.  They are replaced by + ``Row.as_void()`` and ``Table.as_array()`` methods, respectively. [#2790] + +- The ``Table.create_mask`` method has been removed.  This undocumented + method was a development orphan and would cause corruption of the + table if called. [#2790] + +- The return type for integer item access to a Column (e.g. ``col[12]`` or + ``t['a'][12]``) is now always a numpy scalar, numpy ``ndarray``, or numpy + ``MaskedArray``.  Previously if the column was multidimensional then a + Column object would be returned. [#3095] + +- The representation of ``Table`` and ``Column`` objects has been changed to + be formatted similar to the print output. [#3239] + +astropy.time +^^^^^^^^^^^^ + +- The ``Time.val`` and ``Time.vals`` properties (deprecated in v0.3) and the + ``Time.lon``, and ``Time.lat`` properties (deprecated in v0.4) have now + been removed. [#2990] + +- Add ``decimalyear`` format that represents time as a decimal year. [#3265] + +astropy.units +^^^^^^^^^^^^^ + +- Support for VOUnit has been updated to be compliant with version + 1.0 of the standard.  This means that some VOUnit strings that were + rejected before are now acceptable. [#2901] Notably: + +  - SI prefixes are supported on most units; + +  - binary prefixes are supported on "bits" and "bytes"; + +  - custom units can be defined "inline" by placing them between single + quotes. + +- ``Unit.get_converter`` has been deprecated.  It is not strictly + necessary for end users, and it was confusing due to lack of + support for ``Quantity`` objects. [#3456] + +astropy.utils +^^^^^^^^^^^^^ + +- Some members of ``astropy.utils.misc`` were moved into new submodules. + Specifically: + +  - ``deprecated``, ``deprecated_attribute``, and ``lazyproperty`` -> + ``astropy.utils.decorators`` + +  - ``find_current_module``, ``find_mod_objs`` -> + ``astropy.utils.introspection`` + + All of these functions can be imported directly from ``astropy.utils``, + which should be preferred over referencing individual submodules of + ``astropy.utils`` (see the sketch after this list). [#2857] + +- The ``ProgressBar.iterate`` class method (deprecated in v0.3) has now been + removed. [#2990] + +- Updated the ``ProgressBar`` class in ``astropy/utils/console.py`` to + display output in the IPython notebook with the addition of an + ``interactive`` kwarg. [#2658, #2789]
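+ +For instance, a small illustrative sketch of the preferred imports and of ``lazyproperty`` (the class and attribute names are hypothetical):: + + >>> from astropy.utils import lazyproperty, deprecated + >>> class Spectrum: + ...     @lazyproperty + ...     def mean(self): + ...         print('computed once, then cached') + ...         return 42.0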
+ +astropy.wcs +^^^^^^^^^^^ + +- The ``WCS.calcFootprint`` method (deprecated in v0.4) has now been removed. + [#2990] + +- An invalid unit in a ``CUNITn`` keyword now displays a warning and + returns a ``UnrecognizedUnit`` instance rather than raising an + exception. [#3190] + +Bug Fixes +--------- + +astropy.convolution +^^^^^^^^^^^^^^^^^^^ + +- ``astropy.convolution.discretize_model`` now handles arbitrary callables + correctly. [#2274]
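+ +A minimal sketch of the fixed behavior, sampling a plain Python callable on an integer grid:: + + >>> from astropy.convolution import discretize_model + >>> samples = discretize_model(lambda x: x ** 2, (0, 4))    # x = 0, 1, 2, 3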
+ +astropy.coordinates +^^^^^^^^^^^^^^^^^^^ + +- ``Angle.to_string`` now outputs unicode arrays instead of object arrays. + [#2981] + +- ``SkyCoord.to_string`` no longer gives an error when used with an array + coordinate with more than one dimension. [#3340] + +- Fixed support for subclasses of ``UnitSphericalRepresentation`` and + ``SphericalRepresentation``. [#3354, #3366] + +- Fixed latex display of array angles in IPython notebook. [#3480] + +astropy.io.ascii +^^^^^^^^^^^^^^^^ + +- In the ``CommentedHeader`` the ``data_start`` parameter now defaults to + ``0``, which is the first uncommented line.  Discussed in #2692. [#3054] + +- Position lines in ``FixedWidthTwoLine`` reader could consist of many characters. + Now, only one character in addition to the delimiter is allowed.  This bug was + discovered as part of [#3109] + +- The IPAC table writer now consistently uses the ``fill_values`` keyword to + specify the output null values.  Previously the behavior was inconsistent + or incorrect. [#3259] + +- The IPAC table reader now correctly interprets abbreviated column types. + [#3279] + +- Tables that look almost, but not quite, like DAOPhot tables could cause + guessing to fail. [#3342] + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- Fixed the problem in ``fits.open`` of some filenames with a colon (``:``) in + the name being recognized as URLs instead of file names. [#3122] + +- Setting ``memmap=True`` in ``fits.open`` and related functions now raises + a ValueError if opening a file in memory-mapped mode is impossible. [#2298] + +- CONTINUE cards no longer end the value of the final card in the series with + an ampersand, per the specification of the CONTINUE card convention. [#3282] + +- Fixed a crash that occurred when reading an ASCII table containing + zero-precision floating point fields. [#3422] + +- When a float field for an ASCII table has zero-precision a decimal point + (with no digits following it) is still written to the field as long as + there is space for it, as recommended by the FITS standard.  This makes it + less ambiguous that these columns should be interpreted as floats. [#3422] + +astropy.logger +^^^^^^^^^^^^^^ + +- Fix a bug that occurred when displaying warnings that produced an error + message ``dictionary changed size during iteration``. [#3353] + +astropy.modeling +^^^^^^^^^^^^^^^^ + +- Fixed a bug in ``SLSQPLSQFitter`` where the ``maxiter`` argument was not + passed correctly to the optimizer. [#3339] + +astropy.table +^^^^^^^^^^^^^ + +- Fix a problem where ``table.hstack`` fails to stack multiple references to + the same table, e.g. ``table.hstack([t, t])``. [#2995] + +- Fixed a problem where ``table.vstack`` and ``table.hstack`` failed to stack + a single table, e.g. ``table.vstack([t])``. [#3313] + +- Fix a problem when doing nested iterators on a single table. [#3358] + +- Fix an error when an empty list, tuple, or ndarray is used for item access + within a table.  This now returns the table with no rows. [#3442] + +astropy.time +^^^^^^^^^^^^ + +- When creating a ``Time`` object from a ``datetime`` object, the time zone + info is now correctly used. [#3160] + +- For ``Time`` objects, it is now checked that numerical input is finite. [#3396] + +astropy.units +^^^^^^^^^^^^^ + +- Added a ``latex_inline`` unit format that returns the units in LaTeX math + notation with negative exponents instead of fractions. [#2622] + +- When using a unit that is deprecated in a given unit format, + non-deprecated alternatives will be suggested. [#2806] For + example:: + + >>> import astropy.units as u + >>> u.Unit('Angstrom', format='fits') + WARNING: UnitsWarning: The unit 'Angstrom' has been deprecated + in the FITS standard. Suggested: nm (with data multiplied by + 0.1).  [astropy.units.format.utils] + +astropy.utils +^^^^^^^^^^^^^ + +- ``treat_deprecations_as_exceptions`` has been fixed to recognize Astropy + deprecation warnings. [#3015] + +- Converted representation of progress bar units without suffix + from float to int in ``console.human_file_size``. [#2201, #2202, #2721, #3299] + +astropy.wcs +^^^^^^^^^^^ + +- ``astropy.wcs.WCS.sub`` now accepts unicode strings as input on + Python 2.x. [#3356] + +Misc +^^^^ + +- Fixed some modules and tests that would crash upon import when using a + non-final release of Numpy (e.g. 1.9.0rc1). [#3471] + +Other Changes and Additions +--------------------------- + +- The bundled copy of astropy-helpers has been updated to v1.0. [#3515] + +- Updated ``astropy.extern.configobj`` to Version 5.  Version 5 uses ``six`` + and the same code covers both Python 2 and Python 3. [#3149] + +astropy.coordinates +^^^^^^^^^^^^^^^^^^^ + +- The ``repr`` of ``SkyCoord`` and coordinate frame classes now separates + frame attributes and coordinate information. [#2704, #2882] + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- Overwriting an existing file using the ``clobber=True`` option no longer + displays a warning message. [#1963] + +- ``fits.open`` no longer catches ``OSError`` exceptions on missing or + unreadable files--instead it raises the standard Python exceptions in such + cases. [#2756, #2785] + +astropy.table +^^^^^^^^^^^^^ + +- Sped up setting of ``Column`` slices by an order of magnitude. [#2994, #3020] + +- Updated the bundled ``six`` module to version 1.7.3 and made 1.7.3 the + minimum acceptable version of ``six``. [#2814] + +- The version of ERFA included with Astropy is now v1.1.1. [#2971] + +- The code base is now fully Python 2 and 3 compatible and no longer requires + 2to3. [#2033] + +- `funcsigs `_ is included in + utils.compat, but defaults to the inspect module components where available + (3.3+). [#3151] + +- The list of modules displayed in the pytest header can now be customized. + [#3157] + +- `jinja2 `_>=2.7 is now required to build the + source code from the git repository, in order to allow the ERFA wrappers to + be generated. [#3166] + + +0.4.5 (2015-02-16) +================== + +Bug Fixes +--------- + +- Fixed unnecessary attempt to run ``git`` when importing astropy.  In + particular, fixed a crash in Python 3 that could result from this when + importing Astropy when the current working directory is an empty git + repository. [#3475] + +Other Changes and Additions +--------------------------- + +- Updated bundled copy of astropy-helpers to v0.4.6.
[#3508] + + +0.4.4 (2015-01-21) +================== + +Bug Fixes +--------- + +astropy.vo.samp +^^^^^^^^^^^^^^^ + +- ``astropy.vo.samp`` is now usable on Python builds that do not + support the SSLv3 protocol (which depends both on the version of + Python and the version of OpenSSL or LibreSSL that it is built + against.) [#3308] + +API Changes +----------- + +astropy.vo.samp +^^^^^^^^^^^^^^^ + +- The default SSL protocol used is now determined from the default + used in the Python ``ssl`` standard library. This default may be + different depending on the exact version of Python you are using. + [#3308] + +astropy.wcs +^^^^^^^^^^^ + +- WCS allows slices of the form slice(None, x, y), which previously resulted + in an unsliced copy being returned (note: this was previously incorrectly + reported as fixed in v0.4.3) [#2909] + + +0.4.3 (2015-01-15) +================== + +Bug Fixes +--------- + +astropy.coordinates +^^^^^^^^^^^^^^^^^^^ + +- The ``Distance`` class has been fixed to no longer rely on the deprecated + cosmology functions. [#2991] + +- Ensure ``float32`` values can be used in coordinate representations. [#2983] + +- Fix frame attribute inheritance in ``SkyCoord.transform_to()`` method so + that the default attribute value (e.g. equinox) for the destination frame + gets used if no corresponding value was explicitly specified. [#3106] + +- ``Angle`` accepts hours:mins or deg:mins initializers (without + seconds). In these cases float minutes are also accepted. [#2843] + +- ``astropy.coordinates.SkyCoord`` objects are now copyable. [#2888] + +- ``astropy.coordinates.SkyCoord`` object attributes are now + immutable. It is still technically possible to change the + internal data for an array-valued coordinate object but this leads + to inconsistencies [#2889] and should not be done. [#2888] + +astropy.cosmology +^^^^^^^^^^^^^^^^^ + +- The ``ztol`` keyword argument to z_at_value now works correctly [#2993]. + +astropy.io.ascii +^^^^^^^^^^^^^^^^ + +- Fix a bug in Python 3 when guessing file format using a file object as + input. Also improve performance in same situation for Python 2. [#3132] + +- Fix a problem where URL was being downloaded for each guess. [#2001] + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- The ``in`` operator now works correctly for checking if an extension + is in an ``HDUList`` (as given via EXTNAME, (EXTNAME, EXTVER) tuples, + etc.) [#3060] + +- Added workaround for bug in MacOS X <= 10.8 that caused np.fromfile to + fail. [#3078] + +- Added support for the ``RICE_ONE`` compression type synonym. [#3115] + +astropy.modeling +^^^^^^^^^^^^^^^^ + +- Fixed a test failure on Debian/PowerPC and Debian/s390x. [#2708] + +- Fixed crash in evaluating models that have more outputs than inputs--this + case may not be handled as desired for all conceivable models of this + format (some may have to implement custom ``prepare_inputs`` and + ``prepare_outputs`` methods). But as long as all outputs can be assumed + to have a shape determined from the broadcast of all inputs with all + parameters then this can be used safely. [#3250] + +astropy.table +^^^^^^^^^^^^^ + +- Fix a bug that caused join to fail for multi-dimensional columns. [#2984] + +- Fix a bug where MaskedColumn attributes which had been changed since + the object was created were not being carried through when slicing. [#3023] + +- Fix a bug that prevented initializing a table from a structured array + with multi-dimensional columns with copy=True. 
[#3034] + +- Fixed unnecessarily large unicode columns when instantiating a table from + row data on Python 3. [#3052] + +- Improved the warning message when unable to aggregate non-numeric + columns. [#2700] + +astropy.units +^^^^^^^^^^^^^ + +- Operations on quantities with incompatible types now raise a much + more informative ``TypeError``. [#2934] + +- ``Quantity.tolist`` now overrides the ``ndarray`` method to give a + ``NotImplementedError`` (by renaming the previous ``list`` method). [#3050] + +- ``Quantity.round`` now always returns a ``Quantity`` (previously it + returned an ``ndarray`` for ``decimals>0``). [#3062] + +- Ensured ``np.squeeze`` always returns a ``Quantity`` (it only worked if + no dimensions were removed). [#3045] + +- Input to ``Quantity`` with a ``unit`` attribute no longer can get mangled + with ``copy=False``. [#3051] + +- Remove trailing space in ``__format__`` calls for dimensionless quantities. + [#3097] + +- Comparisons between units and non-unit-like objects now work + correctly. [#3108] + +- Units with fractional powers are now correctly multiplied together + by using rational arithmetic. [#3121] + +- Removed a few entries from spectral density equivalencies which did not + make sense. [#3153] + +astropy.utils +^^^^^^^^^^^^^ + +- Fixed an issue with the ``deprecated`` decorator on classes that invoke + ``super()`` in their ``__init__`` method. [#3004] + +- Fixed a bug which caused the ``metadata_conflicts`` parameter to be + ignored in the ``astropy.utils.metadata.merge`` function. [#3294] + +astropy.vo +^^^^^^^^^^ + +- Fixed an issue with reconnecting to a SAMP Hub. [#2674] + +astropy.wcs +^^^^^^^^^^^ + +- Invalid or out-of-range values passed to ``wcs_world2pix`` will + now be correctly identified and returned as ``nan`` + values. [#2965] + +- Fixed an issue which meant that Python thought ``WCS`` objects were + iterable. [#3066] + +Misc +^^^^ + +- Astropy will now work if your Python interpreter does not have the + ``bz2`` module installed. [#3104] + +- Fixed ``ResourceWarning`` for ``astropy/extern/bundled/six.py`` that could + occur sometimes after using Astropy in Python 3.4. [#3156] + +Other Changes and Additions +--------------------------- + +astropy.coordinates +^^^^^^^^^^^^^^^^^^^ + +- Improved the agreement of the FK5 <-> Galactic conversion with other + codes, and with the FK5 <-> FK4 <-> Galactic route. [#3107] + + +0.4.2 (2014-09-23) +================== + +Bug Fixes +--------- + +astropy.coordinates +^^^^^^^^^^^^^^^^^^^ + +- ``Angle`` accepts hours:mins or deg:mins initializers (without + seconds).  In these cases float minutes are also accepted. + +- The ``repr`` for coordinate frames now displays the frame attributes + (e.g. ra, dec) in a consistent order.  It should be noted that as part of + this fix, the ``BaseCoordinateFrame.get_frame_attr_names()`` method now + returns an ``OrderedDict`` instead of just a ``dict``. [#2845] + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- Fixed a crash when reading scaled float data out of a FITS file that was + loaded from a string (using ``HDUList.fromfile``) rather than from a file. + [#2710] + +- Fixed a crash when reading data from an HDU whose header contained an + invalid value for the BLANK keyword (e.g., a string value instead of an + integer as required by the FITS Standard). Invalid BLANK keywords are now + warned about, but are otherwise ignored.
[#2711] + +- Fixed a crash when reading the header of a tile-compressed HDU if that + header contained invalid duplicate keywords resulting in a ``KeyError`` + [#2750] + +- Fixed crash when reading gzip-compressed FITS tables through the Astropy + ``Table`` interface. [#2783] + +- Fixed corruption when writing new FITS files through to gzipped files. + [#2794] + +- Fixed crash when writing HDUs made with non-contiguous data arrays to + file-like objects. [#2794] + +- It is now possible to create ``astropy.io.fits.BinTableHDU`` + objects with a table with zero rows. [#2916] + +astropy.io.misc +^^^^^^^^^^^^^^^ + +- Fixed a bug that prevented h5py ``Dataset`` objects from being + automatically recognized by ``Table.read``. [#2831] + +astropy.modeling +^^^^^^^^^^^^^^^^ + +- Make ``LevMarLSQFitter`` work with ``weights`` keyword. [#2900] + +astropy.table +^^^^^^^^^^^^^ + +- Fixed reference cycle in tables that could prevent ``Table`` objects + from being freed from memory. [#2879] + +- Fixed an issue where ``Table.pprint()`` did not print the header to + ``stdout`` when ``stdout`` is redirected (say, to a file). [#2878] + +- Fixed printing of masked values when a format is specified. [#1026] + +- Ensured that numpy ufuncs that return booleans return plain ``ndarray`` + instances, just like the comparison operators. [#2963] + +astropy.time +^^^^^^^^^^^^ + +- Ensure bigendian input to Time works on a little-endian machine + (and vice versa). [#2942] + +astropy.units +^^^^^^^^^^^^^ + +- Ensure unit is kept when adding 0 to quantities. [#2968] + +astropy.utils +^^^^^^^^^^^^^ + +- Fixed color printing on Windows with IPython 2.0. [#2878] + +astropy.vo +^^^^^^^^^^ + +- Improved error message on Cone Search time out. [#2687] + +Other Changes and Additions +--------------------------- + +- Fixed a couple issues with files being inappropriately included and/or + excluded from the source archive distributions of Astropy. [#2843, #2854] + +- As part of fixing the fact that masked elements of table columns could not be + printed when a format was specified, the column format string options were + expanded to allow simple specifiers such as ``'5.2f'``. [#2898] + +- Ensure numpy 1.9 is supported. [#2917] + +- Ensure numpy master is supported, by making ``np.cbrt`` work with quantities. + [#2937] + +0.4.1 (2014-08-08) +================== + +Bug Fixes +--------- + +astropy.config +^^^^^^^^^^^^^^ + +- Fixed a bug where an unedited configuration file from astropy + 0.3.2 would not be correctly identified as unedited. [#2772] This + resulted in the warning:: + + WARNING: ConfigurationChangedWarning: The configuration options + in astropy 0.4 may have changed, your configuration file was not + updated in order to preserve local changes. A new configuration + template has been saved to + '~/.astropy/config/astropy.0.4.cfg'. [astropy.config.configuration] + +- Fixed the error message that is displayed when an old + configuration item has moved. Before, the destination + section was wrong. [#2772] + +- Added configuration settings for ``io.fits``, ``io.votable`` and + ``table.jsviewer`` that were missing from the configuration file + template. [#2772] + +- The configuration template is no longer rewritten on every import + of astropy, causing race conditions. [#2805] + +astropy.convolution +^^^^^^^^^^^^^^^^^^^ + +- Fixed the multiplication of ``Kernel`` with numpy floats. [#2174] + +astropy.coordinates +^^^^^^^^^^^^^^^^^^^ + +- ``Distance`` can now take a list of quantities. 
[#2261] + +- For in-place operations for ``Angle`` instances in which the result unit + is not an angle, an exception is raised before the instance is corrupted. + [#2718] + +- ``CartesianPoints`` are now deprecated in favor of + ``CartesianRepresentation``. [#2727] + +astropy.io.misc +^^^^^^^^^^^^^^^ + +- An existing table within an HDF5 file can be overwritten without affecting + other datasets in the same HDF5 file by simultaneously using + ``overwrite=True`` and ``append=True`` arguments to the ``Table.write`` + method. [#2624] + +astropy.logger +^^^^^^^^^^^^^^ + +- Fixed a crash that could occur in rare cases (such as in bundled + apps) where submodules of the ``email`` package are not importable. [#2671] + +astropy.nddata +^^^^^^^^^^^^^^ + +- ``astropy.nddata.NDData()`` no longer raises a ``ValueError`` when passed + a numpy masked array which has no masked entries. [#2784] + +astropy.table +^^^^^^^^^^^^^ + +- When saving a table to a FITS file containing a unit that is not + supported by the FITS standard, a warning rather than an exception + is raised. [#2797] + +astropy.units +^^^^^^^^^^^^^ + +- By default, ``Quantity`` and its subclasses now also convert to float + numerical types such as ``decimal.Decimal``, which are stored as objects + by numpy. [#1419] + +- The units ``count``, ``pixel``, ``voxel`` and ``dbyte`` now output + to FITS, OGIP and VOUnit formats correctly. [#2798] + +astropy.utils +^^^^^^^^^^^^^ + +- Restored missing information from deprecation warning messages + from the ``deprecated`` decorator. [#2811] + +- Fixed support for ``staticmethod`` deprecation in the ``deprecated`` + decorator. [#2811] + +astropy.wcs +^^^^^^^^^^^ + +- Fixed a memory leak when ``astropy.wcs.WCS`` objects are copied. + [#2754] + +- Fixed a crash when passing ``ra_dec_order=True`` to any of the + ``*2world`` methods. [#2791] + +Other Changes and Additions +--------------------------- + +- Bundled copy of astropy-helpers upgraded to v0.4.1. [#2825] + +- General improvements to documentation and docstrings. [#2722, #2728, #2742] + +- Made it easier for third-party packagers to have Astropy use their own + version of the ``six`` module (so long as it meets the minimum version + requirement) and remove the copy bundled with Astropy.  See the + astropy/extern/README file in the source tree. [#2623] + + +0.4 (2014-07-16) +================ + +New Features +------------ + +astropy.constants +^^^^^^^^^^^^^^^^^ + +- Added ``b_wien`` to represent the Wien wavelength displacement law + constant. [#2194]
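+ +A quick, illustrative sketch using the new constant (the temperature value is arbitrary):: + + >>> import astropy.units as u + >>> from astropy.constants import b_wien + >>> peak = (b_wien / (5778 * u.K)).to(u.nm)    # ~500 nm for a Sun-like blackbody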
+ +astropy.convolution +^^^^^^^^^^^^^^^^^^^ + +- Changed the input parameter in ``Gaussian1DKernel`` and + ``Gaussian2DKernel`` from ``width`` to ``stddev``. [#2085] + +astropy.coordinates +^^^^^^^^^^^^^^^^^^^ + +- The coordinates package has undergone major changes to implement + `APE5 `_. + These include backwards-incompatible changes, as the underlying framework + has changed substantially.  See the APE5 text and the package documentation + for more details. [#2422] + +- A ``position_angle`` method has been added to the new ``SkyCoord``. [#2487] + +- Updated ``Angle.dms`` and ``Angle.hms`` to return ``namedtuple`` -s instead + of regular tuples, and added ``Angle.signed_dms`` attribute that gives the + absolute value of the ``d``, ``m``, and ``s`` along with the sign. [#1988] + +- By default, ``Distance`` objects are now required to be positive.  To + allow negative values, set ``allow_negative=True`` in the ``Distance`` + constructor when creating a ``Distance`` instance. + +- ``Longitude`` (resp. ``Latitude``) objects cannot be used any more to + initialize or set ``Latitude`` (resp. ``Longitude``) objects.  An explicit + conversion to ``Angle`` is now required. [#2461] + +- The deprecated functions for pre-0.3 coordinate object names like + ``ICRSCoordinates`` have been removed. [#2422] + +- The ``rotation_matrix`` and ``angle_axis`` functions in + ``astropy.coordinates.angles`` were made more numerically consistent and + are now tested explicitly. [#2619] + +astropy.cosmology +^^^^^^^^^^^^^^^^^ + +- Added ``z_at_value`` function to find the redshift at which a cosmology + function matches a desired value. [#1909] + +- Added ``FLRW.differential_comoving_volume`` method to give the differential + comoving volume at redshift z. [#2103] + +- The functional interface is now deprecated in favor of the more-explicit + use of methods on cosmology objects. [#2343] + +- Updated documentation to reflect the removal of the functional + interface. [#2507] + +astropy.io.ascii +^^^^^^^^^^^^^^^^ + +- The ``astropy.io.ascii`` output formats ``latex`` and ``aastex`` accept a + dictionary called ``latex_dict`` to specify options for LaTeX output.  It is + now possible to specify the table alignment within the text via the + ``tablealign`` keyword. [#1838] + +- If ``header_start`` is specified in a call to ``ascii.get_reader`` or any + method that calls ``get_reader`` (e.g. ``ascii.read``) but ``data_start`` + is not specified at the same time, then ``data_start`` is calculated so + that the data starts after the header.  Before this, the default was + that the header line was read again as the first data line + [#855 and #1844]. + +- A new ``csv`` format was added as a convenience for handling CSV + (comma-separated values) data; a short sketch follows this list. [#1935] + This format also recognises rows with an inconsistent number of elements. + [#1562] + +- An option was added to guess the start of data for CDS format files when + they do not strictly conform to the format standard. [#2241] + +- Added an HTML reader and writer to the ``astropy.io.ascii`` package. + Parsing requires the installation of BeautifulSoup and is therefore + an optional feature. [#2160] + +- Added support for inputting column descriptions and column units + with the ``io.ascii.SExtractor`` reader. [#2372] + +- Allow the use of non-local ReadMe files in the CDS reader. [#2329] + +- Provide a mechanism to select how masked values are printed. [#2424] + +- Added support for reading multi-aperture daophot files. [#2656]
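+ +The ``csv`` format mentioned above, in a minimal sketch (the inline table data are arbitrary):: + + >>> from astropy.io import ascii + >>> t = ascii.read('a,b,c\n1,2,3\n4,5,6', format='csv')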
+ +astropy.io.fits +^^^^^^^^^^^^^^^ + +- Included a new command-line script called ``fitsheader`` to display the + header(s) of a FITS file from the command line. [#2092] + +- Added new verification options ``fix+ignore``, ``fix+warn``, + ``fix+exception``, ``silentfix+ignore``, ``silentfix+warn``, and + ``silentfix+exception`` which give more control over how to report fixable + errors as opposed to unfixable errors. + +astropy.modeling +^^^^^^^^^^^^^^^^ + +- Prototype implementation of fitters that treat optimization algorithms + separately from fit statistics, allowing new fitters to be created by + mixing and matching optimizers and statistic functions. [#1914] + +- Slight overhaul to how inputs to and outputs from models are handled with + respect to array-valued parameters and variables, as well as sets of + multiple models.  See the associated PR and the modeling section of the + v0.4 documentation for more details. [#2634] + +- Added a new ``SimplexLSQFitter`` which uses a downhill simplex optimizer + with a least squares statistic. [#1914] + +- Changed ``Gaussian2D`` model such that ``theta`` now increases + counterclockwise. [#2199] + +- Replaced the ``MatrixRotation2D`` model with a new model called simply + ``Rotation2D`` which requires only an angle to specify the rotation. + The new ``Rotation2D`` rotates in a counter-clockwise sense whereas + the old ``MatrixRotation2D`` increased the angle clockwise. + [#2266, #2269] + +- Added a new ``AffineTransformation2D`` model which serves as a + replacement for the capability of ``MatrixRotation2D`` to accept an + arbitrary matrix, while also adding a translation capability. [#2269] + +- Added ``GaussianAbsorption1D`` model. [#2215] + +- Added a new ``Redshift`` model. [#2176] + +astropy.nddata +^^^^^^^^^^^^^^ + +- Allow initialization of ``NDData`` or ``StdDevUncertainty`` with a + ``Quantity``. [#2380] + +astropy.stats +^^^^^^^^^^^^^ + +- Added a flat prior to ``binom_conf_interval`` and + ``binned_binom_proportion``. + +- Change default in ``sigma_clip`` from ``np.median`` to ``np.ma.median``. + [#2582] + +astropy.sphinx +^^^^^^^^^^^^^^ + +- Note, the following new features are included in astropy-helpers as well: + +  - The ``automodapi`` and ``automodsumm`` extensions now include sphinx + configuration options to write out what ``automodapi`` and ``automodsumm`` + generate, mainly for debugging purposes. [#1975, #2022] + +  - Reference documentation now shows functions/class docstrings at the + intended user-facing API location rather than the actual file where + the implementation is found. [#1826] + +  - The ``automodsumm`` extension configuration was changed to generate + documentation of class ``__call__`` member functions. [#1817, #2135] + +  - ``automodapi`` and ``automodsumm`` now have an ``:allowed-package-names:`` + option that makes it possible to document functions and classes that + are in a different namespace. [#2370] + +astropy.table +^^^^^^^^^^^^^ + +- Improved grouped table aggregation by using the numpy ``reduceat()`` method + when possible.  This can speed up the operation by a factor of at least 10 + to 100 for large unmasked tables and columns with relatively small + group sizes. [#2625] + +- Allow row-oriented data input using a new ``rows`` keyword argument. + [#850] + +- Allow subclassing of ``Table`` and the component classes ``Row``, ``Column``, + ``MaskedColumn``, ``TableColumns``, and ``TableFormatter``. [#2287] + +- Fix to allow numpy integer types as valid indices into tables in + Python 3.x. [#2477] + +- Remove transition code related to the order change in ``Column`` and + ``MaskedColumn`` arguments ``name`` and ``data`` from Astropy 0.2 + to 0.3. [#2511] + +- Change HTML table representation in IPython notebook to show all + table columns instead of restricting to 80 column width. [#2651] + +astropy.time +^^^^^^^^^^^^ + +- Mean and apparent sidereal time can now be calculated using the + ``sidereal_time`` method. [#1418] + +- The time scale now defaults to UTC if no scale is provided. [#2091] + +- ``TimeDelta`` objects can have all scales but UTC, as well as, for + consistency with time-like quantities, undefined scale (where the + scale is taken from the object one adds to or subtracts from). + This allows one, e.g., to work consistently in TDB. [#1932] + +- ``Time`` now supports ISO format strings that end in "Z". [#2211, #2203]
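+ +For example (a small sketch; the timestamp is arbitrary):: + + >>> from astropy.time import Time + >>> t = Time('2014-07-16T12:00:00Z')    # trailing 'Z' implies UTC + >>> t.scale + 'utc'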
[#2211, #2203] + +astropy.units +^^^^^^^^^^^^^ + +- Support for the unit format `Office of Guest Investigator Programs (OGIP) + FITS files + `__ + has been added. [#377] + +- The ``spectral`` equivalency can now handle angular wave number. [#1306 and + #1899] + +- Added ``one`` as a shorthand for ``dimensionless_unscaled``. [#1980] + +- Added ``dex`` and ``dB`` units. [#1628] + +- Added ``temperature()`` equivalencies to support conversion between + Kelvin, Celsius, and Fahrenheit. [#2209] + +- Added ``temperature_energy()`` equivalencies to support conversion + between electron-volt and Kelvin. [#2637] + +- The runtime of ``astropy.units.Unit.compose`` is greatly improved + (by a factor of 2 in most cases) [#2544] + +- Added ``electron`` unit. [#2599] + +astropy.utils +^^^^^^^^^^^^^ + +- ``timer.RunTimePredictor`` now uses ``astropy.modeling`` in its + ``do_fit()`` method. [#1896] + +astropy.vo +^^^^^^^^^^ + +- A new sub-package, ``astropy.vo.samp``, is now available (this was + previously the SAMPy package, which has been refactored for use in + Astropy). [#1907] + +- Enhanced functionalities for ``VOSCatalog`` and ``VOSDatabase``. [#1206] + +astropy.wcs +^^^^^^^^^^^ + +- astropy now requires wcslib version 4.23. The version of wcslib + included with astropy has been updated to version 4.23. + +- Bounds checking is now performed on native spherical + coordinates. Any out-of-bounds values will be returned as + ``NaN``, and marked in the ``stat`` array, if using the + low-level ``wcslib`` interface such as + ``astropy.wcs.Wcsprm.p2s``. [#2107] + +- A new method, ``astropy.wcs.WCS.compare()``, compares two wcsprm + structs for equality with varying degrees of strictness. [#2361] + +- New ``astropy.wcs.utils`` module, with a handful of tools for manipulating + WCS objects, including dropping, swapping, and adding axes. + +Misc +^^^^ + +- Includes the new astropy-helpers package which separates some of Astropy's + build, installation, and documentation infrastructure out into an + independent package, making it easier for Affiliated Packages to depend on + these features. astropy-helpers replaces/deprecates some of the submodules + in the ``astropy`` package (see API Changes below). See also + `APE 4 `_ + for more details on the motivation behind and implementation of + astropy-helpers. [#1563] + + +API Changes +----------- + +astropy.config +^^^^^^^^^^^^^^ + +- The configuration system received a major overhaul, as part of APE3. It is + no longer possible to save configuration items from Python, but instead + users must edit the configuration file directly. The locations of + configuration items have moved, and some have been changed to science state + values. The old locations should continue to work until astropy 0.5, but + deprecation warnings will be displayed. See the `Configuration transition + `_ + docs for a detailed description of the changes and how to update existing + code. [#2094] + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- The ``astropy.io.fits.new_table`` function is now fully deprecated (though + will not be removed for a long time, considering how widely it is used). + + Instead please use the more explicit ``BinTableHDU.from_columns`` to create + a new binary table HDU, and the similar ``TableHDU.from_columns`` to create + a new ASCII table. These otherwise accept the same arguments as + ``new_table`` which is now just a wrapper for these. 
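+ + As a minimal sketch of the replacement pattern (the column names and + values here are purely illustrative):: + + >>> import numpy as np + >>> from astropy.io import fits + >>> c1 = fits.Column(name='wavelength', format='E', + ... array=np.linspace(400., 700., 5)) + >>> c2 = fits.Column(name='flux', format='E', array=np.ones(5)) + >>> hdu = fits.BinTableHDU.from_columns([c1, c2]) + + ``TableHDU.from_columns`` accepts the same style of column list and + produces an ASCII table HDU instead.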
+ +- The ``.fromstring`` classmethod of each HDU type has been simplified such + that, true to its namesake, it only initializes an HDU from a string + containing its header *and* data. + +- Header wildcard matching (for example ``header['DATE*']``) can now be used + to match *any* characters that might appear in a keyword. Previously this + only matched keywords containing characters in the set ``[0-9A-Za-z_]``. + Now this can also match a hyphen ``-`` and any other characters, as some + conventions like ``HIERARCH`` and record-valued keyword cards allow a + wider range of valid characters than standard FITS keywords. + +- This will be the *last* release to support the following APIs that have + been marked deprecated since Astropy v0.1/PyFITS v3.1: + +- The ``CardList`` class, which was part of the old header implementation. + +- The ``Card.key`` attribute. Use ``Card.keyword`` instead. + +- The ``Card.cardimage`` and ``Card.ascardimage`` attributes. Use simply + ``Card.image`` or ``str(card)`` instead. + +- The ``create_card`` factory function. Simply use the normal ``Card`` + constructor instead. + +- The ``create_card_from_string`` factory function. Use ``Card.fromstring`` + instead. + +- The ``upper_key`` function. Use the ``Card.normalize_keyword`` method + instead (this is unlikely to be used outside of PyFITS itself, but it + was technically public API). + +- The usage of ``Header.update`` with ``Header.update(keyword, value, + comment)`` arguments. ``Header.update`` should only be used analogously + to ``dict.update``. Use ``Header.set`` instead. + +- The ``Header.ascard`` attribute. Use ``Header.cards`` instead for a list + of all the ``Card`` objects in the header. + +- The ``Header.rename_key`` method. Use ``Header.rename_keyword`` instead. + +- The ``Header.get_history`` method. Use ``header['HISTORY']`` instead + (normal keyword lookup). + +- The ``Header.get_comment`` method. Use ``header['COMMENT']`` instead. + +- The ``Header.toTxtFile`` method. Use ``header.totextfile`` instead. + +- The ``Header.fromTxtFile`` method. Use ``Header.fromtextfile`` instead. + +- The ``tdump`` and ``tcreate`` functions. Use ``tabledump`` and + ``tableload`` respectively. + +- The ``BinTableHDU.tdump`` and ``tcreate`` methods. Use + ``BinTableHDU.dump`` and ``BinTableHDU.load`` respectively. + +- The ``txtfile`` argument to the ``Header`` constructor. Use + ``Header.fromfile`` instead. + +- The ``startColumn`` and ``endColumn`` arguments to the ``FITS_record`` + constructor. These are unlikely to be used by any user code. + + These deprecated interfaces will be removed from the development version of + Astropy following the v0.4 release (they will still be available in any + v0.4.x bugfix releases, however). + +astropy.modeling +^^^^^^^^^^^^^^^^ + +- The method computing the derivative of the model with respect + to parameters was renamed from ``deriv`` to ``fit_deriv``. [#1739] + +- ``ParametricModel`` and the associated ``Parametric1DModel`` and + ``Parametric2DModel`` classes have been renamed ``FittableModel``, + ``Fittable1DModel``, and ``Fittable2DModel`` respectively. The base + ``Model`` class has subsumed the functionality of the old + ``ParametricModel`` class so that all models support parameter constraints. + The only distinction of ``FittableModel`` is that anything which subclasses + it is assumed "safe" to use with Astropy fitters.
[#2276] + +- ``NonLinearLSQFitter`` has been renamed ``LevMarLSQFitter`` to emphasise + that it uses the Levenberg-Marquardt optimization algorithm with a + least squares statistic function. [#1914] + +- The ``SLSQPFitter`` class has been renamed ``SLSQPLSQFitter`` to emphasize + that it uses the Sequential Least Squares Programming optimization + algorithm with a least squares statistic function. [#1914] + +- The ``Fitter.errorfunc`` method has been renamed to the more general + ``Fitter.objective_function``. [#1914] + +astropy.nddata +^^^^^^^^^^^^^^ + +- Issue warning if unit is changed from a non-trivial value by directly + setting ``NDData.unit``. [#2411] + +- The ``mask`` and ``flag`` attributes of ``astropy.nddata.NDData`` can now + be set with any array-like object instead of requiring that they be set + with a ``numpy.ndarray``. [#2419] + +astropy.sphinx +^^^^^^^^^^^^^^ + +- Use of the ``astropy.sphinx`` module is deprecated; all new development of + this module is in ``astropy_helpers.sphinx`` which should be used instead + (therefore documentation builds that made use of any of the utilities in + ``astropy.sphinx`` now have ``astropy_helpers`` as a documentation + dependency). + +astropy.table +^^^^^^^^^^^^^ + +- The default table printing function now shows a table header row for units + if any columns have the unit attribute set. [#1282] + +- Before, an unmasked ``Table`` was automatically converted to a masked + table if generated from a masked Table or a ``MaskedColumn``. + Now, this conversion is only done if explicitly requested or if any + of the input values is actually masked. [#1185] + +- The repr() function of ``astropy.table.Table`` now shows the units + if any columns have the unit attribute set. [#2180] + +- The semantics of the config options ``table.max_lines`` and + ``table.max_width`` has changed slightly. If these values are not + set in the config file, astropy will try to determine the size + automatically from the terminal. [#2683] + +astropy.time +^^^^^^^^^^^^ + +- Correct use of UT in TDB calculation [#1938, #1939]. + +- ``TimeDelta`` objects can have scales other than TAI [#1932]. + +- Location information should now be passed on via an ``EarthLocation`` + instance or anything that initialises it, e.g., a tuple containing + either geocentric or geodetic coordinates. [#1928] + +astropy.units +^^^^^^^^^^^^^ + +- ``Quantity`` now converts input to float by default, as this is physically + most sensible for nearly all units [#1776]. + +- ``Quantity`` comparisons with ``==`` or ``!=`` now always return ``True`` + or ``False``, even if units do not match (for which case a ``UnitsError`` + used to be raised). [#2328] + +- Applying ``float`` or ``int`` to a ``Quantity`` now works for all + dimensionless quantities; they are automatically converted to unscaled + dimensionless. [#2249] + +- The exception ``astropy.units.UnitException``, which was + deprecated in astropy 0.2, has been removed. Use + ``astropy.units.UnitError`` instead [#2386] + +- Initializing a ``Quantity`` with a valid number/array with a ``unit`` + attribute now interprets that attribute as the units of the input value. + This makes it possible to initialize a ``Quantity`` from an Astropy + ``Table`` column and have it correctly pick up the units from the column. + [#2486] + +astropy.wcs +^^^^^^^^^^^ + +- ``calcFootprint`` was deprecated. It is replaced by + ``calc_footprint``. An optional boolean keyword ``center`` was + added to ``calc_footprint``. 
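It controls whether the centers or the corners of the pixels are used in + the computation. [#2384] + + A minimal sketch of the new keyword (the WCS parameters here are purely + illustrative):: + + >>> from astropy.wcs import WCS + >>> w = WCS(naxis=2) + >>> w.wcs.crpix = [5., 5.] + >>> w.wcs.cdelt = [-0.1, 0.1] + >>> w.wcs.crval = [10., 20.] + >>> w.wcs.ctype = ['RA---TAN', 'DEC--TAN'] + >>> corners = w.calc_footprint(axes=(10, 10), center=False)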
+ +- ``astropy.wcs.WCS.sip_pix2foc`` and + ``astropy.wcs.WCS.sip_foc2pix`` formerly did not conform to the + ``SIP`` standard: ``CRPIX`` was added to the ``foc`` result so + that it could be used as input to "core FITS WCS". As of astropy + 0.4, ``CRPIX`` is no longer added to the result, so the ``foc`` + space is correct as defined in the `SIP convention + `__. [#2360] + +- ``astropy.wcs.UnitConverter``, which was deprecated in astropy + 0.2, has been removed. Use the ``astropy.units`` module + instead. [#2386] + +- The following methods on ``astropy.wcs.WCS``, which were + deprecated in astropy 0.1, have been removed [#2386]: + +- ``all_pix2sky`` -> ``all_pix2world`` + +- ``wcs_pix2sky`` -> ``wcs_pix2world`` + +- ``wcs_sky2pix`` -> ``wcs_world2pix`` + +- The ``naxis1`` and ``naxis2`` attributes and the ``get_naxis`` + method of ``astropy.wcs.WCS``, which were deprecated in astropy + 0.2, have been removed. Use the shape of the underlying FITS data + array instead. [#2386] + +Misc +^^^^ + +- The ``astropy.setup_helpers`` and ``astropy.version_helpers`` modules are + deprecated; any non-critical fixes and development to those modules should + be in ``astropy_helpers`` instead. Packages that use these modules in + their ``setup.py`` should depend on ``astropy_helpers`` following the same + pattern as in the Astropy package template. + + +Bug Fixes +--------- + +astropy.constants +^^^^^^^^^^^^^^^^^ + +- ``astropy.constants.Constant`` objects can now be deep + copied. [#2601] + +astropy.cosmology +^^^^^^^^^^^^^^^^^ + +- The distance modulus function in ``astropy.cosmology`` can now handle + negative distances, which can occur in certain closed cosmologies. [#2008] + +- Removed accidental imports of some extraneous variables in + ``astropy.cosmology``. [#2025] + +astropy.io.ascii +^^^^^^^^^^^^^^^^ + +- ``astropy.io.ascii.read`` would fail to read lists of strings where some of + the strings consisted of just a newline ("\n"). [#2648] + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- Use NaN for missing values in FITS when using ``Table.write`` for float + columns. Earlier the default fill value was close to 1e20. [#2186] + +- Fixes for checksums on 32-bit platforms. Results may be different + if writing or checking checksums in "nonstandard" mode. [#2484] + +- Additional minor bug fixes ported from PyFITS. [#2575] + +astropy.io.votable +^^^^^^^^^^^^^^^^^^ + +- It is now possible to save an ``astropy.table.Table`` object as a + VOTable with any of the supported data formats, ``tabledata``, + ``binary`` and ``binary2``, by using the ``tabledata_format`` + kwarg. [#2138] + +- Fixed a crash writing out variable length arrays. [#2577] + +astropy.nddata +^^^^^^^^^^^^^^ + +- Indexing ``NDData`` in a way that results in a single element returns that + element. [#2170] + +- Change construction of result of arithmetic and unit conversion to allow + subclasses to require the presence of attributes like ``unit``. [#2300] + +- Scale uncertainties to correct units in arithmetic operations and unit + conversion. [#2393] + +- Ensure uncertainty and mask members are copied in arithmetic and + ``convert_unit_to``. [#2394] + +- Mask result of arithmetic if either of the operands is masked. [#2403] + +- Copy all attributes of input object if ``astropy.nddata.NDData`` is + initialized with an ``NDData`` object. [#2406] + +- Copy ``flags`` to new object in ``convert_unit_to``.
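[#2409] + + As a minimal sketch of the attribute-copying behavior several of the + entries above refer to (the values are purely illustrative, and ``flags`` + and ``convert_unit_to`` belong to the ``NDData`` API of this era):: + + >>> import numpy as np + >>> from astropy.nddata import NDData + >>> nd1 = NDData(np.arange(4.), mask=np.array([False, True, False, False])) + >>> nd2 = NDData(nd1) # attributes such as the mask are copied over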
+- Result of ``NDData`` arithmetic makes a copy of any WCS instead of using + a reference. [#2410] + +- Fix unit handling for multiplication/division and use + ``astropy.units.Quantity`` for units arithmetic. [#2413] + +- A masked ``NDData`` is now converted to a masked array when used in an + operation or ufunc with a numpy array. [#2414] + +- An unmasked ``NDData`` now uses an internal representation of its mask + state that ``numpy.ma`` expects so that an ``NDData`` behaves as an + unmasked array. [#2417] + +astropy.sphinx +^^^^^^^^^^^^^^ + +- Fix crash in smart resolver when the resolution doesn't work. [#2591] + +astropy.table +^^^^^^^^^^^^^ + +- The ``astropy.table.Column`` object can now use both functions and callable + objects as formats. [#2313] + +- Fixed a problem on 64-bit Windows that caused errors + "expected 'DTYPE_t' but got 'long long'". [#2490] + +- Fix initialisation of ``TableColumns`` with lists or tuples. [#2647] + +- Fix removal of single column using ``remove_columns``. [#2699] + +- Fix a problem where setting a row element within a masked table did not + update the corresponding table element. [#2734] + +astropy.time +^^^^^^^^^^^^ + +- Correct UT1->UTC->UT1 round-trip being off by 1 second if UT1 is + on a leap second. [#2077] + +astropy.units +^^^^^^^^^^^^^ + +- ``Quantity.copy`` now behaves identically to ``ndarray.copy``, and thus + supports the ``order`` argument (for numpy >=1.6). [#2284] + +- Composing base units into identical composite units now works. [#2382] + +- Creating and composing/decomposing units is now substantially + faster. [#2544] + +- ``Quantity`` objects can now be assigned NaN. [#2695] + +astropy.wcs +^^^^^^^^^^^ + +- Astropy now requires wcslib version 4.23. The version of wcslib + included with astropy has been updated to version 4.23. + +- Bug fixes in the projection routines: in ``hpxx2s`` [the + cartesian-to-spherical operation of the ``HPX`` projection] + relating to bounds checking, a bug introduced in wcslib 4.20; in + ``parx2s`` and ``molx2s`` [the cartesian-to-spherical operations of + the ``PAR`` and ``MOL`` projections respectively] relating to + setting the stat vector; in ``hpxx2s`` relating to implementation + of the vector API; and in ``xphx2s`` relating to setting an + out-of-bounds value of *phi*. + +- In the ``PCO`` projection, use alternative projection equations + for greater numerical precision near theta == 0. In the ``COP`` + projection, return an exact result for theta at the poles. + Relaxed the tolerance for bounds checking a little in the ``SFL`` + projection. + +- Fix a bug allocating insufficient memory in + ``astropy.wcs.WCS.sub``. [#2468] + +- A new method, ``Wcsprm.bounds_check`` (corresponding to wcslib's + ``wcsbchk``) has been added to control what bounds checking is performed by + wcslib. + +- ``WCS.to_header`` will now raise a more meaningful exception when the WCS + information is invalid or inconsistent in some way. [#1854] + +- In ``WCS.to_header``, ``RESTFRQ`` and ``RESTWAV`` are no longer + rewritten if zero. [#2468] + +- In ``WCS.to_header``, floating point values will now always be written + with an exponent or fractional part, i.e. ``.0`` being appended if necessary + to achieve this. [#2468] + +- If the C extension for ``astropy.wcs`` was not built or fails to import for + any reason, ``import astropy.wcs`` will result in an ``ImportError``, + rather than obscure errors appearing later once ``astropy.wcs`` is used.
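[#2061] + + A minimal sketch of code that benefits from this fail-fast behavior (the + fallback shown is an illustrative assumption, not a documented pattern):: + + >>> try: + ... from astropy import wcs + ... except ImportError: + ... wcs = None # C extension unavailable; fail early and clearly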
+ +- When the C extension for ``astropy.wcs`` is built using a version of + ``wcslib`` already present in the system, the package does not try + to install ``wcslib`` headers under ``astropy/wcs/include``. [#2536] + +- Fixes an unresolved external symbol error in the + ``astropy.wcs._wcs`` C extension on Microsoft Windows when built + with a Microsoft compiler. [#2478] + +Misc +^^^^ + +- Running the test suite with ``python setup.py test`` now works if + the path to the source contains spaces. [#2488] + +- The version of ERFA included with Astropy is now v1.1.0. [#2497] + +- Removed a deprecated option from the Travis configuration and forced the + use of wheels rather than allowing builds from source. [#2576] + +- The short option ``-n`` to run tests in parallel was broken + (it conflicts with the distutils built-in option of "dry-run"). + Changed to ``-j``. [#2566] + +Other Changes and Additions +--------------------------- + +- ``python setup.py test --coverage`` will now give more accurate + results, because the coverage analysis will include early imports of + astropy. There doesn't seem to be a way to get this to work when + doing ``import astropy; astropy.test()``, so the ``coverage`` + keyword to ``astropy.test`` has been removed. Coverage testing now + depends only on `coverage.py + `__, not + ``pytest-cov``. [#2112] + +- The included version of py.test has been upgraded to 2.5.1. [#1970] + +- The included version of six.py has been upgraded to 1.5.2. [#2006] + +- Where appropriate, tests are now run both with and without the + ``unicode_literals`` option to ensure that we support both cases. [#1962] + +- Running the Astropy test suite from within the IPython REPL is disabled for + now due to bad interaction between the test runner and IPython's logging + and I/O handler. For now, the Astropy tests should be run in the basic + Python interpreter. [#2684] + +- Added support for numerical comparison of floating point values appearing in + the output of doctests using a ``+FLOAT_CMP`` doctest flag. [#2087] + +- A monkey patch is performed to fix a bug in Numpy version 1.7 and + earlier where unicode fill values on masked arrays are not + supported. This may cause unintended side effects if your + application also monkey patches ``numpy.ma`` or relies on the broken + behavior. If unicode support of masked arrays is important to your + application, upgrade to Numpy 1.8 or later for best results. [#2059] + +- The developer documentation has been extensively rearranged and + rewritten. [#1712] + +- The ``human_time`` function in ``astropy.utils`` now returns strings + without zero padding. [#2420] + +- The ``bdist_dmg`` command for ``setup.py`` has now been removed. [#2553] + +- Many broken API links have been fixed in the documentation, and the + ``nitpick`` Sphinx option is now used to avoid broken links in future. + [#1221, #2019, #2109, #2161, #2162, #2192, #2200, #2296, #2448, #2456, + #2460, #2467, #2476, #2508, #2509] + + +0.3.2 (2014-05-13) +================== + +Bug Fixes +--------- + +astropy.coordinates +^^^^^^^^^^^^^^^^^^^ + +- If the ``sep`` argument is specified to be a single character in + ``sexagisimal_to_string``, it now includes separators only between + items. [#2183] + +- Ensure comparisons involving ``Distance`` objects do not raise exceptions; + also ensure operations that lead to units other than length return + ``Quantity``. [#2206, #2250] + +- Multiplication and division of ``Angle`` objects is now + supported.
[#2273] + +- Fixed ``Angle.to_string`` functionality so that negative angles have the + correct amount of padding when ``pad=True``. [#2337] + +- Mixing strings and quantities in the ``Angle`` constructor now + works. For example: ``Angle(['1d', 1. * u.d])``. [#2398] + +- If ``Longitude`` is given a ``Longitude`` as input, use its ``wrap_angle`` + by default [#2705] + +astropy.cosmology +^^^^^^^^^^^^^^^^^ + +- Fixed ``format()`` compatibility with Python 2.6. [#2129] + +- Be more careful about converting to floating point internally [#1815, #1818] + +astropy.io.ascii +^^^^^^^^^^^^^^^^ + +- The CDS reader in ``astropy.io.ascii`` can now handle multiple + description lines in ReadMe files. [#2225] + +- When reading a table with values that generate an overflow error during + type conversion (e.g. overflowing the native C long type), fall through to + using string. Previously this generated an exception [#2234]. + +- Recognize any string with one to four dashes as null value. [#1335] + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- Allow pickling of ``FITS_rec`` objects. [#1597] + +- Improved behavior when writing large compressed images on OSX by removing + an unnecessary check for platform architecture. [#2345] + +- Fixed an issue where Astropy ``Table`` objects containing boolean columns + were not correctly written out to FITS files. [#1953] + +- Several other bug fixes ported from PyFITS v3.2.3 [#2368] + +- Fixed a crash on Python 2.x when writing a FITS file directly to a + ``StringIO.StringIO`` object. [#2463] + +astropy.io.registry +^^^^^^^^^^^^^^^^^^^ + +- Allow readers/writers with the same name to be attached to different + classes. [#2312] + +astropy.io.votable +^^^^^^^^^^^^^^^^^^ + +- By default, floating point values are now written out using + ``repr`` rather than ``str`` to preserve precision [#2137] + +astropy.modeling +^^^^^^^^^^^^^^^^ + +- Fixed the ``SIP`` and ``InverseSIP`` models both so that they work in the + first place, and so that they return results consistent with the SIP + functions in ``astropy.wcs``. [#2177] + +astropy.stats +^^^^^^^^^^^^^ + +- Ensure the ``axis`` keyword in ``astropy.stats.funcs`` can now be used for + all axes. [#2173] + +astropy.table +^^^^^^^^^^^^^ + +- Ensure nameless columns can be printed, using 'None' for the header. [#2213] + +astropy.time +^^^^^^^^^^^^ + +- Fixed pickling of ``Time`` objects. [#2123] + +astropy.units +^^^^^^^^^^^^^ + +- ``Quantity._repr_latex_()`` returns ``NotImplementedError`` for quantity + arrays instead of an uninformative formatting exception. [#2258] + +- Ensure ``Quantity.flat`` always returns ``Quantity``. [#2251] + +- Angstrom unit renders better in MathJax [#2286] + +astropy.utils +^^^^^^^^^^^^^ + +- Progress bars will now be displayed inside the IPython + qtconsole. [#2230] + +- ``data.download_file()`` now evaluates ``REMOTE_TIMEOUT()`` at runtime + rather than import time. Previously, setting ``REMOTE_TIMEOUT`` after + import had no effect on the function's behavior. [#2302] + +- Progressbar will be limited to 100% so that the bar does not exceed the + terminal width. The numerical display can still exceed 100%, however. + +astropy.vo +^^^^^^^^^^ + +- Fixed ``format()`` compatibility with Python 2.6. [#2129] + +- Cone Search validation no longer raises ``ConeSearchError`` for positive RA. + [#2240, #2242] + +astropy.wcs +^^^^^^^^^^^ + +- Fixed a bug where calling ``astropy.wcs.Wcsprm.sub`` with + ``WCSSUB_CELESTIAL`` may cause memory corruption due to + underallocation of a temporary buffer. 
[#2350] + +- Fixed a memory allocation bug in ``astropy.wcs.Wcsprm.sub`` and + ``astropy.wcs.Wcsprm.copy``. [#2439] + +Misc +^^^^ + +- Fixes for compatibility with Python 3.4. [#1945] + +- ``import astropy; astropy.test()`` now correctly uses the same test + configuration as ``python setup.py test`` [#1811] + + +0.3.1 (2014-03-04) +================== + +Bug Fixes +--------- + +astropy.config +^^^^^^^^^^^^^^ + +- Fixed a bug where ``ConfigurationItem.set_temp()`` does not reset to + default value when exception is raised within ``with`` block. [#2117] + +astropy.convolution +^^^^^^^^^^^^^^^^^^^ + +- Fixed a bug where ``_truncation`` was left undefined for ``CustomKernel``. + [#2016] + +- Fixed a bug with ``_normalization`` when ``CustomKernel`` input array + sums to zero. [#2016] + +astropy.coordinates +^^^^^^^^^^^^^^^^^^^ + +- Fixed a bug where using ``==`` on two array coordinates wouldn't + work. [#1832] + +- Fixed bug which caused ``len()`` not to work for coordinate objects and + added a ``.shape`` property to get appropriately array-like behavior. + [#1761, #2014] + +- Fixed a bug where sexagesimal notation would sometimes include + exponential notation in the last field. [#1908, #1913] + +- ``CompositeStaticMatrixTransform`` no longer attempts to reference the + undefined variable ``self.matrix`` during instantiation. [#1944] + +- Fixed pickling of ``Longitude``, ensuring ``wrap_angle`` is preserved + [#1961] + +- Allow ``sep`` argument in ``Angle.to_string`` to be empty (resulting in no + separators) [#1989] + +astropy.io.ascii +^^^^^^^^^^^^^^^^ + +- Allow passing unicode delimiters when reading or writing tables. The + delimiter must be convertible to pure ASCII. [#1949] + +- Fix a problem when reading a table and renaming the columns to names that + already exist. [#1991] + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- Ported all bug fixes from PyFITS 3.2.1. See the PyFITS changelog at + http://pyfits.readthedocs.io/en/v3.2.1/ [#2056] + +astropy.io.misc +^^^^^^^^^^^^^^^ + +- Fixed issues in the HDF5 Table reader/writer functions that occurred on + Windows. [#2099] + +astropy.io.votable +^^^^^^^^^^^^^^^^^^ + +- The ``write_null_values`` kwarg to ``VOTable.to_xml``, when set to `False` + (the default) would produce non-standard VOTable files. Therefore, this + functionality has been replaced by a better understanding that knows which + fields in a VOTable may be left empty (only ``char``, ``float`` and + ``double`` in VOTable 1.1 and 1.2, and all fields in VOTable 1.3). The + kwarg is still accepted but it will be ignored, and a warning is emitted. + [#1809] + +- Printing out a ``astropy.io.votable.tree.Table`` object using `repr` or + `str` now uses the pretty formatting in ``astropy.table``, so it's possible + to easily preview the contents of a ``VOTable``. [#1766] + +astropy.modeling +^^^^^^^^^^^^^^^^ + +- Fixed bug in computation of model derivatives in ``LinearLSQFitter``. + [#1903] + +- Raise a ``NotImplementedError`` when fitting composite models. [#1915] + +- Fixed bug in the computation of the ``Gaussian2D`` model. [#2038] + +- Fixed bug in the computation of the ``AiryDisk2D`` model. [#2093] + +astropy.sphinx +^^^^^^^^^^^^^^ + +- Added slightly more useful debug info for AstropyAutosummary. [#2024] + +astropy.table +^^^^^^^^^^^^^ + +- The column string representation for n-dimensional cells with only + one element has been fixed. [#1522] + +- Fix a problem that caused ``MaskedColumn.__getitem__`` to not preserve + column metadata. 
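[#1471, #1872] + + A minimal sketch of the preserved metadata (the column name and unit are + purely illustrative):: + + >>> from astropy.table import MaskedColumn + >>> c = MaskedColumn([1., 2., 3.], name='a', unit='m', + ... mask=[False, True, False]) + >>> c[1:].unit # metadata now survives slicing + Unit("m")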
+- With Numpy prior to version 1.6.2, tables with Unicode columns now + sort correctly. [#1867] + +- ``astropy.table`` can now print out tables with Unicode columns containing + non-ASCII characters. [#1864] + +- Columns can now be named with Unicode strings, as long as they contain only + ASCII characters. This makes using ``astropy.table`` easier on Python 2 + when ``from __future__ import unicode_literals`` is used. [#1864] + +- Allow pickling of ``Table``, ``Column``, and ``MaskedColumn`` objects. [#792] + +- Fix a problem where it was not possible to rename columns after sorting or + adding a row. [#2039] + +astropy.time +^^^^^^^^^^^^ + +- Fix a problem where a scale conversion error in ``TimeFromEpoch`` + was not showing a useful error message. [#2046] + +- Fix a problem when converting to one of the formats ``unix``, ``cxcsec``, + ``gps`` or ``plot_date`` when the time scale is ``UT1``, ``TDB`` or + ``TCB``. [#1732] + +- Ensure that ``delta_ut1_utc`` gets calculated when accessed directly, + instead of failing and giving a rather obscure error message. [#1925] + +- Fix a bug when computing the TDB to TT offset. The transform routine was + using meters instead of kilometers for the Earth vector. [#1929] + +- Increase ``__array_priority__`` so that ``TimeDelta`` can convert itself + to a ``Quantity`` also in reverse operations. [#1940] + +- Correct hop list from TCG to TDB to ensure that conversion is + possible. [#2074] + +astropy.units +^^^^^^^^^^^^^ + +- ``Quantity`` initialisation rewritten for speed. [#1775] + +- Fixed minor string formatting issue for dimensionless quantities. [#1772] + +- Fix error for inplace operations on non-contiguous quantities. [#1834] + +- The definition of the unit ``bar`` has been corrected to "1e5 + Pascal" from "100 Pascal". [#1910] + +- For units that are close to known units, but not quite, for + example due to differences in case, the exception will now include + recommendations. [#1870] + +- The generic and FITS unit parsers now accept multiple slashes in + the unit string. There are multiple ways to interpret them, but + the approach taken here is to convert "m/s/kg" to "m s-1 kg-1". + Multiple slashes are accepted, but discouraged, by the FITS + standard, due to the ambiguity of parsing, so a warning is raised + when it is encountered. [#1911] + +- The use of "angstrom" (with a lower case "a") is now accepted in FITS unit + strings, since it is in common usage. However, since it is not officially + part of the FITS standard, a warning will be issued when it is encountered. + [#1911] + +- Pickling unrecognized units will no longer raise an ``AttributeError``. + [#2047] + +- ``astropy.units`` now correctly preserves the precision of + fractional powers. [#2070] + +- If a ``Unit`` or ``Quantity`` is raised to a floating point power + that is very close to a rational number with a denominator less + than or equal to 10, it is converted to a ``Fraction`` object to + preserve its precision through complex unit conversion operations. + [#2070] + +astropy.utils +^^^^^^^^^^^^^ + +- Fixed crash in ``timer.RunTimePredictor.do_fit``. [#1905] + +- Fixed ``astropy.utils.compat.argparse`` for Python 3.1. [#2017] + +astropy.wcs +^^^^^^^^^^^ + +- ``astropy.wcs.WCS``, ``astropy.wcs.WCS.fix`` and + ``astropy.wcs.find_all_wcs`` now have a ``translate_units`` keyword + argument that is passed down to ``astropy.wcs.Wcsprm.fix``. This can be + used to specify any unsafe translations of units from rarely used ones to + more commonly used ones.
+ + Although ``"S"`` is commonly used to represent seconds, its translation to + ``"s"`` is potentially unsafe since the standard recognizes ``"S"`` + formally as Siemens, however rarely that may be used. The same applies to + ``"H"`` for hours (Henry), and ``"D"`` for days (Debye). + + When these sorts of changes are performed, a warning is emitted. + [#1854] + +- When a unit is "fixed" by ``astropy.wcs.WCS.fix`` or + ``astropy.wcs.Wcsprm.unitfix``, it now correctly reports the ``CUNIT`` + field that was changed. [#1854] + +- ``astropy.wcs.Wcs.printwcs`` will no longer warn that ``cdelt`` is being + ignored when none was present in the FITS file. [#1845] + +- ``astropy.wcs.Wcsprm.set`` is called from within the ``astropy.wcs.WCS`` + constructor, therefore any invalid information in the keywords will be + raised from the constructor, rather than on a subsequent call to a + transformation method. [#1918] + +- Fix a memory corruption bug when using ``astropy.wcs.Wcs.sub`` with + ``astropy.wcs.WCSSUB_CELESTIAL``. [#1960] + +- Fixed the ``AttributeError`` exception that was raised when using + ``astropy.wcs.WCS.footprint_to_file``. [#1912] + +- Fixed a ``NameError`` exception that was raised when using + ``astropy.wcs.validate`` or the ``wcslint`` script. [#2053] + +- Fixed a bug where named WCSes may be erroneously reported as ``' '`` when + using ``astropy.wcs.validate`` or the ``wcslint`` script. [#2053] + +- Fixed a bug where error messages about incorrect header keywords + may not be propagated correctly, resulting in a "NULL error object + in wcslib" message. [#2106] + +Misc +^^^^ + +- There are a number of improvements to make Astropy work better on big + endian platforms, such as MIPS, PPC, s390x and SPARC. [#1849] + +- The test suite will now raise exceptions when a deprecated feature of + Python or Numpy is used. [#1948] + +Other Changes and Additions +--------------------------- + +- A new function, ``astropy.wcs.get_include``, has been added to get the + location of the ``astropy.wcs`` C header files. [#1755] + +- The doctests in the ``.rst`` files in the ``docs`` folder are now + tested along with the other unit tests. This is in addition to the + testing of doctests in docstrings that was already being performed. + See ``docs/development/testguide.rst`` for more information. [#1771] + +- Fix a problem where import fails on Python 3 if setup.py exists + in current directory. [#1877] + + +0.3 (2013-11-20) +================ + +New Features +------------ + +- General + +- A top-level configuration item, ``unicode_output`` has been added to + control whether the Unicode string representation of certain + objects will contain Unicode characters. For example, when + ``use_unicode`` is `False` (default):: + + >>> from astropy import units as u + >>> print(unicode(u.degree)) + deg + + When ``use_unicode`` is `True`:: + + >>> from astropy import units as u + >>> print(unicode(u.degree)) + ° + + See `handling-unicode + `_ + for more information. [#1441] + +- ``astropy.utils.misc.find_api_page`` is now imported into the top-level. + This allows usage like ``astropy.find_api_page(astropy.units.Quantity)``. + [#1779] + +astropy.convolution +^^^^^^^^^^^^^^^^^^^ + +- New class-based system for generating kernels, replacing ``make_kernel``. + [#1255] The ``astropy.nddata.convolution`` sub-package has now been moved + to ``astropy.convolution``. 
[#1451] + +astropy.coordinates +^^^^^^^^^^^^^^^^^^^ + +- Two classes ``astropy.coordinates.Longitude`` and + ``astropy.coordinates.Latitude`` have been added. These are derived from + the new ``Angle`` class and used for all longitude-like (RA, azimuth, + galactic L) and latitude-like coordinates (Dec, elevation, galactic B) + respectively. The ``Longitude`` class provides auto-wrapping capability + and ``Latitude`` performs bounds checking. + +- ``astropy.coordinates.Distance`` supports conversion to and from distance + modulii. [#1472] + +- ``astropy.coordinates.SphericalCoordinateBase`` and derived classes now + support arrays of coordinates, enabling large speed-ups for some operations + on multiple coordinates at the same time. These coordinates can also be + indexed using standard slicing or any Numpy-compatible indexing. [#1535, + #1615] + +- Array coordinates can be matched to other array coordinates, finding the + closest matches between the two sets of coordinates (see the + ``astropy.coordinates.matching.match_coordinates_3d`` and + ``astropy.coordinates.matching.match_coordinates_sky`` functions). [#1535] + +astropy.cosmology +^^^^^^^^^^^^^^^^^ + +- Added support for including massive Neutrinos in the cosmology classes. The + Planck (2013) cosmology has been updated to use this. [#1364] + +- Calculations now use and return ``Quantity`` objects where appropriate. + [#1237] + +astropy.io.ascii +^^^^^^^^^^^^^^^^ + +- Added support for writing IPAC format tables [#1152]. + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- Added initial support for table columns containing pseudo-unsigned + integers. This is currently enabled by using the ``uint=True`` option when + opening files; any table columns with the correct BZERO value will be + interpreted and returned as arrays of unsigned integers. [#906] + +- Upgraded vendored copy of CFITSIO to v3.35, though backwards compatibility + back to version v3.28 is maintained. + +- Added support for reading and writing tables using the Q format for columns. + The Q format is identical to the P format (variable-length arrays) except + that it uses 64-bit integers for the data descriptors, allowing more than + 4 GB of variable-length array data in a single table. + +- Some refactoring of the table and ``FITS_rec`` modules in order to better + separate the details of the FITS binary and ASCII table data structures from + the HDU data structures that encapsulate them. Most of these changes should + not be apparent to users (but see API Changes below). + +astropy.io.votable +^^^^^^^^^^^^^^^^^^ + +- Updated to support the VOTable 1.3 draft. [#433] + +- Added the ability to look up and group elements by their utype attribute. + [#622] + +- The format of the units of a VOTable file can be specified using the + ``unit_format`` parameter. Note that units are still always written out + using the CDS format, to ensure compatibility with the standard. + +astropy.modeling +^^^^^^^^^^^^^^^^ + +- Added a new framework for representing and evaluating mathematical models + and for fitting data to models. See "What's New in Astropy 0.3" in the + documentation for further details. [#493] + +astropy.stats +^^^^^^^^^^^^^ + +- Added robust statistics functions + ``astropy.stats.funcs.median_absolute_deviation``, + ``astropy.stats.funcs.biweight_location``, and + ``astropy.stats.funcs.biweight_midvariance``. [#621] + +- Added ``astropy.stats.funcs.signal_to_noise_oir_ccd`` for computing the + signal to noise ratio for source being observed in the optical/IR using a + CCD. 
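[#870] + + As a minimal sketch of the robust statistics additions (the data values + are purely illustrative):: + + >>> import numpy as np + >>> from astropy.stats import median_absolute_deviation + >>> data = np.array([1., 2., 3., 100.]) # one gross outlier + >>> float(median_absolute_deviation(data)) + 1.0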
+- Add ``axis=int`` option to ``astropy.stats.funcs.sigma_clip`` to allow + clipping along a given axis for multidimensional data. [#1083] + +astropy.table +^^^^^^^^^^^^^ + +- New columns can be added to a table via assignment to a non-existing + column by name. [#726] + +- Added ``join`` function to perform a database-like join on two tables. This + includes support for inner, left, right, and outer joins as well as + metadata merging. [#903] + +- Added ``hstack`` and ``vstack`` functions to stack two or more tables. + [#937] + +- Tables now have a ``.copy`` method and include support for ``copy`` and + ``deepcopy``. [#1208] + +- Added support for selecting and manipulating groups within a table with + a database style ``group_by`` method. [#1424] + +- Table ``read`` and ``write`` functions now include rudimentary support + for reading and writing FITS tables via the unified reading/writing + interface. [#591] + +- The ``units`` and ``dtypes`` attributes and keyword arguments in Column, + MaskedColumn, Row, and Table are now deprecated in favor of the + singular ``unit`` and ``dtype``. [#1174] + +- Setting a column from a Quantity now correctly sets the unit on the Column + object. [#732] + +- Add ``remove_row`` and ``remove_rows`` to remove table rows. [#1230] + +- Added a new ``Table.show_in_browser`` method that opens a web browser + and displays the table rendered as HTML. [#1342] + +- New tables can now be instantiated using a single row from an existing + table. [#1417] + +astropy.time +^^^^^^^^^^^^ + +- New ``Time`` objects can be instantiated from existing ``Time`` objects + (but with different format, scale, etc.). [#889] + +- Added a ``Time.now`` classmethod that returns the current UTC time, + similarly to Python's ``datetime.now``. [#1061] + +- Update internal time manipulations so that arithmetic with Time and + TimeDelta objects maintains sub-nanosecond precision over a time span + longer than the age of the universe. [#1189] + +- Use ``astropy.utils.iers`` to provide ``delta_ut1_utc``, so that + automatic calculation of UT1 becomes possible. [#1145] + +- Add ``datetime`` format which allows converting to and from standard + library ``datetime.datetime`` objects. [#860] + +- Add ``plot_date`` format which allows converting to and from the date + representation used when plotting dates with matplotlib via the + ``matplotlib.pyplot.plot_date`` function. [#860] + +- Add ``gps`` format (seconds since 1980-01-01 00:00:00 UTC, + including leap seconds). [#1164] + +- Add array indexing to Time objects. [#1132] + +- Allow for arithmetic of multi-element and single-element Time and TimeDelta + objects. [#1081] + +- Allow multiplication and division of TimeDelta objects by + constants and arrays, as well as changing sign (negation) and + taking the absolute value of TimeDelta objects. [#1082] + +- Allow comparisons of Time and TimeDelta objects. [#1171] + +- Support interaction of Time and Quantity objects that represent a time + interval. [#1431] + +astropy.units +^^^^^^^^^^^^^ + +- Added parallax equivalency for length-angle. [#985] + +- Added mass-energy equivalency. [#1333] + +- Added a new-style format method which will use format specifiers + (like ``0.03f``) in new-style format strings for the Quantity's value. + Specifiers which can't be applied to the value will fall back to the + entire string representation of the quantity. [#1383] + +- Added support for complex number values in quantities.
[#1384] + +- Added new spectroscopic equivalencies for velocity conversions + (relativistic, optical, and radio conventions are supported) [#1200] + +- The ``spectral`` equivalency now also handles wave number. + +- The ``spectral_density`` equivalency now also accepts a Quantity for the + frequency or wavelength. It also handles additional flux units. + +- Added Brightness Temperature (antenna gain) equivalency for conversion + between :math:`T_B` and flux density. [#1327] + +- Added percent unit, and allowed any string containing just a number to be + interpreted as a scaled dimensionless unit. [#1409] + +- New-style format strings can be used to set the unit output format. For + example, ``"{0:latex}".format(u.km)`` will print with the latex formatter. + [#1462] + +- The ``Unit.is_equivalent`` method can now take a tuple. In this case, the + method returns ``True`` if the unit is equivalent to any of the units + listed in the tuple. [#1521] + +- ``def_unit`` can now take a 2-tuple of names of the form (short, long), + where each entry is a list. This allows for handling strange units that + might have multiple short names. [#1543] + +- Added ``dimensionless_angles`` equivalency, which allows conversion of any + power of radian to dimensionless. [#1161] + +- Added the ability to enable set of units, or equivalencies that are used by + default. Also provided context managers for these cases. [#1268] + +- Imperial units are disabled by default. [#1593, #1662] + +- Added an ``astropy.units.add_enabled_units`` context manager, which allows + creating a temporary context with additional units temporarily enabled in + the global units namespace. [#1662] + +- ``Unit`` instances now have ``.si`` and ``.cgs`` properties a la + ``Quantity``. These serve as shortcuts for ``Unit.to_system(cgs)[0]`` + etc. [#1610] + +astropy.vo +^^^^^^^^^^ + +- New package added to support Virtual Observatory Simple Cone Search query + and service validation. [#552] + +astropy.wcs +^^^^^^^^^^^ + +- Fixed attribute error in ``astropy.wcs.Wcsprm`` (lattype->lattyp) [#1463] + +- Included a new command-line script called ``wcslint`` and accompanying API + for validating the WCS in a given FITS file or header. [#580] + +- Upgraded included version of WCSLIB to 4.19. + +astropy.utils +^^^^^^^^^^^^^ + +- Added a new set of utilities in ``astropy.utils.timer`` for analyzing the + runtime of functions and making runtime predections for larger inputs. + [#743] + +- ``ProgressBar`` and ``Spinner`` classes can now be used directly to return + generator expressions. [#771] + +- Added ``astropy.utils.iers`` which allows reading in of IERS A or IERS B + bulletins and interpolation in UT1-UTC. + +- Added a function ``astropy.utils.find_api_page``--given a class or object + from the ``astropy`` package, this will open that class's API documentation + in a web browser. [#663] + +- Data download functions such as ``download_file`` now accept a + ``show_progress`` argument to suppress console output, and a ``timeout`` + argument. [#865, #1258] + +astropy.extern.six +^^^^^^^^^^^^^^^^^^ + +- Added `six `_ for python2/python3 + compatibility + +- Astropy now uses the ERFA library instead of the IAU SOFA library for + fundamental time transformation routines. The ERFA library is derived, with + permission, from the IAU SOFA library but is distributed under a BSD license. + See ``license/ERFA.rst`` for details. 
[#1293] + +astropy.logger +^^^^^^^^^^^^^^ + +- The Astropy logger now no longer catches exceptions by default, and also + only captures warnings emitted by Astropy itself (prior to this change, + following an import of Astropy, any warning got re-directed through the + Astropy logger). Logging to the Astropy log file has also been disabled by + default. However, users of Astropy 0.2 will likely still see the previous + behavior with Astropy 0.3 for exceptions and logging to file since the + default configuration file installed by 0.2 set the exception logging to be + on by default. To get the new behavior, set the ``log_exceptions`` and + ``log_to_file`` configuration items to ``False`` in the ``astropy.cfg`` + file. [#1331] + +API Changes +----------- + +- General + +- The configuration option ``utils.console.use_unicode`` has been + moved to the top level and renamed to ``unicode_output``. It now + not only affects console widgets, such as progress bars, but also + controls whether calling `unicode` on certain classes will return a + string containing unicode characters. + +astropy.coordinates +^^^^^^^^^^^^^^^^^^^ + +- The ``astropy.coordinates.Angle`` class is now a subclass of + ``astropy.units.Quantity``. This means it has all of the methods of a + `numpy.ndarray`. [#1006] + +- The ``astropy.coordinates.Distance`` class is now a subclass of + ``astropy.units.Quantity``. This means it has all of the methods of a + `numpy.ndarray`. [#1472] + +- All angular units are now supported, not just ``radian``, ``degree`` and + ``hour``, but now ``arcsecond`` and ``arcminute`` as well. The object + will retain its native unit, so when printing out a value initially + provided in hours, its ``to_string()`` will, by default, also be + expressed in hours. + +- The ``Angle`` class now supports arrays of angles. + +- To be consistent with ``units.Unit``, ``Angle.format`` has been + deprecated and renamed to ``Angle.to_string``. + +- To be consistent with ``astropy.units``, all plural forms of unit names + have been removed. Therefore, the following properties of + ``astropy.coordinates.Angle`` should be renamed: + +- ``radians`` -> ``radian`` + +- ``degrees`` -> ``degree`` + +- ``hours`` -> ``hour`` + +- Multiplication and division of two ``Angle`` objects used to raise + ``NotImplementedError``. Now they raise ``TypeError``. + +- The ``astropy.coordinates.Angle`` class no longer has a ``bounds`` + attribute so there is no bounds-checking or auto-wrapping at this level. + This allows ``Angle`` objects to be used in arbitrary arithmetic + expressions (e.g. coordinate distance computation). + +- The ``astropy.coordinates.RA`` and ``astropy.coordinates.Dec`` classes have + been removed and replaced with ``astropy.coordinates.Longitude`` and + ``astropy.coordinates.Latitude`` respectively. These are now used for the + components of Galactic and Horizontal (Alt-Az) coordinates as well instead + of plain ``Angle`` objects. + +- ``astropy.coordinates.angles.rotation_matrix`` and + ``astropy.coordinates.angles.angle_axis`` now take a ``unit`` kwarg instead + of ``degrees`` kwarg to specify the units of the angles. + ``rotation_matrix`` will also take the unit from the given ``Angle`` object + if no unit is provided. + +- The ``AngularSeparation`` class has been removed. The output of the + coordinates ``separation()`` method is now an + ``astropy.coordinates.Angle``. [#1007] + +- The coordinate classes have been renamed in a way that remove the + ``Coordinates`` at the end of the class names. 
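E.g., ``ICRSCoordinates`` from previous versions is now called ``ICRS``. + [#1614] + + A minimal sketch of the renamed class in use (the coordinate values are + purely illustrative, and the keyword form shown is the one used by later + releases):: + + >>> from astropy import units as u + >>> from astropy.coordinates import ICRS + >>> c = ICRS(ra=10.5 * u.deg, dec=-42.3 * u.deg)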
+- ``HorizontalCoordinates`` are now named ``AltAz``, to reflect more common + terminology. + +astropy.cosmology +^^^^^^^^^^^^^^^^^ + +- The Planck (2013) cosmology will likely give slightly different (and more + accurate) results due to the inclusion of neutrino masses. [#1364] + +- Cosmology class properties now return ``Quantity`` objects instead of + simple floating-point values. [#1237] + +- The names of cosmology instances are now truly optional, and are set to + ``None`` rather than the name of the class if the user does not provide + them. [#1705] + +astropy.io.ascii +^^^^^^^^^^^^^^^^ + +- In the ``read`` method of ``astropy.io.ascii``, empty column values in an + ASCII table are now treated as missing values instead of the previous + treatment as a zero-length string "". This now corresponds to the behavior + of other table readers like ``numpy.genfromtxt``. To restore the previous + behavior, set ``fill_values=None`` in the call to ``ascii.read()``. [#919] + +- The ``read`` and ``write`` methods of ``astropy.io.ascii`` now have a + ``format`` argument for specifying the file format. This is the preferred + way to choose the format instead of the ``Reader`` and ``Writer`` + arguments. [#961] + +- The ``include_names`` and ``exclude_names`` arguments were removed from + the ``BaseHeader`` initializer, and are now instead handled by the reader + and writer classes directly. [#1350] + +- Allow numeric and otherwise unusual column names when reading a table + where the ``format`` argument is specified, but other format details such + as the delimiter or quote character are being guessed. [#1692] + +- When reading an ASCII table using the ``Table.read()`` method, the default + has changed from ``guess=False`` to ``guess=True`` to allow auto-detection + of file format. This matches the default behavior of ``ascii.read()``. + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- The ``astropy.io.fits.new_table`` function is marked "pending deprecation". + This does not mean it will be removed outright or that its functionality + has changed. It will likely be replaced in the future by a function with + similar, if subtly different, functionality. A better, if slightly more + verbose, approach is to use ``pyfits.FITS_rec.from_columns`` to create + a new ``FITS_rec`` table--this has the same interface as + ``pyfits.new_table``. The difference is that it returns a plain + ``FITS_rec`` array, and not an HDU instance. This ``FITS_rec`` object can + then be used as the data argument in the constructors for ``BinTableHDU`` + (for binary tables) or ``TableHDU`` (for ASCII tables). This is analogous + to creating an ``ImageHDU`` by passing in an image array. + ``pyfits.FITS_rec.from_columns`` is just a simpler way of creating a + FITS-compatible recarray from a FITS column specification. + +- The ``updateHeader``, ``updateHeaderData``, and ``updateCompressedData`` + methods of the ``CompDataHDU`` class are pending deprecation and moved to + internal methods. The operation of these methods depended too much on + internal state to be used safely by users; instead they are invoked + automatically in the appropriate places when reading/writing compressed + image HDUs. + +- The ``CompDataHDU.compData`` attribute is pending deprecation in favor of + the clearer and more PEP-8 compatible ``CompDataHDU.compressed_data``. + +- The constructor for ``CompDataHDU`` has been changed to accept new keyword + arguments.
The new keyword arguments are essentially the same, but are in + underscore_separated format rather than camelCase format. The old + arguments are still pending deprecation. + +- The internal attributes of HDU classes ``_hdrLoc``, ``_datLoc``, and + ``_datSpan`` have been replaced with ``_header_offset``, ``_data_offset``, + and ``_data_size`` respectively. The old attribute names are still pending + deprecation. This should only be of interest to advanced users who have + created their own HDU subclasses. + +- The following previously deprecated functions and methods have been removed + entirely: ``createCard``, ``createCardFromString``, ``upperKey``, + ``ColDefs.data``, ``setExtensionNameCaseSensitive``, ``_File.getfile``, + ``_TableBaseHDU.get_coldefs``, ``Header.has_key``, ``Header.ascardlist``. + +- Interfaces that were pending deprecation are now fully deprecated. These + include: ``create_card``, ``create_card_from_string``, ``upper_key``, + ``Header.get_history``, and ``Header.get_comment``. + +- The ``.name`` attribute on HDUs is now directly tied to the HDU's header, so + that if ``.header['EXTNAME']`` changes so does ``.name`` and vice-versa. + +astropy.io.registry +^^^^^^^^^^^^^^^^^^^ + +- Identifier functions for reading/writing Table and NDData objects should + now accept ``(origin, *args, **kwargs)`` instead of ``(origin, args, + kwargs)``. [#591] + +- Added a new ``astropy.io.registry.get_formats`` function for listing + registered I/O formats and details about the their readers/writers. [#1669] + +astropy.io.votable +^^^^^^^^^^^^^^^^^^ + +- Added a new option ``use_names_over_ids`` option to use when converting + from VOTable objects to Astropy Tables. This can prevent a situation where + column names are not preserved when converting from a VOTable. [#609] + +astropy.nddata +^^^^^^^^^^^^^^ + +- The ``astropy.nddata.convolution`` sub-package has now been moved to + ``astropy.convolution``, and the ``make_kernel`` function has been removed. + (the kernel classes should be used instead) [#1451] + +astropy.stats.funcs +^^^^^^^^^^^^^^^^^^^ + +- For ``sigma_clip``, the ``maout`` optional parameter has been removed, and + the function now always returns a masked array. A new boolean parameter + ``copy`` can be used to indicated whether the input data should be copied + (``copy=True``, default) or used by reference (``copy=False``) in the + output masked array. [#1083] + +astropy.table +^^^^^^^^^^^^^ + +- The first argument to the ``Column`` and ``MaskedColumn`` classes is now + the data array--the ``name`` argument has been changed to an optional + keyword argument. [#840] + +- Added support for instantiating a ``Table`` from a list of dict, each one + representing a single row with the keys mapping to column names. [#901] + +- The plural 'units' and 'dtypes' have been switched to 'unit' and 'dtype' + where appropriate. The original attributes are still present in this + version as deprecated attributes, but will be removed in the next version. + [#1174] + +- The ``copy`` methods of ``Column`` and ``MaskedColumn`` were changed so + that the first argument is now ``order='C'``. This is required for + compatibility with Numpy 1.8 which is currently in development. [#1250] + +- Comparing a column (with == or !=) to a scalar, an array, or another column + now always returns a boolean Numpy array (which is a masked array if either + of the arguments in the comparison was masked). 
This is in contrast to the + previous behavior, which in some cases returned a boolean Numpy array, and + in some cases returned a boolean Column object. [#1446] + +astropy.time +^^^^^^^^^^^^ + +- For consistency with ``Quantity``, the attributes ``val`` and + ``is_scalar`` have been renamed to ``value`` and ``isscalar``, + respectively, and the attribute ``vals`` has been dropped. [#767] + +- The double-float64 internal representation of time is used more + efficiently to enable better accuracy. [#366] + +- Format and scale arguments are now allowed to be case-insensitive. [#1128] + +astropy.units +^^^^^^^^^^^^^ + +- The ``Quantity`` class now inherits from the Numpy array class, and + includes the following API changes [#929]: + +- Using ``float(...)``, ``int(...)``, and ``long(...)`` on a quantity will + now only work if the quantity is dimensionless and unscaled. + +- All Numpy ufuncs should now treat units correctly (or raise an exception + if not supported), rather than extract the value of quantities and + operate on this, emitting a warning about the implicit loss of units. + +- When using relevant Numpy ufuncs on dimensionless quantities (e.g. + ``np.exp(h * nu / (k_B * T))``), or combining dimensionless quantities + with Python scalars or plain Numpy arrays ``1 + v / c``, the + dimensionless Quantity will automatically be converted to an unscaled + dimensionless Quantity. + +- When initializing a quantity from a value with no unit, it is now set to + be dimensionless and unscaled by default. When initializing a Quantity + from another Quantity and with no unit specified in the initializer, the + unit is now taken from the unit of the Quantity being initialized from. + +- Strings are no longer allowed as the values for Quantities. [#1005] + +- Quantities are always comparable with zero regardless of their units. + [#1254] + +- The exception ``astropy.units.UnitsException`` has been renamed to + ``astropy.units.UnitsError`` to be more consistent with the naming + of built-in Python exceptions. [#1406] + +- Multiplication with and division by a string now always returns a Unit + (rather than a Quantity when the string was first) [#1408] + +- Imperial units are disabled by default. + +astropy.wcs +^^^^^^^^^^^ + +- For those including the ``astropy.wcs`` C headers in their project, they + should now include it as: + + #include "astropy_wcs/astropy_wcs_api.h" + + instead of: + + #include "astropy_wcs_api.h" + + [#1631] + +- The ``--enable-legacy`` option for ``setup.py`` has been removed. [#1493] + +Bug Fixes +--------- + +astropy.io.ascii +^^^^^^^^^^^^^^^^ + +- The ``write()`` function was ignoring the ``fill_values`` argument. [#910] + +- Fixed an issue in ``DefaultSplitter.join`` where the delimiter attribute + was ignored when writing the CSV. [#1020] + +- Fixed writing of IPAC tables containing null values. [#1366] + +- When a table with no header row was read without specifying the format and + using the ``names`` argument, then the first row could be dropped. [#1692] + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- Binary tables containing compressed images may, optionally, contain other + columns unrelated to the tile compression convention. Although this is an + uncommon use case, it is permitted by the standard. + +- Reworked some of the file I/O routines to allow simpler, more consistent + mapping between OS-level file modes ('rb', 'wb', 'ab', etc.) and the more + "PyFITS-specific" modes used by PyFITS like "readonly" and "update". 
+
+- Fixed a long-standing issue where writing binary tables did not correctly
+  write the TFORMn keywords for variable-length array columns (they omitted
+  the max array length parameter of the format). This was thought fixed in
+  an earlier version, but it was only fixed for compressed image HDUs and
+  not for binary tables in general.
+
+astropy.nddata
+^^^^^^^^^^^^^^
+
+- Fixed a crash when trying to multiply or divide ``NDData`` objects with
+  uncertainties. [#1547]
+
+astropy.table
+^^^^^^^^^^^^^
+
+- Using a list of strings to index a table now correctly returns a new table
+  with the columns named in the list. [#1454]
+
+- Inequality operators now work properly with ``Column`` objects. [#1685]
+
+astropy.time
+^^^^^^^^^^^^
+
+- ``Time`` scale and format attributes are now shown when calling ``dir()``
+  on a ``Time`` object. [#1130]
+
+astropy.wcs
+^^^^^^^^^^^
+
+- Fixed assignment to string-like WCS attributes on Python 3. [#956]
+
+astropy.units
+^^^^^^^^^^^^^
+
+- Fixed a bug that caused the order of multiplication/division of plain
+  Numpy arrays with Quantities to matter (i.e. if the plain array comes
+  first the units were not preserved in the output). [#899]
+
+- Directly instantiated ``CompositeUnits`` were made printable without
+  crashing. [#1576]
+
+Misc
+^^^^
+
+- Fixed various modules that hard-coded ``sys.stdout`` as default arguments
+  to functions at import time, rather than using the runtime value of
+  ``sys.stdout``. [#1648]
+
+- Minor documentation fixes and enhancements [#922, #1034, #1210, #1217,
+  #1491, #1492, #1498, #1582, #1608, #1621, #1646, #1670, #1756]
+
+- Fixed a crash that could sometimes occur when running the test suite on
+  systems with platform names containing non-ASCII characters. [#1698]
+
+Other Changes and Additions
+---------------------------
+
+- General
+
+  - Astropy now follows the PSF Code of Conduct. [#1216]
+
+  - Astropy's test suite now tests all doctests in inline docstrings. Support
+    for running doctests in the reST documentation is planned to follow in
+    v0.3.1.
+
+  - Astropy's test suite can be run on multiple CPUs in parallel, often
+    greatly improving runtime, using the ``--parallel`` option. [#1040]
+
+  - A warning is now issued when using Astropy with Numpy < 1.5--much of
+    Astropy may still work in this case but it shouldn't be expected to
+    either. [#1479]
+
+  - Added automatic download/build/installation of Numpy during Astropy
+    installation if not already found. [#1483]
+
+  - Handling of metadata for the ``NDData`` and ``Table`` classes has been
+    unified by way of a common ``MetaData`` descriptor--it allows
+    instantiating an object with metadata of any mapping type, and
+    subsequently prevents replacing the mapping stored in the ``.meta``
+    attribute (only direct updates to that object are allowed). [#1686]
+
+astropy.coordinates
+^^^^^^^^^^^^^^^^^^^
+
+- Angles containing out of bounds minutes or seconds (e.g. 60) can be
+  parsed--the value modulo 60 is used with carry to the hours/minutes, and a
+  warning is issued rather than raising an exception. [#990]
+
+astropy.io.fits
+^^^^^^^^^^^^^^^
+
+- The new compression code also adds support for the ZQUANTIZ and ZDITHER0
+  keywords added in more recent versions of the FITS Tile Compression spec.
+  This includes support for lossless compression with GZIP. (#198) By default
+  no dithering is used, but the ``SUBTRACTIVE_DITHER_1`` and
+  ``SUBTRACTIVE_DITHER_2`` methods can be enabled by passing the correct
+  constants to the ``quantize_method`` argument to the ``CompImageHDU``
+  constructor. A seed can be manually specified, or automatically generated
+  using either the system clock or checksum-based methods via the
+  ``dither_seed`` argument. See the documentation for ``CompImageHDU`` for
+  more details.
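+
+- A sketch of the dithering options just described (the numeric constant
+  values follow the CFITSIO conventions and are assumptions here, as are the
+  file name and data)::
+
+      import numpy as np
+      from astropy.io import fits
+
+      data = np.random.normal(size=(256, 256)).astype(np.float32)
+      hdu = fits.CompImageHDU(data, compression_type='RICE_1',
+                              quantize_method=1,  # SUBTRACTIVE_DITHER_1
+                              dither_seed=-1)     # checksum-based seed
+      hdu.writeto('compressed.fits')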
+
+- Images compressed with the Tile Compression standard can now be larger than
+  4 GB through support of the Q format.
+
+- All HDUs now have ``.ver`` and ``.level`` attributes that return the values
+  of the EXTVER and EXTLEVEL keywords from that HDU's header, if they exist.
+  This was added for consistency with the ``.name`` attribute which returns
+  the EXTNAME value from the header.
+
+- The ``Column`` and ``ColDefs`` classes have new ``.dtype`` attributes
+  which give the Numpy dtype for the column data in the first case, and the
+  full Numpy compound dtype for each table row in the latter case.
+
+- There was an issue where new tables created defaulted the values in all
+  string columns to '0.0'. Now string columns are filled with empty strings
+  by default--this seems a less surprising default, but it may cause
+  differences with tables created with older versions of PyFITS or Astropy.
+
+astropy.io.misc
+^^^^^^^^^^^^^^^
+
+- The HDF5 reader can now refer to groups in the path as well as datasets;
+  if given a group, the first dataset in that group is read. [#1159]
+
+astropy.nddata
+^^^^^^^^^^^^^^
+
+- ``NDData`` objects have more helpful, though still rudimentary ``__str__``
+  and ``__repr__`` displays. [#1313]
+
+astropy.units
+^^^^^^^^^^^^^
+
+- Added 'cycle' unit. [#1160]
+
+- Extended units supported by the CDS formatter/parser. [#1468]
+
+- Added unicode and LaTeX symbols for liter. [#1618]
+
+astropy.wcs
+^^^^^^^^^^^
+
+- Redundant SCAMP distortion parameters are removed when SIP distortions are
+  also present. [#1278]
+
+- Added iterative implementation of ``all_world2pix`` that can be reliably
+  inverted. [#1281]
+
+
+0.2.5 (2013-10-25)
+==================
+
+Bug Fixes
+---------
+
+astropy.coordinates
+^^^^^^^^^^^^^^^^^^^
+
+- Fixed incorrect string formatting of Angles using ``precision=0``. [#1319]
+
+- Fixed string formatting of Angles using ``decimal=True`` which ignored the
+  ``precision`` argument. [#1323]
+
+- Fixed parsing of format strings using appropriate unicode characters
+  instead of the ASCII ``-`` for minus signs. [#1429]
+
+astropy.io.ascii
+^^^^^^^^^^^^^^^^
+
+- Fixed a crash in the IPAC table reader when the ``include/exclude_names``
+  option is set. [#1348]
+
+- Fixed writing AASTex tables to honor the ``tabletype`` option. [#1372]
+
+astropy.io.fits
+^^^^^^^^^^^^^^^
+
+- Improved round-tripping and preservation of manually assigned column
+  attributes (``TNULLn``, ``TSCALn``, etc.) in table HDU headers. (Note: This
+  issue was previously reported as fixed in Astropy v0.2.2 by mistake; it is
+  not fixed until v0.3.) [#996]
+
+- Fixed a bug that could cause a segfault when trying to decompress a
+  compressed HDU whose contents are truncated (due to a corrupt file, for
+  example). This still causes a Python traceback but better that than a
+  segfault. [#1332]
+
+- Newly created ``CompImageHDU`` HDUs use the correct value of the
+  ``DEFAULT_COMPRESSION_TYPE`` module-level constant instead of hard-coding
+  "RICE_1" in the header.
+
+- Fixed a corner case where, when extra memory is allocated to compress an
+  image, it could lead to unnecessary in-memory copying of the compressed
+  image data and a possible memory leak through Numpy.
+
+- Fixed a bug where assigning from an mmap'd array in one FITS file over
+  the old (also mmap'd) array in another FITS file failed to update the
+  destination file. Corresponds to PyFITS issue 25.
+
+- Some miscellaneous documentation fixes.
+
+astropy.io.votable
+^^^^^^^^^^^^^^^^^^
+
+- Added a warning for when a VOTable 1.2 file contains no ``RESOURCES``
+  elements (at least one should be present). [#1337]
+
+- Fixed a test failure specific to MIPS architecture caused by an errant
+  floating point warning. [#1179]
+
+astropy.nddata.convolution
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Prevented in-place modification of the input arrays to ``convolve()``.
+  [#1153]
+
+astropy.table
+^^^^^^^^^^^^^
+
+- Added HTML escaping for string values in tables when outputting the table
+  as HTML. [#1347]
+
+- Added a workaround for a bug in Numpy that could cause a crash when
+  accessing a table row in a masked table containing ``dtype=object``
+  columns. [#1229]
+
+- Fixed an issue similar to the one in #1229, but specific to unmasked
+  tables. [#1403]
+
+astropy.units
+^^^^^^^^^^^^^
+
+- Improved error handling for unparseable units and fixed parsing CDS units
+  without mantissas in the exponent. [#1288]
+
+- Added a physical type for spectral flux density. [#1410]
+
+- Normalized conversions that should result in a scale of exactly 1.0 to
+  round off slight floating point imprecisions. [#1407]
+
+- Added support in the CDS unit parser/formatter for unusual unit prefixes
+  that are nonetheless required to be supported by that convention. [#1426]
+
+- Fixed the parsing of ``sqrt()`` in unit format strings which was returning
+  ``unit ** 2`` instead of ``unit ** 0.5``. [#1458]
+
+astropy.wcs
+^^^^^^^^^^^
+
+- When passing a single array to the wcs transformation functions
+  (``astropy.wcs.WCS.all_pix2world``, etc.), its second dimension must now
+  exactly match the number of dimensions in the transformation. [#1395]
+
+- Improved error message when incorrect arguments are passed to
+  ``WCS.wcs_world2pix``. [#1394]
+
+- Fixed a crash when trying to read WCS from FITS headers on Python 3.3
+  in Windows. [#1363]
+
+- Only headers that are required as part of the WCSLIB C API are installed
+  by the package, per request of system packagers. [#1666]
+
+Misc
+^^^^
+
+- Fixed a crash when the ``COLUMNS`` environment variable is set to a
+  non-integer value. [#1291]
+
+- Fixed a bug in ``ProgressBar.map`` where ``multiprocess=True`` could cause
+  it to hang on waiting for the process pool to be destroyed (see the sketch
+  at the end of this section). [#1381]
+
+- Fixed a crash on Python 3.2 when affiliated packages try to use the
+  ``astropy.utils.data.get_pkg_data_*`` functions. [#1256]
+
+- Fixed a minor path normalization issue that could occur on Windows in
+  ``astropy.utils.data.get_pkg_data_filename``. [#1444]
+
+- Fixed an annoyance where configuration items intended only for testing
+  showed up in users' astropy.cfg files. [#1477]
+
+- Prevented crashes in exception logging in unusual cases where no traceback
+  is associated with the exception. [#1518]
+
+- Fixed a crash when running the tests in unusual environments where
+  ``sys.stdout.encoding`` is ``None``. [#1530]
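+
+- A sketch of the ``ProgressBar.map`` call affected by the fix above (the
+  worker function is illustrative)::
+
+      from astropy.utils.console import ProgressBar
+
+      def square(x):
+          return x * x
+
+      results = ProgressBar.map(square, range(100), multiprocess=True)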
+
+- Miscellaneous documentation fixes and improvements [#1308, #1317, #1377,
+  #1393, #1362, #1516]
+
+Other Changes and Additions
+---------------------------
+
+- Astropy installation now requests setuptools >= 0.7 during
+  build/installation if neither distribute nor setuptools >= 0.7 is already
+  installed. In other words, if ``import setuptools`` fails, ``ez_setup.py``
+  is used to bootstrap the latest setuptools (rather than using
+  ``distribute_setup.py`` to bootstrap the now obsolete distribute package).
+  [#1197]
+
+- When importing Astropy from a source checkout without having built the
+  extension modules first an ``ImportError`` is raised rather than a
+  ``SystemExit`` exception. [#1269]
+
+
+0.2.4 (2013-07-24)
+==================
+
+Bug Fixes
+---------
+
+astropy.coordinates
+^^^^^^^^^^^^^^^^^^^
+
+- Fixed the angle parser to support parsing the string "1 degree". [#1168]
+
+astropy.cosmology
+^^^^^^^^^^^^^^^^^
+
+- Fixed a crash in the ``comoving_volume`` method on non-flat cosmologies
+  when passing it an array of redshifts.
+
+astropy.io.ascii
+^^^^^^^^^^^^^^^^
+
+- Fixed a bug that prevented saving changes to the comment symbol when
+  writing changes to a table. [#1167]
+
+astropy.io.fits
+^^^^^^^^^^^^^^^
+
+- Added a workaround for a bug in 64-bit OSX that could cause truncation when
+  writing files greater than 2^32 bytes in size. [#839]
+
+astropy.io.votable
+^^^^^^^^^^^^^^^^^^
+
+- Fixed incorrect reading of tables containing multiple ``<RESOURCE>``
+  elements. [#1223]
+
+astropy.table
+^^^^^^^^^^^^^
+
+- Fixed a bug where ``Table.remove_column`` and ``Table.rename_column``
+  could cause a masked table to lose its masking. [#1120]
+
+- Fixed bugs where subclasses of ``Table`` did not preserve their class in
+  certain operations. [#1142]
+
+- Fixed a bug where slicing a masked table did not preserve the mask. [#1187]
+
+astropy.units
+^^^^^^^^^^^^^
+
+- Fixed a bug where the ``.si`` and ``.cgs`` properties of dimensionless
+  ``Quantity`` objects raised a ``ZeroDivisionError``. [#1150]
+
+- Fixed a bug where multiple subsequent calls to the ``.decompose()`` method
+  on array quantities applied a scale factor each time. [#1163]
+
+Misc
+^^^^
+
+- Fixed an installation crash that could occur sometimes on Debian/Ubuntu
+  and other \*NIX systems where ``pkg_resources`` can be installed without
+  installing ``setuptools``. [#1150]
+
+- Updated the ``distribute_setup.py`` bootstrapper to use setuptools >= 0.7
+  when installing on systems that don't already have an up to date version
+  of distribute/setuptools. [#1180]
+
+- Changed the ``version.py`` template so that Astropy affiliated packages
+  can (and they should) use their own ``cython_version.py`` and
+  ``utils._compiler`` modules where appropriate. This issue only pertains to
+  affiliated package maintainers. [#1198]
+
+- Fixed a corner case where the default config file generation could crash
+  if building with matplotlib but *not* Sphinx installed in a virtualenv.
+  [#1225]
+
+- Fixed a crash that could occur in the logging module on systems that
+  don't have a default preferred encoding (in particular this happened
+  in some versions of PyCharm). [#1244]
+
+- The Astropy log now supports passing non-string objects (and calling
+  ``str()`` on them by default) to the logging methods, in line with Python's
+  standard logging API. [#1267]
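+
+- For instance (a minimal sketch)::
+
+      from astropy import log
+
+      log.info({'stage': 'ingest', 'rows': 42})  # str() is applied for you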
+
+- Minor documentation fixes [#582, #696, #1154, #1194, #1212, #1213, #1246,
+  #1252]
+
+Other Changes and Additions
+---------------------------
+
+astropy.cosmology
+^^^^^^^^^^^^^^^^^
+
+- Added a new ``Planck13`` object representing the Planck 2013 results.
+  [#895]
+
+astropy.units
+^^^^^^^^^^^^^
+
+- Performance improvements in initialization of ``Quantity`` objects with
+  a large number of elements. [#1231]
+
+
+0.2.3 (2013-05-30)
+==================
+
+Bug Fixes
+---------
+
+astropy.time
+^^^^^^^^^^^^
+
+- Fixed inaccurate handling of leap seconds when converting from UTC to UNIX
+  timestamps. [#1118]
+
+- Tightened required accuracy in many of the time conversion tests. [#1121]
+
+Misc
+^^^^
+
+- Fixed a regression that was introduced in v0.2.2 by the fix to issue #992
+  that was preventing installation of Astropy affiliated packages that use
+  Astropy's setup framework. [#1124]
+
+
+0.2.2 (2013-05-21)
+==================
+
+Bug Fixes
+---------
+
+astropy.io
+^^^^^^^^^^
+
+- Fixed issues in both the ``fits`` and ``votable`` sub-packages where array
+  byte order was not being handled consistently, leading to possible crashes
+  especially on big-endian systems. [#1003]
+
+astropy.io.fits
+^^^^^^^^^^^^^^^
+
+- When an error occurs opening a file in fitsdiff the exception message will
+  now at least mention which file had the error.
+
+- Fixed a couple of cases where creating a new table using TDIMn in some of
+  the columns could cause a crash.
+
+- Slightly refactored how tables containing variable-length array columns are
+  handled to add two improvements: fixing an issue where accessing the data
+  after a call to the ``astropy.io.fits.getdata`` convenience function caused
+  an exception, and allowing the VLA data to be read from an existing mmap of
+  the FITS file.
+
+- Fixed a bug where attempting to open a non-existent file on Python 3
+  caused a seemingly unrelated traceback.
+
+- Fixed an issue in the tests that caused some tests to fail if Astropy is
+  installed with read-only permissions.
+
+- Fixed a bug where instantiating a ``BinTableHDU`` from a numpy array
+  containing boolean fields converted all the values to ``False``.
+
+- Fixed an issue where passing an array of integers into the constructor of
+  ``Column()`` when the column type is floats of the same byte width caused
+  the column array to become garbled.
+
+- Fixed inconsistent behavior in creating CONTINUE cards from byte strings
+  versus unicode strings in Python 2--CONTINUE cards can now be created
+  properly from unicode strings (so long as they are convertible to ASCII).
+
+- Fixed a bug in parsing HIERARCH keywords that do not have a space after the
+  first equals sign (before the value).
+
+- Prevented extra leading whitespace on HIERARCH keywords from being treated
+  as part of the keyword.
+
+- Fixed a bug where HIERARCH keywords containing lower-case letters were
+  mistakenly marked as invalid during header validation, along with an
+  ancillary issue where the ``Header.index()`` method did not work correctly
+  with HIERARCH keywords containing lower-case letters.
+
+- Disallowed assigning NaN and Inf floating point values as header values,
+  since the FITS standard does not define a way to represent them in a
+  header. Because this is undefined, the previous behavior did not make
+  sense and produced invalid FITS files (see the sketch below). [#954]
+
+- Fixed an obscure issue that can occur on systems that don't have flush to
+  memory-mapped files implemented (namely GNU Hurd). [#968]
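+
+- A sketch of the new header-value rule (the keyword is illustrative)::
+
+      from astropy.io import fits
+
+      header = fits.Header()
+      header['MEANVAL'] = 1.5           # fine
+      header['MEANVAL'] = float('nan')  # now rejected with an error instead
+                                        # of producing an invalid card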
+
+astropy.io.votable
+^^^^^^^^^^^^^^^^^^
+
+- Stopped deprecation warnings from the ``astropy.io.votable`` package that
+  could occur during setup. [#970]
+
+- Fixed an issue where INFO elements were being incorrectly dropped when
+  occurring inside a TABLE element. [#1000]
+
+- Fixed obscure test failures on MIPS platforms. [#1010]
+
+astropy.nddata.convolution
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Fixed an issue in ``make_kernel()`` when using an Airy function kernel.
+  Also removed the superfluous 'brickwall' option. [#939]
+
+astropy.table
+^^^^^^^^^^^^^
+
+- Fixed a crash that could occur when adding a row to an empty (rowless)
+  table with masked columns. [#973]
+
+- Made it possible to assign to one table row from the value of another row,
+  effectively making it easier to copy rows, for example. [#1019]
+
+astropy.time
+^^^^^^^^^^^^
+
+- Added appropriate ``__copy__`` and ``__deepcopy__`` behavior; this
+  omission caused a seemingly unrelated error in FK5 coordinate separation.
+  [#891]
+
+astropy.units
+^^^^^^^^^^^^^
+
+- Fixed an issue where the ``isiterable()`` utility returned ``True`` for
+  quantities with scalar values. Added an ``__iter__`` method for the
+  ``Quantity`` class and fixed ``isiterable()`` to catch false positives.
+  [#878]
+
+- Fixed previously undefined behavior when multiplying a unit by a string.
+  [#949]
+
+- Added 'time' as a physical type--this was a simple omission. [#959]
+
+- Fixed issues with pickling unit objects so as to play nicer with the
+  multiprocessing module. [#974]
+
+- Made it more difficult to accidentally override existing units with a new
+  unit of the same name. [#1070]
+
+- Added several more physical types and units that were previously omitted,
+  including 'mass density', 'specific volume', 'molar volume', 'momentum',
+  'angular momentum', 'angular speed', 'angular acceleration', 'electric
+  current', 'electric current density', 'electric field strength', 'electric
+  flux density', 'electric charge density', 'permittivity', 'electromagnetic
+  field strength', 'radiant intensity', 'data quantity', 'bandwidth'; and
+  'knots', 'nautical miles', 'becquerels', and 'curies' respectively. [#1072]
+
+Misc
+^^^^
+
+- Fixed a permission error that could occur when running ``astropy.test()``
+  on Python 3 when Astropy is installed as root. [#811]
+
+- Made it easier to filter warnings from the ``convolve()`` function and
+  from ``Quantity`` objects. [#853]
+
+- Fixed a crash that could occur in Python 3 when generation of the default
+  config file fails during setup. [#952]
+
+- Fixed an unrelated error message that could occur when trying to import
+  astropy from a source checkout without having built the extension modules
+  first. This issue was claimed to be fixed in v0.2.1, but the fix itself had
+  a bug. [#971]
+
+- Fixed a crash that could occur when running the ``build_sphinx`` setup
+  command in Python 3. [#977]
+
+- Added a more helpful error message when trying to run the
+  ``setup.py build_sphinx`` command when Sphinx is not installed. [#1027]
+
+- Minor documentation fixes and restructuring.
+  [#935, #967, #978, #1004, #1028, #1047]
+
+Other Changes and Additions
+---------------------------
+
+- Some performance improvements to the ``astropy.units`` package, in particular
+  improving the time it takes to import the sub-package.
[#1015] + + +0.2.1 (2013-04-03) +================== + +Bug Fixes +--------- + +astropy.coordinates +^^^^^^^^^^^^^^^^^^^ + +- Fixed encoding errors that could occur when formatting coordinate objects + in code using ``from __future__ import unicode_literals``. [#817] + +- Fixed a bug where the minus sign was dropped when string formatting dms + coordinates with -0 degrees. [#875] + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- Properly supports the ZQUANTIZ keyword used to support quantization + level--this includes working support for lossless GZIP compression of + images. + +- Fixed support for opening gzipped FITS files in a writeable mode. [#256] + +- Added a more helpful exception message when trying to read invalid values + from a table when the required ``TNULLn`` keyword is missing. [#309] + +- More refactoring of the tile compression handling to work around a + potential memory access violation that was particularly prevalent on + Windows. [#507] + +- Fixed an integer size mismatch in the compression module that could affect + 32-bit systems. [#786] + +- Fixed malformatting of the ``TFORMn`` keywords when writing compressed + image tables (they omitted the max array length parameter from the + variable-length array format). + +- Fixed a crash that could occur when writing a table containing multi- + dimensional array columns from an existing file into a new file. + +- Fixed a bug in fitsdiff that reported two header keywords containing NaN + as having different values. + +astropy.io.votable +^^^^^^^^^^^^^^^^^^ + +- Fixed links to the ``astropy.io.votable`` documentation in the VOTable + validator output. [#806] + +- When reading VOTables containing integers that are out of range for their + column type, display a warning rather than raising an exception. [#825] + +- Changed the default string format for floating point values for better + round-tripping. [#856] + +- Fixed opening VOTables through the ``Table.read()`` interface for tables + that have no names. [#927] + +- Fixed creation of VOTables from an Astropy table that does not have a data + mask. [#928] + +- Minor documentation fixes. [#932] + +astropy.nddata.convolution +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- Added better handling of ``inf`` values to the ``convolve_fft`` family of + functions. [#893] + +astropy.table +^^^^^^^^^^^^^ + +- Fixed silent failure to assign values to a row on multiple columns. [#764] + +- Fixed various buggy behavior when viewing a table after sorting by one of + its columns. [#829] + +- Fixed using ``numpy.where()`` with table indexing. [#838] + +- Fixed a bug where opening a remote table with ``Table.read()`` could cause + the entire table to be downloaded twice. [#845] + +- Fixed a bug where ``MaskedColumn`` no longer worked if the column being + masked is renamed. [#916] + +astropy.units +^^^^^^^^^^^^^ + +- Added missing capability for array ``Quantity``\s to be initializable by + a list of ``Quantity``\s. [#835] + +- Fixed the definition of year and lightyear to be in terms of Julian year + per the IAU definition. [#861] + +- "degree" was removed from the list of SI base units. [#863] + +astropy.wcs +^^^^^^^^^^^ + +- Fixed ``TypeError`` when calling ``WCS.to_header_string()``. [#822] + +- Added new method ``WCS.all_world2pix`` for converting from world + coordinates to pixel space, including inversion of the astrometric + distortion correction. [#1066, #1281] + +Misc +^^^^ + +- Fixed a minor issue when installing with ``./setup.py develop`` on a fresh + git clone. 
This is likely only of interest to developers on Astropy.
+  [#725]
+
+- Fixed a crash with ``ImportError: No module named 'astropy.version'`` when
+  running setup.py from a source checkout for the first time on OSX with
+  Python 3.3. [#820]
+
+- Fixed an installation issue where running ``./setup.py install`` or
+  installing with pip created the ``.astropy`` directory in the home
+  directory of the user running the command. The user's ``.astropy``
+  directory should only be created when they use Astropy, not when they
+  install it. [#867]
+
+- Fixed an exception when creating a ``ProgressBar`` with a "total" of 0.
+  [#752]
+
+- Added better documentation of behavior that can occur when trying to import
+  the astropy package from within a source checkout without first building
+  the extension modules. [#795, #864]
+
+- Added link to the installation instructions in the README. [#797]
+
+- Catches segfaults in xmllint which can occur sometimes and is otherwise out
+  of our control. [#803]
+
+- Minor changes to the documentation template. [#805]
+
+- Fixed a minor exception handling bug in ``download_file()``. [#808]
+
+- Added cleanup of any temporary files if an error occurs in
+  ``download_file()``. [#857]
+
+- Filesystem free space is checked for before attempting to download a file
+  with ``download_file()``. [#858]
+
+- Fixed package data locating to work across symlinks--required to work with
+  some OS packaging layouts. [#827]
+
+- Fixed a bug when building Cython extensions where hidden files containing
+  ``.pyx`` extensions could cause the build to crash. This can be an issue
+  with software and filesystems that autogenerate hidden files. [#834]
+
+- Fixed bug that could cause a "script" called README.rst to be installed
+  in a bin directory. [#852]
+
+- Fixed some miscellaneous and mostly rare reference leaks caught by
+  cpychecker. [#914]
+
+Other Changes and Additions
+---------------------------
+
+- Added logo and branding for Windows binary installers. [#741]
+
+- Upgraded the included version of libexpat to 2.1.0. [#781]
+
+- ~25% performance improvement in unit composition/decomposition. [#836]
+
+- Added previously missing LaTeX formatting for ``L_sun`` and ``R_sun``.
+  [#841]
+
+- ``ConfigurationItem``\s now have a more useful and informative
+  ``__repr__`` and improved documentation for how to use them. [#855]
+
+- Added a friendlier error message when trying to import astropy from a
+  source checkout without first building the extension modules inplace.
+  [#864]
+
+- py.test now outputs more system information for help in debugging issues
+  from users. [#869]
+
+- Added unit definitions "mas" and "uas" for "milliarcsecond" and
+  "microarcsecond" respectively. [#892]
+
+
+0.2 (2013-02-19)
+================
+
+New Features
+------------
+
+astropy.coordinates
+^^^^^^^^^^^^^^^^^^^
+
+- This new subpackage contains a representation of celestial coordinates,
+  and provides a wide range of related functionality. While
+  fully-functional, it is a work in progress and parts of the API may
+  change in subsequent releases.
+
+astropy.cosmology
+^^^^^^^^^^^^^^^^^
+
+- Updated to include cosmologies with variable dark energy equations of
+  state. (This introduces some API incompatibilities with the older
+  Cosmology objects).
+
+- Added parameters for relativistic species (photons, neutrinos) to the
+  astropy.cosmology classes. The current treatment assumes that neutrinos
+  are massless. [#365]
+
+- Added a WMAP9 object using the final (9-year) WMAP parameters from
+  Hinshaw et al. 2013. It has also been made the default cosmology.
+  [#629, #724]
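+
+- For example (a minimal sketch)::
+
+      from astropy.cosmology import WMAP9
+
+      WMAP9.H0                      # Hubble constant of the 9-year results
+      WMAP9.comoving_distance(1.0)  # comoving distance to z = 1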
+
+- astropy.table I/O infrastructure for custom readers/writers
+  implemented. [#305]
+
+- Added support for reading/writing HDF5 files [#461]
+
+- Added support for masked tables with missing or invalid data [#451]
+
+- New ``astropy.time`` sub-package. [#332]
+
+- New ``astropy.units`` sub-package that includes a class for units
+  (``astropy.units.Unit``) and scalar quantities that have units
+  (``astropy.units.Quantity``). [#370, #445]
+
+  This has the following effects on other sub-packages:
+
+  - In ``astropy.wcs``, the ``wcs.cunit`` list now takes and returns
+    ``astropy.units.Unit`` objects. [#379]
+
+  - In ``astropy.nddata``, units are now stored as ``astropy.units.Unit``
+    objects. [#382]
+
+  - In ``astropy.table``, units on columns are now stored as
+    ``astropy.units.Unit`` objects. [#380]
+
+  - In ``astropy.constants``, constants are now stored as
+    ``astropy.units.Quantity`` objects. [#529]
+
+astropy.io.ascii
+^^^^^^^^^^^^^^^^
+
+- Improved integration with the ``astropy.table`` Table class so that
+  table and column metadata (e.g. keywords, units, description,
+  formatting) are directly available in the output table object. The
+  CDS, DAOphot, and IPAC format readers now provide this type of
+  integrated metadata.
+
+- Changed to using ``astropy.table`` masked tables instead of NumPy
+  masked arrays for tables with missing values.
+
+- Added SExtractor table reader to ``astropy.io.ascii`` [#420]
+
+- Removed the Memory reader class which was used to convert data input
+  passed to the ``write`` function into an internal table. Instead
+  ``write`` instantiates an astropy Table object using the data
+  input to ``write``.
+
+- Removed the NumpyOutputter as the output of reading a table is now
+  always a ``Table`` object.
+
+- Removed the option of supplying a function as a column output
+  formatter.
+
+- Added a new ``strip_whitespace`` keyword argument to the ``write``
+  function. This controls whether whitespace is stripped from
+  the left and right sides of table elements before writing.
+  Default is True.
+
+- Fixed a bug in reading IPAC tables with null values.
+
+- Generalized I/O infrastructure so that ``astropy.nddata`` can also have
+  custom readers/writers [#659]
+
+astropy.wcs
+^^^^^^^^^^^
+
+- From updating the underlying wcslib 4.16:
+
+  - When ``astropy.wcs.WCS`` constructs a default coordinate representation
+    it will give it the special name "DEFAULTS", and will not report "Found
+    one coordinate representation".
+
+Other Changes and Additions
+---------------------------
+
+- A configuration file with all options set to their defaults is now
+  generated when astropy is installed. This file will be pulled in as the
+  users' astropy configuration file the first time they ``import astropy``.
+  [#498]
+
+- Astropy doc themes moved into ``astropy.sphinx`` to allow affiliated
+  packages to access them.
+
+- Added expanded documentation for the ``astropy.cosmology`` sub-package.
+  [#272]
+
+- Added option to disable building of "legacy" packages (pyfits, vo, etc.).
+
+- The value of the astronomical unit (au) has been updated to that adopted by
+  IAU 2012 Resolution B2, and the values of the pc and kpc constants have
+  been updated to reflect this. [#368]
+
+- Added links to the documentation pages to directly edit the documentation
+  on GitHub. [#347]
+
+- Several updates merged from ``pywcs`` into ``astropy.wcs`` [#384]:
+
+- Improved the reading of distortion images.
+ +- Added a new option to choose whether or not to write SIP coefficients. + +- Uses the ``relax`` option by default so that non-standard keywords are + allowed. [#585] + + +- Added HTML representation of tables in IPython notebook [#409] + +- Rewrote CFITSIO-based backend for handling tile compression of FITS files. + It now uses a standard CFITSIO instead of heavily modified pieces of CFITSIO + as before. Astropy ships with its own copy of CFITSIO v3.30, but system + packagers may choose instead to strip this out in favor of a + system-installed version of CFITSIO. This corresponds to PyFITS ticket 169. + [#318] + +- Moved ``astropy.config.data`` to ``astropy.utils.data`` and re-factored the + I/O routines to separate out the generic I/O code that can be used to open + any file or resource from the code used to access Astropy-related data. The + 'core' I/O routine is now ``get_readable_fileobj``, which can be used to + access any local as well as remote data, supports caching, and can decompress + gzip and bzip2 files on-the-fly. [#425] + +- Added a classmethod to + ``astropy.coordinates.coordsystems.SphericalCoordinatesBase`` that performs a + name resolve query using Sesame to retrieve coordinates for the requested + object. This works for any subclass of ``SphericalCoordinatesBase``, but + requires an internet connection. [#556] + +- astropy.nddata.convolution removed requirement of PyFFTW3; uses Numpy's + FFT by default instead with the added ability to specify an FFT + implementation to use. [#660] + + +Bug Fixes +--------- + +astropy.io.ascii +^^^^^^^^^^^^^^^^ + +- Fixed crash when pprinting a row with INDEF values. [#511] + +- Fixed failure when reading DAOphot files with empty keyword values. [#666] + +astropy.io.fits +^^^^^^^^^^^^^^^ + +- Improved handling of scaled images and pseudo-unsigned integer images in + compressed image HDUs. They now work more transparently like normal image + HDUs with support for the ``do_not_scale_image_data`` and ``uint`` options, + as well as ``scale_back`` and ``save_backup``. The ``.scale()`` method + works better too. Corresponds to PyFITS ticket 88. + +- Permits non-string values for the EXTNAME keyword when reading in a file, + rather than throwing an exception due to the malformatting. Added + verification for the format of the EXTNAME keyword when writing. + Corresponds to PyFITS ticket 96. + +- Added support for EXTNAME and EXTVER in PRIMARY HDUs. That is, if EXTNAME + is specified in the header, it will also be reflected in the ``.name`` + attribute and in ``fits.info()``. These keywords used to be verboten in + PRIMARY HDUs, but the latest version of the FITS standard allows them. + Corresponds to PyFITS ticket 151. + +- HCOMPRESS can again be used to compress data cubes (and higher-dimensional + arrays) so long as the tile size is effectively 2-dimensional. In fact, + compatible tile sizes will automatically be used even if they're not + explicitly specified. Corresponds to PyFITS ticket 171. + +- Fixed a bug that could cause a deadlock in the filesystem on OSX when + reading the data from certain types of FITS files. This only occurred + when used in conjunction with Numpy 1.7. [#369] + +- Added support for the optional ``endcard`` parameter in the + ``Header.fromtextfile()`` and ``Header.totextfile()`` methods. Although + ``endcard=False`` was a reasonable default assumption, there are still text + dumps of FITS headers that include the END card, so this should have been + more flexible. Corresponds to PyFITS ticket 176. 
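+
+- A sketch of the ``endcard`` option just described (the file names are
+  hypothetical)::
+
+      from astropy.io import fits
+
+      header = fits.Header.fromtextfile('header.txt', endcard=True)
+      header.totextfile('header_out.txt', endcard=True)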
+
+- Fixed a crash when running fitsdiff on two empty (that is, zero row)
+  tables. Corresponds to PyFITS ticket 178.
+
+- Fixed an issue where opening a FITS file containing a random group HDU in
+  update mode could result in an unnecessary rewriting of the file even if
+  no changes were made. This corresponds to PyFITS ticket 179.
+
+- Fixed a crash when generating diff reports from diffs using the
+  ``ignore_comments`` option. Corresponds to PyFITS ticket 181.
+
+- Fixed some bugs with WCS distortion paper record-valued keyword cards:
+
+  - Cards that looked kind of like RVKCs but were not intended to be were
+    over-permissively treated as such--commentary keywords like COMMENT and
+    HISTORY were particularly affected. Corresponds to PyFITS ticket 183.
+
+  - Looking up a card in a header by its standard FITS keyword only should
+    always return the raw value of that card. That way cards containing
+    values that happen to be valid RVKCs but were not intended to be will
+    still be treated like normal cards. Corresponds to PyFITS ticket 184.
+
+  - Looking up a RVKC in a header with only part of the field-specifier (for
+    example "DP1.AXIS" instead of "DP1.AXIS.1") was implicitly treated as a
+    wildcard lookup. Corresponds to PyFITS ticket 184.
+
+- Fixed a crash when diffing two FITS files where at least one contains a
+  compressed image HDU which was erroneously recognized as a table rather
+  than as an image. Corresponds to PyFITS ticket 187.
+
+- Fixed a bug where opening a file containing compressed image HDUs in
+  'update' mode and then immediately closing it without making any changes
+  caused the file to be rewritten unnecessarily.
+
+- Fixed two memory leaks that could occur when writing compressed image data,
+  or in some cases when opening files containing compressed image HDUs in
+  'update' mode.
+
+- Fixed a bug where ``ImageHDU.scale(option='old')`` wasn't working at
+  all--it was not restoring the image to its original BSCALE and BZERO
+  values.
+
+- Fixed a bug when writing out files containing zero-width table columns,
+  where the TFIELDS keyword would be updated incorrectly, leaving the table
+  largely unreadable.
+
+- Fixed a minor string formatting issue.
+
+- Fixed bugs in the backwards compatibility layer for the ``CardList.index``
+  and ``CardList.count`` methods. Corresponds to PyFITS ticket 190.
+
+- Improved ``__repr__`` and text file representation of cards with long
+  values that are split into CONTINUE cards. Corresponds to PyFITS ticket
+  193.
+
+- Fixed a crash when trying to assign a long (> 72 character) value to blank
+  ('') keywords. This also changed how blank keywords are represented--there
+  are still exactly 8 spaces before any commentary content can begin; this
+  *may* affect the exact display of header cards that assumed there could be
+  fewer spaces in a blank keyword card before the content begins. However,
+  the current approach is more in line with the requirements of the FITS
+  standard. Corresponds to PyFITS ticket 194.
+
+astropy.io.votable
+^^^^^^^^^^^^^^^^^^
+
+- The ``Table`` class now maintains a single array object which is a
+  Numpy masked array. For variable-length columns, the object that
+  is stored there is also a Numpy masked array.
+
+- Changed the ``pedantic`` configuration option to be ``False`` by default
+  due to the vast proliferation of non-compliant VO Tables (see the sketch
+  below). [#296]
+
+- Renamed ``astropy.io.vo`` to ``astropy.io.votable``.
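+
+- A sketch of re-enabling strict parsing per call (the file name is
+  hypothetical)::
+
+      from astropy.io.votable import parse
+
+      votable = parse('catalog.xml', pedantic=True)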
+ +astropy.table +^^^^^^^^^^^^^ + +- Added a workaround for an upstream bug in Numpy 1.6.2 that could cause + a maximum recursion depth RuntimeError when printing table rows. [#341] + +astropy.wcs +^^^^^^^^^^^ + +- Updated to wcslib 4.15 [#418] + +- Fixed a problem with handling FITS headers on locales that do not use + dot as a decimal separator. This required an upstream fix to wcslib which + is included in wcslib 4.14. [#313] + +- Fixed some tests that could fail due to missing/incorrect logging + configuration--ensures that tests don't have any impact on the default log + location or contents. [#291] + +- Various minor documentation fixes [#293 and others] + +- Fixed a bug where running the tests with the ``py.test`` command still tried + to replace the system-installed pytest with the one bundled with Astropy. + [#454] + +- Improved multiprocessing compatibility for file downloads. [#615] + +- Fixed handling of Cython modules when building from a source checkout of a + tagged release version. [#594] + +- Added a workaround for a bug in Sphinx that could occur when using the + ``:tocdepth:`` directive. [#595] + +- Minor VOTable fixes [#596] + +- Fixed how ``setup.py`` uses ``distribute_setup.py`` to prevent possible + ``VersionConflict`` errors when an older version of distribute is already + installed on the user's system. [#616][#640] + +- Changed use of ``log.warn`` in the logging module to ``log.warning`` since + the former is deprecated. [#624] + + +0.1 (2012-06-19) +================ + +- Initial release. diff --git a/LICENSE.rst b/LICENSE.rst new file mode 100644 index 0000000..1110225 --- /dev/null +++ b/LICENSE.rst @@ -0,0 +1,26 @@ +Copyright (c) 2011-2017, Astropy Developers + +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. +* Neither the name of the Astropy Team nor the names of its contributors may be + used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/PKG-INFO b/PKG-INFO
new file mode 100644
index 0000000..b66f335
--- /dev/null
+++ b/PKG-INFO
@@ -0,0 +1,30 @@
+Metadata-Version: 1.2
+Name: astropy
+Version: 2.0.3
+Summary: Community-developed python astronomy tools
+Home-page: http://astropy.org
+Author: The Astropy Developers
+Author-email: astropy.team@gmail.com
+License: BSD
+Description-Content-Type: UNKNOWN
+Description: 
+        Astropy is a package intended to contain core functionality and some
+        common tools needed for performing astronomy and astrophysics research with
+        Python. It also provides an index for other astronomy packages and tools for
+        managing them.
+
+Keywords: astronomy,astrophysics,cosmology,space,science,units,table,wcs,vo,samp,coordinate,fits,modeling,models,fitting,ascii
+Platform: UNKNOWN
+Classifier: Intended Audience :: Science/Research
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: C
+Classifier: Programming Language :: Cython
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Topic :: Scientific/Engineering :: Astronomy
+Classifier: Topic :: Scientific/Engineering :: Physics
+Requires: numpy
+Provides: astropy
+Requires-Python: >=2.7
diff --git a/README.rst b/README.rst
new file mode 100644
index 0000000..bb766dd
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,51 @@
+=======
+Astropy
+=======
+
+.. image:: https://img.shields.io/pypi/v/astropy.svg
+    :target: https://pypi.python.org/pypi/astropy
+
+Astropy (http://www.astropy.org) is a package intended to contain much of
+the core functionality and some common tools needed for performing
+astronomy and astrophysics with Python.
+
+Releases are `registered on PyPI <https://pypi.python.org/pypi/astropy>`_,
+and development is occurring at the
+`project's github page <https://github.com/astropy/astropy>`_.
+
+For installation instructions, see the
+`online documentation <http://docs.astropy.org>`_
+or ``docs/install.rst`` in this source distribution.
+
+For system packagers: Please install Astropy with the command::
+
+    $ python setup.py --offline install
+
+This will prevent the astropy_helpers bootstrap script from attempting to
+reach out to PyPI.
+
+Project Status
+--------------
+
+.. image:: https://travis-ci.org/astropy/astropy.svg
+    :target: https://travis-ci.org/astropy/astropy
+    :alt: Astropy's Travis CI Status
+
+.. image:: https://coveralls.io/repos/astropy/astropy/badge.svg
+    :target: https://coveralls.io/r/astropy/astropy
+    :alt: Astropy's Coveralls Status
+
+.. image:: https://ci.appveyor.com/api/projects/status/ym7lxajcs5qwm31e/branch/master?svg=true
+    :target: https://ci.appveyor.com/project/Astropy/astropy/branch/master
+    :alt: Astropy's Appveyor Status
+
+For an overview of the testing and build status of all packages associated
+with the Astropy Project, see http://dashboard.astropy.org.
+
+.. image:: https://img.shields.io/badge/powered%20by-NumFOCUS-orange.svg?style=flat&colorA=E1523D&colorB=007D8A
+    :target: http://numfocus.org
+    :alt: Powered by NumFOCUS
+
+License
+-------
+Astropy is licensed under a 3-clause BSD style license - see the
+``LICENSE.rst`` file.
diff --git a/ah_bootstrap.py b/ah_bootstrap.py
new file mode 100644
index 0000000..786b8b1
--- /dev/null
+++ b/ah_bootstrap.py
@@ -0,0 +1,958 @@
+"""
+This bootstrap module contains code for ensuring that the astropy_helpers
+package will be importable by the time the setup.py script runs. It also
+includes some workarounds to ensure that a recent-enough version of
+setuptools is being used for the installation.
+
+This module should be the first thing imported in the setup.py of
+distributions that make use of the utilities in astropy_helpers. If the
+distribution ships with its own copy of astropy_helpers, this module will
+first attempt to import from the shipped copy. However, it will also check
+PyPI to see if there are any bug-fix releases on top of the current version
+that may be useful to get past platform-specific bugs that have been fixed.
+When running setup.py, use the ``--offline`` command-line option to disable
+the auto-upgrade checks.
+
+When this module is imported or otherwise executed it automatically calls a
+main function that attempts to read the project's setup.cfg file, which it
+checks for a configuration section called ``[ah_bootstrap]``. The presence
+of that section, and options therein, determines the next step taken: if it
+contains an option called ``auto_use`` with a value of ``True``, it will
+automatically call the main function of this module called
+`use_astropy_helpers` (see that function's docstring for full details).
+Otherwise no further action is taken (however,
+``ah_bootstrap.use_astropy_helpers`` may be called manually from within the
+setup.py script).
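+
+A typical ``setup.cfg`` section enabling this behavior (an illustrative
+sketch) is::
+
+    [ah_bootstrap]
+    auto_use = True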
+
+Additional options in the ``[ah_bootstrap]`` section of setup.cfg have the
+same names as the arguments to `use_astropy_helpers`, and can be used to
+configure the bootstrap script when ``auto_use = True``.
+
+See https://github.com/astropy/astropy-helpers for more details, and for the
+latest version of this module.
+"""
+
+import contextlib
+import errno
+import imp
+import io
+import locale
+import os
+import re
+import subprocess as sp
+import sys
+
+try:
+    from ConfigParser import ConfigParser, RawConfigParser
+except ImportError:
+    from configparser import ConfigParser, RawConfigParser
+
+
+if sys.version_info[0] < 3:
+    _str_types = (str, unicode)
+    _text_type = unicode
+    PY3 = False
+else:
+    _str_types = (str, bytes)
+    _text_type = str
+    PY3 = True
+
+
+# What follows are several import statements meant to deal with install-time
+# issues with either missing or misbehaving packages (including making sure
+# setuptools itself is installed):
+
+
+# Some pre-setuptools checks to ensure that either distribute or setuptools >=
+# 0.7 is used (over pre-distribute setuptools) if it is available on the path;
+# otherwise the latest setuptools will be downloaded and bootstrapped with
+# ``ez_setup.py``. This used to be included in a separate file called
+# setuptools_bootstrap.py; but it was combined into ah_bootstrap.py
+try:
+    import pkg_resources
+    _setuptools_req = pkg_resources.Requirement.parse('setuptools>=0.7')
+    # This may raise a DistributionNotFound in which case no version of
+    # setuptools or distribute is properly installed
+    _setuptools = pkg_resources.get_distribution('setuptools')
+    if _setuptools not in _setuptools_req:
+        # Older version of setuptools; check if we have distribute; again if
+        # this results in DistributionNotFound we want to give up
+        _distribute = pkg_resources.get_distribution('distribute')
+        if _setuptools != _distribute:
+            # It's possible on some pathological systems to have an old version
+            # of setuptools and distribute on sys.path simultaneously; make
+            # sure distribute is the one that's used
+            sys.path.insert(1, _distribute.location)
+            _distribute.activate()
+            imp.reload(pkg_resources)
+except:
+    # There are several types of exceptions that can occur here; if all else
+    # fails bootstrap and use the bootstrapped version
+    from ez_setup import use_setuptools
+    use_setuptools()
+
+
+# typing as a dependency for 1.6.1+ Sphinx causes issues when imported after
+# initializing submodule with ah_bootstrap.py
+# See discussion and references in
+# https://github.com/astropy/astropy-helpers/issues/302
+
+try:
+    import typing  # noqa
+except ImportError:
+    pass
+
+
+# Note: The following import is required as a workaround to
+# https://github.com/astropy/astropy-helpers/issues/89; if we don't import this
+# module now, it will get cleaned up after `run_setup` is called, but that will
+# later cause the TemporaryDirectory class defined in it to stop working when
+# used later on by setuptools
+try:
+    import setuptools.py31compat  # noqa
+except ImportError:
+    pass
+
+
+# matplotlib can cause problems if it is imported from within a call of
+# run_setup(), because in some circumstances it will try to write to the user's
+# home directory, resulting in a SandboxViolation. See
+# https://github.com/matplotlib/matplotlib/pull/4165
+# Making sure matplotlib, if it is available, is imported early in the setup
+# process can mitigate this (note importing matplotlib.pyplot has the same
+# issue)
+try:
+    import matplotlib
+    matplotlib.use('Agg')
+    import matplotlib.pyplot
+except:
+    # Ignore if this fails for *any* reason
+    pass
+
+
+# End compatibility imports...
+
+
+# In case it didn't successfully import before the ez_setup checks
+import pkg_resources
+
+from setuptools import Distribution
+from setuptools.package_index import PackageIndex
+from setuptools.sandbox import run_setup
+
+from distutils import log
+from distutils.debug import DEBUG
+
+
+# TODO: Maybe enable checking for a specific version of astropy_helpers?
+DIST_NAME = 'astropy-helpers'
+PACKAGE_NAME = 'astropy_helpers'
+
+# Defaults for other options
+DOWNLOAD_IF_NEEDED = True
+INDEX_URL = 'https://pypi.python.org/simple'
+USE_GIT = True
+OFFLINE = False
+AUTO_UPGRADE = True
+
+# A list of all the configuration options and their required types
+CFG_OPTIONS = [
+    ('auto_use', bool), ('path', str), ('download_if_needed', bool),
+    ('index_url', str), ('use_git', bool), ('offline', bool),
+    ('auto_upgrade', bool)
+]
+
+
+class _Bootstrapper(object):
+    """
+    Bootstrapper implementation. See ``use_astropy_helpers`` for parameter
+    documentation.
+ """ + + def __init__(self, path=None, index_url=None, use_git=None, offline=None, + download_if_needed=None, auto_upgrade=None): + + if path is None: + path = PACKAGE_NAME + + if not (isinstance(path, _str_types) or path is False): + raise TypeError('path must be a string or False') + + if PY3 and not isinstance(path, _text_type): + fs_encoding = sys.getfilesystemencoding() + path = path.decode(fs_encoding) # path to unicode + + self.path = path + + # Set other option attributes, using defaults where necessary + self.index_url = index_url if index_url is not None else INDEX_URL + self.offline = offline if offline is not None else OFFLINE + + # If offline=True, override download and auto-upgrade + if self.offline: + download_if_needed = False + auto_upgrade = False + + self.download = (download_if_needed + if download_if_needed is not None + else DOWNLOAD_IF_NEEDED) + self.auto_upgrade = (auto_upgrade + if auto_upgrade is not None else AUTO_UPGRADE) + + # If this is a release then the .git directory will not exist so we + # should not use git. + git_dir_exists = os.path.exists(os.path.join(os.path.dirname(__file__), '.git')) + if use_git is None and not git_dir_exists: + use_git = False + + self.use_git = use_git if use_git is not None else USE_GIT + # Declared as False by default--later we check if astropy-helpers can be + # upgraded from PyPI, but only if not using a source distribution (as in + # the case of import from a git submodule) + self.is_submodule = False + + @classmethod + def main(cls, argv=None): + if argv is None: + argv = sys.argv + + config = cls.parse_config() + config.update(cls.parse_command_line(argv)) + + auto_use = config.pop('auto_use', False) + bootstrapper = cls(**config) + + if auto_use: + # Run the bootstrapper, otherwise the setup.py is using the old + # use_astropy_helpers() interface, in which case it will run the + # bootstrapper manually after reconfiguring it. + bootstrapper.run() + + return bootstrapper + + @classmethod + def parse_config(cls): + if not os.path.exists('setup.cfg'): + return {} + + cfg = ConfigParser() + + try: + cfg.read('setup.cfg') + except Exception as e: + if DEBUG: + raise + + log.error( + "Error reading setup.cfg: {0!r}\n{1} will not be " + "automatically bootstrapped and package installation may fail." + "\n{2}".format(e, PACKAGE_NAME, _err_help_msg)) + return {} + + if not cfg.has_section('ah_bootstrap'): + return {} + + config = {} + + for option, type_ in CFG_OPTIONS: + if not cfg.has_option('ah_bootstrap', option): + continue + + if type_ is bool: + value = cfg.getboolean('ah_bootstrap', option) + else: + value = cfg.get('ah_bootstrap', option) + + config[option] = value + + return config + + @classmethod + def parse_command_line(cls, argv=None): + if argv is None: + argv = sys.argv + + config = {} + + # For now we just pop recognized ah_bootstrap options out of the + # arg list. This is imperfect; in the unlikely case that a setup.py + # custom command or even custom Distribution class defines an argument + # of the same name then we will break that. However there's a catch22 + # here that we can't just do full argument parsing right here, because + # we don't yet know *how* to parse all possible command-line arguments. 
+        if '--no-git' in argv:
+            config['use_git'] = False
+            argv.remove('--no-git')
+
+        if '--offline' in argv:
+            config['offline'] = True
+            argv.remove('--offline')
+
+        return config
+
+    def run(self):
+        strategies = ['local_directory', 'local_file', 'index']
+        dist = None
+
+        # First, remove any previously imported versions of astropy_helpers;
+        # this is necessary for nested installs where one package's installer
+        # is installing another package via setuptools.sandbox.run_setup, as in
+        # the case of setup_requires
+        for key in list(sys.modules):
+            try:
+                if key == PACKAGE_NAME or key.startswith(PACKAGE_NAME + '.'):
+                    del sys.modules[key]
+            except AttributeError:
+                # Sometimes mysterious non-string things can turn up in
+                # sys.modules
+                continue
+
+        # Check to see if the path is a submodule
+        self.is_submodule = self._check_submodule()
+
+        for strategy in strategies:
+            method = getattr(self, 'get_{0}_dist'.format(strategy))
+            dist = method()
+            if dist is not None:
+                break
+        else:
+            raise _AHBootstrapSystemExit(
+                "No source found for the {0!r} package; {0} must be "
+                "available and importable as a prerequisite to building "
+                "or installing this package.".format(PACKAGE_NAME))
+
+        # This is a bit hacky, but if astropy_helpers was loaded from a
+        # directory/submodule its Distribution object gets a "precedence" of
+        # "DEVELOP_DIST". However, in other cases it gets a precedence of
+        # "EGG_DIST". However, when activating the distribution it will only be
+        # placed early on sys.path if it is treated as an EGG_DIST, so always
+        # do that
+        dist = dist.clone(precedence=pkg_resources.EGG_DIST)
+
+        # Otherwise we found a version of astropy-helpers, so we're done
+        # Just activate the found distribution on sys.path--if we did a
+        # download this usually happens automatically but it doesn't hurt to
+        # do it again
+        # Note: Adding the dist to the global working set also activates it
+        # (makes it importable on sys.path) by default.
+
+        try:
+            pkg_resources.working_set.add(dist, replace=True)
+        except TypeError:
+            # Some (much) older versions of setuptools do not have the
+            # replace=True option here. These versions are old enough that all
+            # bets may be off anyways, but it's easy enough to work around just
+            # in case...
+            if dist.key in pkg_resources.working_set.by_key:
+                del pkg_resources.working_set.by_key[dist.key]
+            pkg_resources.working_set.add(dist)
+
+    @property
+    def config(self):
+        """
+        A `dict` containing the options this `_Bootstrapper` was configured
+        with.
+        """
+
+        return dict((optname, getattr(self, optname))
+                    for optname, _ in CFG_OPTIONS if hasattr(self, optname))
+
+    def get_local_directory_dist(self):
+        """
+        Handle importing a vendored package from a subdirectory of the source
+        distribution.
+ """ + + if not os.path.isdir(self.path): + return + + log.info('Attempting to import astropy_helpers from {0} {1!r}'.format( + 'submodule' if self.is_submodule else 'directory', + self.path)) + + dist = self._directory_import() + + if dist is None: + log.warn( + 'The requested path {0!r} for importing {1} does not ' + 'exist, or does not contain a copy of the {1} ' + 'package.'.format(self.path, PACKAGE_NAME)) + elif self.auto_upgrade and not self.is_submodule: + # A version of astropy-helpers was found on the available path, but + # check to see if a bugfix release is available on PyPI + upgrade = self._do_upgrade(dist) + if upgrade is not None: + dist = upgrade + + return dist + + def get_local_file_dist(self): + """ + Handle importing from a source archive; this also uses setup_requires + but points easy_install directly to the source archive. + """ + + if not os.path.isfile(self.path): + return + + log.info('Attempting to unpack and import astropy_helpers from ' + '{0!r}'.format(self.path)) + + try: + dist = self._do_download(find_links=[self.path]) + except Exception as e: + if DEBUG: + raise + + log.warn( + 'Failed to import {0} from the specified archive {1!r}: ' + '{2}'.format(PACKAGE_NAME, self.path, str(e))) + dist = None + + if dist is not None and self.auto_upgrade: + # A version of astropy-helpers was found on the available path, but + # check to see if a bugfix release is available on PyPI + upgrade = self._do_upgrade(dist) + if upgrade is not None: + dist = upgrade + + return dist + + def get_index_dist(self): + if not self.download: + log.warn('Downloading {0!r} disabled.'.format(DIST_NAME)) + return None + + log.warn( + "Downloading {0!r}; run setup.py with the --offline option to " + "force offline installation.".format(DIST_NAME)) + + try: + dist = self._do_download() + except Exception as e: + if DEBUG: + raise + log.warn( + 'Failed to download and/or install {0!r} from {1!r}:\n' + '{2}'.format(DIST_NAME, self.index_url, str(e))) + dist = None + + # No need to run auto-upgrade here since we've already presumably + # gotten the most up-to-date version from the package index + return dist + + def _directory_import(self): + """ + Import astropy_helpers from the given path, which will be added to + sys.path. + + Must return True if the import succeeded, and False otherwise. + """ + + # Return True on success, False on failure but download is allowed, and + # otherwise raise SystemExit + path = os.path.abspath(self.path) + + # Use an empty WorkingSet rather than the man + # pkg_resources.working_set, since on older versions of setuptools this + # will invoke a VersionConflict when trying to install an upgrade + ws = pkg_resources.WorkingSet([]) + ws.add_entry(path) + dist = ws.by_key.get(DIST_NAME) + + if dist is None: + # We didn't find an egg-info/dist-info in the given path, but if a + # setup.py exists we can generate it + setup_py = os.path.join(path, 'setup.py') + if os.path.isfile(setup_py): + with _silence(): + run_setup(os.path.join(path, 'setup.py'), + ['egg_info']) + + for dist in pkg_resources.find_distributions(path, True): + # There should be only one... 
+ return dist + + return dist + + def _do_download(self, version='', find_links=None): + if find_links: + allow_hosts = '' + index_url = None + else: + allow_hosts = None + index_url = self.index_url + + # Annoyingly, setuptools will not handle other arguments to + # Distribution (such as options) before handling setup_requires, so it + # is not straightforward to programmatically augment the arguments which + # are passed to easy_install + class _Distribution(Distribution): + def get_option_dict(self, command_name): + opts = Distribution.get_option_dict(self, command_name) + if command_name == 'easy_install': + if find_links is not None: + opts['find_links'] = ('setup script', find_links) + if index_url is not None: + opts['index_url'] = ('setup script', index_url) + if allow_hosts is not None: + opts['allow_hosts'] = ('setup script', allow_hosts) + return opts + + if version: + req = '{0}=={1}'.format(DIST_NAME, version) + else: + req = DIST_NAME + + attrs = {'setup_requires': [req]} + + try: + if DEBUG: + _Distribution(attrs=attrs) + else: + with _silence(): + _Distribution(attrs=attrs) + + # If the setup_requires succeeded it will have added the new dist to + # the main working_set + return pkg_resources.working_set.by_key.get(DIST_NAME) + except Exception as e: + if DEBUG: + raise + + msg = 'Error retrieving {0} from {1}:\n{2}' + if find_links: + source = find_links[0] + elif index_url != INDEX_URL: + source = index_url + else: + source = 'PyPI' + + raise Exception(msg.format(DIST_NAME, source, repr(e))) + + def _do_upgrade(self, dist): + # Build up a requirement for a higher bugfix release but a lower minor + # release (so API compatibility is guaranteed) + next_version = _next_version(dist.parsed_version) + + req = pkg_resources.Requirement.parse( + '{0}>{1},<{2}'.format(DIST_NAME, dist.version, next_version)) + + package_index = PackageIndex(index_url=self.index_url) + + upgrade = package_index.obtain(req) + + if upgrade is not None: + return self._do_download(version=upgrade.version) + + def _check_submodule(self): + """ + Check if the given path is a git submodule. + + See the docstrings for ``_check_submodule_using_git`` and + ``_check_submodule_no_git`` for further details. + """ + + if (self.path is None or + (os.path.exists(self.path) and not os.path.isdir(self.path))): + return False + + if self.use_git: + return self._check_submodule_using_git() + else: + return self._check_submodule_no_git() + + def _check_submodule_using_git(self): + """ + Check if the given path is a git submodule. If so, attempt to initialize + and/or update the submodule if needed. + + This function makes calls to the ``git`` command in subprocesses. The + ``_check_submodule_no_git`` option uses pure Python to check if the given + path looks like a git submodule, but it cannot perform updates. + """ + + cmd = ['git', 'submodule', 'status', '--', self.path] + + try: + log.info('Running `{0}`; use the --no-git option to disable git ' + 'commands'.format(' '.join(cmd))) + returncode, stdout, stderr = run_cmd(cmd) + except _CommandNotFound: + # The git command simply wasn't found; this is most likely the + # case on user systems that don't have git and are simply + # trying to install the package from PyPI or a source + # distribution. 
Silently ignore this case and simply don't try
+            # to use submodules
+            return False
+
+        stderr = stderr.strip()
+
+        if returncode != 0 and stderr:
+            # Unfortunately the return code alone cannot be relied on, as
+            # earlier versions of git returned 0 even if the requested submodule
+            # does not exist
+
+            # This is a warning printed by perl (from running git submodule)
+            # that only occurs with a malformed locale setting, which can
+            # happen sometimes on OSX.  See again
+            # https://github.com/astropy/astropy/issues/2749
+            perl_warning = ('perl: warning: Falling back to the standard locale '
+                            '("C").')
+            if not stderr.strip().endswith(perl_warning):
+                # Some other unknown error condition occurred
+                log.warn('git submodule command failed '
+                         'unexpectedly:\n{0}'.format(stderr))
+                return False
+
+        # Output of `git submodule status` is as follows:
+        #
+        # 1. Status indicator: '-' if the submodule is uninitialized, '+' if
+        # the submodule is initialized but is not at the commit currently
+        # indicated in .gitmodules (and thus needs to be updated), or 'U' if
+        # the submodule is in an unstable state (i.e. has merge conflicts)
+        #
+        # 2. SHA-1 hash of the current commit of the submodule (we don't really
+        # need this information but it's useful for checking that the output is
+        # correct)
+        #
+        # 3. The output of `git describe` for the submodule's current commit
+        # hash (this includes for example what branches the commit is on) but
+        # only if the submodule is initialized.  We ignore this information for
+        # now
+        _git_submodule_status_re = re.compile(
+            '^(?P<status>[+-U ])(?P<sha1>[0-9a-f]{40}) '
+            '(?P<submodule>\S+)( .*)?$')
+
+        # The stdout should only contain one line--the status of the
+        # requested submodule
+        m = _git_submodule_status_re.match(stdout)
+        if m:
+            # Yes, the path *is* a git submodule
+            self._update_submodule(m.group('submodule'), m.group('status'))
+            return True
+        else:
+            log.warn(
+                'Unexpected output from `git submodule status`:\n{0}\n'
+                'Will attempt import from {1!r} regardless.'.format(
+                    stdout, self.path))
+            return False
+
+    def _check_submodule_no_git(self):
+        """
+        Like ``_check_submodule_using_git``, but simply parses the .gitmodules
+        file to determine if the supplied path is a git submodule, and does not
+        execute any subprocesses.
+
+        This can only determine if a path is a submodule--it does not perform
+        updates, etc.  This function may need to be updated if the format of the
+        .gitmodules file is changed between git versions.
+        """
+
+        gitmodules_path = os.path.abspath('.gitmodules')
+
+        if not os.path.isfile(gitmodules_path):
+            return False
+
+        # This is a minimal reader for gitconfig-style files.  It handles a few
+        # of the quirks that make gitconfig files incompatible with
+        # ConfigParser-style files, but does not support the full gitconfig
+        # syntax (just enough needed to read a .gitmodules file).
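+        # For reference, a typical .gitmodules file looks something like:
+        #
+        #     [submodule "astropy_helpers"]
+        #         path = astropy_helpers
+        #         url = https://github.com/astropy/astropy-helpers.git
+        #
+        # The indented option lines are one such quirk: a plain ConfigParser
+        # would treat them as continuation lines, hence the lstrip() below.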
+        gitmodules_fileobj = io.StringIO()
+
+        # Must use io.open for cross-Python-compatible behavior wrt unicode
+        with io.open(gitmodules_path) as f:
+            for line in f:
+                # gitconfig files are more flexible with leading whitespace;
+                # just go ahead and remove it
+                line = line.lstrip()
+
+                # comments can start with either # or ;
+                if line and line[0] in ('#', ';'):
+                    continue
+
+                gitmodules_fileobj.write(line)
+
+        gitmodules_fileobj.seek(0)
+
+        cfg = RawConfigParser()
+
+        try:
+            cfg.readfp(gitmodules_fileobj)
+        except Exception as exc:
+            log.warn('Malformed .gitmodules file: {0}\n'
+                     '{1} cannot be assumed to be a git submodule.'.format(
+                         exc, self.path))
+            return False
+
+        for section in cfg.sections():
+            if not cfg.has_option(section, 'path'):
+                continue
+
+            submodule_path = cfg.get(section, 'path').rstrip(os.sep)
+
+            if submodule_path == self.path.rstrip(os.sep):
+                return True
+
+        return False
+
+    def _update_submodule(self, submodule, status):
+        if status == ' ':
+            # The submodule is up to date; no action necessary
+            return
+        elif status == '-':
+            if self.offline:
+                raise _AHBootstrapSystemExit(
+                    "Cannot initialize the {0} submodule in --offline mode; "
+                    "this requires being able to clone the submodule from an "
+                    "online repository.".format(submodule))
+            cmd = ['update', '--init']
+            action = 'Initializing'
+        elif status == '+':
+            cmd = ['update']
+            action = 'Updating'
+            if self.offline:
+                cmd.append('--no-fetch')
+        elif status == 'U':
+            raise _AHBootstrapSystemExit(
+                'Error: Submodule {0} contains unresolved merge conflicts.  '
+                'Please complete or abandon any changes in the submodule so that '
+                'it is in a usable state, then try again.'.format(submodule))
+        else:
+            log.warn('Unknown status {0!r} for git submodule {1!r}.  Will '
+                     'attempt to use the submodule as-is, but try to ensure '
+                     'that the submodule is in a clean state and contains no '
+                     'conflicts or errors.\n{2}'.format(status, submodule,
+                                                        _err_help_msg))
+            return
+
+        err_msg = None
+        cmd = ['git', 'submodule'] + cmd + ['--', submodule]
+        log.warn('{0} {1} submodule with: `{2}`'.format(
+            action, submodule, ' '.join(cmd)))
+
+        try:
+            log.info('Running `{0}`; use the --no-git option to disable git '
+                     'commands'.format(' '.join(cmd)))
+            returncode, stdout, stderr = run_cmd(cmd)
+        except OSError as e:
+            err_msg = str(e)
+        else:
+            if returncode != 0:
+                err_msg = stderr
+
+        if err_msg is not None:
+            log.warn('An unexpected error occurred updating the git submodule '
+                     '{0!r}:\n{1}\n{2}'.format(submodule, err_msg,
+                                               _err_help_msg))
+
+
+class _CommandNotFound(OSError):
+    """
+    An exception raised when a command run with run_cmd is not found on the
+    system.
+    """
+
+
+def run_cmd(cmd):
+    """
+    Run a command in a subprocess, given as a list of command-line
+    arguments.
+
+    Returns a ``(returncode, stdout, stderr)`` tuple.
+    """
+
+    try:
+        p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE)
+        # XXX: May block if either stdout or stderr fill their buffers;
+        # however for the commands this is currently used for that is
+        # unlikely (they should have very brief output)
+        stdout, stderr = p.communicate()
+    except OSError as e:
+        if DEBUG:
+            raise
+
+        if e.errno == errno.ENOENT:
+            msg = 'Command not found: `{0}`'.format(' '.join(cmd))
+            raise _CommandNotFound(msg, cmd)
+        else:
+            raise _AHBootstrapSystemExit(
+                'An unexpected error occurred when running the '
+                '`{0}` command:\n{1}'.format(' '.join(cmd), str(e)))
+
+    # Can fail if the default locale is not configured properly.  See
+    # https://github.com/astropy/astropy/issues/2749.
For the purposes under + # consideration 'latin1' is an acceptable fallback. + try: + stdio_encoding = locale.getdefaultlocale()[1] or 'latin1' + except ValueError: + # Due to an OSX oddity locale.getdefaultlocale() can also crash + # depending on the user's locale/language settings. See: + # http://bugs.python.org/issue18378 + stdio_encoding = 'latin1' + + # Unlikely to fail at this point but even then let's be flexible + if not isinstance(stdout, _text_type): + stdout = stdout.decode(stdio_encoding, 'replace') + if not isinstance(stderr, _text_type): + stderr = stderr.decode(stdio_encoding, 'replace') + + return (p.returncode, stdout, stderr) + + +def _next_version(version): + """ + Given a parsed version from pkg_resources.parse_version, returns a new + version string with the next minor version. + + Examples + ======== + >>> _next_version(pkg_resources.parse_version('1.2.3')) + '1.3.0' + """ + + if hasattr(version, 'base_version'): + # New version parsing from setuptools >= 8.0 + if version.base_version: + parts = version.base_version.split('.') + else: + parts = [] + else: + parts = [] + for part in version: + if part.startswith('*'): + break + parts.append(part) + + parts = [int(p) for p in parts] + + if len(parts) < 3: + parts += [0] * (3 - len(parts)) + + major, minor, micro = parts[:3] + + return '{0}.{1}.{2}'.format(major, minor + 1, 0) + + +class _DummyFile(object): + """A noop writeable object.""" + + errors = '' # Required for Python 3.x + encoding = 'utf-8' + + def write(self, s): + pass + + def flush(self): + pass + + +@contextlib.contextmanager +def _silence(): + """A context manager that silences sys.stdout and sys.stderr.""" + + old_stdout = sys.stdout + old_stderr = sys.stderr + sys.stdout = _DummyFile() + sys.stderr = _DummyFile() + exception_occurred = False + try: + yield + except: + exception_occurred = True + # Go ahead and clean up so that exception handling can work normally + sys.stdout = old_stdout + sys.stderr = old_stderr + raise + + if not exception_occurred: + sys.stdout = old_stdout + sys.stderr = old_stderr + + +_err_help_msg = """ +If the problem persists consider installing astropy_helpers manually using pip +(`pip install astropy_helpers`) or by manually downloading the source archive, +extracting it, and installing by running `python setup.py install` from the +root of the extracted source code. +""" + + +class _AHBootstrapSystemExit(SystemExit): + def __init__(self, *args): + if not args: + msg = 'An unknown problem occurred bootstrapping astropy_helpers.' + else: + msg = args[0] + + msg += '\n' + _err_help_msg + + super(_AHBootstrapSystemExit, self).__init__(msg, *args[1:]) + + +BOOTSTRAPPER = _Bootstrapper.main() + + +def use_astropy_helpers(**kwargs): + """ + Ensure that the `astropy_helpers` module is available and is importable. + This supports automatic submodule initialization if astropy_helpers is + included in a project as a git submodule, or will download it from PyPI if + necessary. + + Parameters + ---------- + + path : str or None, optional + A filesystem path relative to the root of the project's source code + that should be added to `sys.path` so that `astropy_helpers` can be + imported from that path. + + If the path is a git submodule it will automatically be initialized + and/or updated. + + The path may also be to a ``.tar.gz`` archive of the astropy_helpers + source distribution. In this case the archive is automatically + unpacked and made temporarily available on `sys.path` as a ``.egg`` + archive. 
+
+        If `None`, skip straight to downloading.
+
+    download_if_needed : bool, optional
+        If the provided filesystem path is not found, an attempt will be made
+        to download astropy_helpers from PyPI.  It will then be made
+        temporarily available on `sys.path` as a ``.egg`` archive (using the
+        ``setup_requires`` feature of setuptools).  If the ``--offline``
+        option is given at the command line, the value of this argument is
+        overridden to `False`.
+
+    index_url : str, optional
+        If provided, use a different URL for the Python package index than the
+        main PyPI server.
+
+    use_git : bool, optional
+        If `False` no git commands will be used--this effectively disables
+        support for git submodules.  If the ``--no-git`` option is given at the
+        command line, the value of this argument is overridden to `False`.
+
+    auto_upgrade : bool, optional
+        By default, when installing a package from a non-development source
+        distribution ah_bootstrap will try to automatically check for patch
+        releases to astropy-helpers on PyPI and use the patched version over
+        any bundled versions.  Setting this to `False` will disable that
+        functionality.  If the ``--offline`` option is given at the command
+        line, the value of this argument is overridden to `False`.
+
+    offline : bool, optional
+        If `True`, disable all actions that require an internet connection,
+        including downloading packages from the package index and fetching
+        updates to any git submodule.  Defaults to `False`.
+    """
+
+    global BOOTSTRAPPER
+
+    config = BOOTSTRAPPER.config
+    config.update(**kwargs)
+
+    # Create a new bootstrapper with the updated configuration and run it
+    BOOTSTRAPPER = _Bootstrapper(**config)
+    BOOTSTRAPPER.run()
diff --git a/astropy/__init__.py b/astropy/__init__.py
new file mode 100644
index 0000000..24b4055
--- /dev/null
+++ b/astropy/__init__.py
@@ -0,0 +1,342 @@
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+"""
+Astropy is a package intended to contain core functionality and some
+common tools needed for performing astronomy and astrophysics research with
+Python.  It also provides an index for other astronomy packages and tools for
+managing them.
+"""
+
+from __future__ import absolute_import
+
+import sys
+import os
+from warnings import warn
+
+if sys.version_info[:2] < (2, 7):
+    warn("Astropy does not support Python 2.6 (in v1.2 and later)")
+
+
+def _is_astropy_source(path=None):
+    """
+    Returns whether the source for this module is directly in an astropy
+    source distribution or checkout.
+    """
+
+    # If this __init__.py file is in ./astropy/ then the import is within a
+    # source dir.  .astropy-root is a file distributed with the source, but
+    # one that should not be installed
+    if path is None:
+        path = os.path.join(os.path.dirname(__file__), os.pardir)
+    elif os.path.isfile(path):
+        path = os.path.dirname(path)
+
+    source_dir = os.path.abspath(path)
+    return os.path.exists(os.path.join(source_dir, '.astropy-root'))
+
+
+def _is_astropy_setup():
+    """
+    Returns whether we are currently being imported in the context of running
+    Astropy's setup.py.
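+
+    (This is what supplies the default value for ``_ASTROPY_SETUP_`` just
+    below, when setup.py itself has not already defined it.)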
+ """ + + main_mod = sys.modules.get('__main__') + if not main_mod: + return False + + return (getattr(main_mod, '__file__', False) and + os.path.basename(main_mod.__file__).rstrip('co') == 'setup.py' and + _is_astropy_source(main_mod.__file__)) + + +# this indicates whether or not we are in astropy's setup.py +try: + _ASTROPY_SETUP_ +except NameError: + from sys import version_info + if version_info[0] >= 3: + import builtins + else: + import __builtin__ as builtins + + # This will set the _ASTROPY_SETUP_ to True by default if + # we are running Astropy's setup.py + builtins._ASTROPY_SETUP_ = _is_astropy_setup() + + +try: + from .version import version as __version__ +except ImportError: + # TODO: Issue a warning using the logging framework + __version__ = '' +try: + from .version import githash as __githash__ +except ImportError: + # TODO: Issue a warning using the logging framework + __githash__ = '' + + +__minimum_numpy_version__ = '1.9.0' + + +# The location of the online documentation for astropy +# This location will normally point to the current released version of astropy +if 'dev' in __version__: + online_docs_root = 'http://docs.astropy.org/en/latest/' +else: + online_docs_root = 'http://docs.astropy.org/en/{0}/'.format(__version__) + + +def _check_numpy(): + """ + Check that Numpy is installed and it is of the minimum version we + require. + """ + # Note: We could have used distutils.version for this comparison, + # but it seems like overkill to import distutils at runtime. + requirement_met = False + + try: + import numpy + except ImportError: + pass + else: + from .utils import minversion + requirement_met = minversion(numpy, __minimum_numpy_version__) + + if not requirement_met: + msg = ("Numpy version {0} or later must be installed to use " + "Astropy".format(__minimum_numpy_version__)) + raise ImportError(msg) + + return numpy + + +if not _ASTROPY_SETUP_: + _check_numpy() + + +from . import config as _config + + +class Conf(_config.ConfigNamespace): + """ + Configuration parameters for `astropy`. + """ + + unicode_output = _config.ConfigItem( + False, + 'When True, use Unicode characters when outputting values, and ' + 'displaying widgets at the console.') + use_color = _config.ConfigItem( + sys.platform != 'win32', + 'When True, use ANSI color escape sequences when writing to the console.', + aliases=['astropy.utils.console.USE_COLOR', 'astropy.logger.USE_COLOR']) + max_lines = _config.ConfigItem( + None, + description='Maximum number of lines in the display of pretty-printed ' + 'objects. If not provided, try to determine automatically from the ' + 'terminal size. Negative numbers mean no limit.', + cfgtype='integer(default=None)', + aliases=['astropy.table.pprint.max_lines']) + max_width = _config.ConfigItem( + None, + description='Maximum number of characters per line in the display of ' + 'pretty-printed objects. If not provided, try to determine ' + 'automatically from the terminal size. Negative numbers mean no ' + 'limit.', + cfgtype='integer(default=None)', + aliases=['astropy.table.pprint.max_width']) + + +conf = Conf() + +# Create the test() function +from .tests.runner import TestRunner +test = TestRunner.make_test_runner_in(__path__[0]) + + +# if we are *not* in setup mode, import the logger and possibly populate the +# configuration file with the defaults +def _initialize_astropy(): + from . 
import config
+
+    def _rollback_import(message):
+        log.error(message)
+        # Now disable exception logging to avoid an annoying error in the
+        # exception logger before we raise the import error:
+        _teardown_log()
+
+        # Roll back any astropy sub-modules that have been imported thus
+        # far
+
+        for key in list(sys.modules):
+            if key.startswith('astropy.'):
+                del sys.modules[key]
+
+        raise ImportError('astropy')
+
+    try:
+        from .utils import _compiler
+    except ImportError:
+        if _is_astropy_source():
+            log.warning('You appear to be trying to import astropy from '
+                        'within a source checkout without building the '
+                        'extension modules first.  Attempting to (re)build '
+                        'extension modules:')
+
+            try:
+                _rebuild_extensions()
+            except BaseException as exc:
+                _rollback_import(
+                    'An error occurred while attempting to rebuild the '
+                    'extension modules.  Please try manually running '
+                    '`./setup.py develop` or `./setup.py build_ext '
+                    '--inplace` to see what the issue was.  Extension '
+                    'modules must be successfully compiled and importable '
+                    'in order to import astropy.')
+                # Re-raise only if it was not an ordinary Exception, for
+                # example if a "SystemExit" or "KeyboardInterrupt" was
+                # invoked.
+                if not isinstance(exc, Exception):
+                    raise
+
+        else:
+            # Outright broken installation; don't be nice.
+            raise
+
+    # add these here so we only need to clean up the namespace at the end
+    config_dir = os.path.dirname(__file__)
+
+    try:
+        config.configuration.update_default_config(__package__, config_dir)
+    except config.configuration.ConfigurationDefaultMissingError as e:
+        wmsg = (e.args[0] + " Cannot install default profile.  If you are "
+                "importing from source, this is expected.")
+        warn(config.configuration.ConfigurationDefaultMissingWarning(wmsg))
+
+
+def _rebuild_extensions():
+    global __version__
+    global __githash__
+
+    import subprocess
+    import time
+
+    from .utils.console import Spinner
+    from .extern.six import next
+
+    devnull = open(os.devnull, 'w')
+    old_cwd = os.getcwd()
+    os.chdir(os.path.join(os.path.dirname(__file__), os.pardir))
+    try:
+        sp = subprocess.Popen([sys.executable, 'setup.py', 'build_ext',
+                               '--inplace'], stdout=devnull,
+                              stderr=devnull)
+        with Spinner('Rebuilding extension modules') as spinner:
+            while sp.poll() is None:
+                next(spinner)
+                time.sleep(0.05)
+    finally:
+        os.chdir(old_cwd)
+        devnull.close()
+
+    if sp.returncode != 0:
+        raise OSError('Running setup.py build_ext --inplace failed '
+                      'with error code {0}: try rerunning this command '
+                      'manually to check what the error was.'.format(
+                          sp.returncode))
+
+    # Try re-loading module-level globals from the astropy.version module,
+    # which may not have existed before this function ran
+    try:
+        from .version import version as __version__
+    except ImportError:
+        pass
+
+    try:
+        from .version import githash as __githash__
+    except ImportError:
+        pass
+
+
+# Set the bibtex entry to the article referenced in CITATION
+def _get_bibtex():
+    import re
+    if os.path.exists('CITATION'):
+        with open('CITATION', 'r') as citation:
+            refs = re.findall(r'\{[^()]*\}', citation.read())
+            if len(refs) == 0:
+                return ''
+            bibtexreference = "@ARTICLE{0}".format(refs[0])
+        return bibtexreference
+    else:
+        return ''
+
+
+__bibtex__ = _get_bibtex()
+
+
+import logging
+
+# Use the root logger as a dummy log before initializing Astropy's logger
+log = logging.getLogger()
+
+
+if not _ASTROPY_SETUP_:
+    from .logger import _init_log, _teardown_log
+
+    log = _init_log()
+
+    _initialize_astropy()
+
+    from .utils.misc import find_api_page
+
+
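+# A quick usage sketch for the helper defined below (the query string is
+# purely illustrative):
+#
+#     >>> import astropy
+#     >>> astropy.online_help('units')   # opens the docs search in a browser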
+def online_help(query):
+    """
+    Search the online Astropy documentation for the given query.
+    Opens the results in the default web browser.  Requires an active
+    Internet connection.
+
+    Parameters
+    ----------
+    query : str
+        The search query.
+    """
+    from .extern.six.moves.urllib.parse import urlencode
+    import webbrowser
+
+    version = __version__
+    if 'dev' in version:
+        version = 'latest'
+    else:
+        version = 'v' + version
+
+    url = 'http://docs.astropy.org/en/{0}/search.html?{1}'.format(
+        version, urlencode({'q': query}))
+
+    webbrowser.open(url)
+
+
+__dir__ = ['__version__', '__githash__', '__minimum_numpy_version__',
+           '__bibtex__', 'test', 'log', 'find_api_page', 'online_help',
+           'online_docs_root', 'conf']
+
+
+from types import ModuleType as __module_type__
+# Clean up top-level namespace--delete everything that isn't in __dir__
+# or is a magic attribute, and that isn't a submodule of this package
+for varname in dir():
+    if not ((varname.startswith('__') and varname.endswith('__')) or
+            varname in __dir__ or
+            (varname[0] != '_' and
+             isinstance(locals()[varname], __module_type__) and
+             locals()[varname].__name__.startswith(__name__ + '.'))):
+        # The last clause in the above disjunction deserves explanation:
+        # When using relative imports like ``from .. import config``, the
+        # ``config`` variable is automatically created in the namespace of
+        # whatever module ``..`` resolves to (in this case astropy).  This
+        # happens a few times just in the module setup above.  This allows
+        # the cleanup to keep any public submodules of the astropy package
+        del locals()[varname]
+
+del varname, __module_type__
diff --git a/astropy/_compiler.c b/astropy/_compiler.c
new file mode 100644
index 0000000..75500ca
--- /dev/null
+++ b/astropy/_compiler.c
@@ -0,0 +1,129 @@
+#include <Python.h>
+
+/***************************************************************************
+ * Macros for determining the compiler version.
+ *
+ * These are borrowed from boost, and majorly abridged to include only
+ * the compilers we care about.
+ ***************************************************************************/
+
+#ifndef PY3K
+#if PY_MAJOR_VERSION >= 3
+#define PY3K 1
+#else
+#define PY3K 0
+#endif
+#endif
+
+
+#define STRINGIZE(X) DO_STRINGIZE(X)
+#define DO_STRINGIZE(X) #X
+
+#if defined __clang__
+/* Clang C++ emulates GCC, so it has to appear early.
*/ +# define COMPILER "Clang version " __clang_version__ + +#elif defined(__INTEL_COMPILER) || defined(__ICL) || defined(__ICC) || defined(__ECC) +/* Intel */ +# if defined(__INTEL_COMPILER) +# define INTEL_VERSION __INTEL_COMPILER +# elif defined(__ICL) +# define INTEL_VERSION __ICL +# elif defined(__ICC) +# define INTEL_VERSION __ICC +# elif defined(__ECC) +# define INTEL_VERSION __ECC +# endif +# define COMPILER "Intel C compiler version " STRINGIZE(INTEL_VERSION) + +#elif defined(__GNUC__) +/* gcc */ +# define COMPILER "GCC version " __VERSION__ + +#elif defined(__SUNPRO_CC) +/* Sun Workshop Compiler */ +# define COMPILER "Sun compiler version " STRINGIZE(__SUNPRO_CC) + +#elif defined(_MSC_VER) +/* Microsoft Visual C/C++ + Must be last since other compilers define _MSC_VER for compatibility as well */ +# if _MSC_VER < 1200 +# define COMPILER_VERSION 5.0 +# elif _MSC_VER < 1300 +# define COMPILER_VERSION 6.0 +# elif _MSC_VER == 1300 +# define COMPILER_VERSION 7.0 +# elif _MSC_VER == 1310 +# define COMPILER_VERSION 7.1 +# elif _MSC_VER == 1400 +# define COMPILER_VERSION 8.0 +# elif _MSC_VER == 1500 +# define COMPILER_VERSION 9.0 +# elif _MSC_VER == 1600 +# define COMPILER_VERSION 10.0 +# else +# define COMPILER_VERSION _MSC_VER +# endif +# define COMPILER "Microsoft Visual C++ version " STRINGIZE(COMPILER_VERSION) + +#else +/* Fallback */ +# define COMPILER "Unknown compiler" + +#endif + + +/*************************************************************************** + * Module-level + ***************************************************************************/ + +struct module_state { +/* The Sun compiler can't handle empty structs */ +#if defined(__SUNPRO_C) || defined(_MSC_VER) + int _dummy; +#endif +}; + +#if PY3K + static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, + "_compiler", + NULL, + sizeof(struct module_state), + NULL, + NULL, + NULL, + NULL, + NULL + }; + + #define INITERROR return NULL + + PyMODINIT_FUNC + PyInit__compiler(void) + +#else + #define INITERROR return + + PyMODINIT_FUNC + init_compiler(void) +#endif + +{ + PyObject* m; + +#if PY3K + m = PyModule_Create(&moduledef); +#else + m = Py_InitModule3("_compiler", NULL, NULL); +#endif + + if (m == NULL) + INITERROR; + + PyModule_AddStringConstant(m, "compiler", COMPILER); + +#if PY3K + return m; +#endif +} diff --git a/astropy/_erfa/__init__.py b/astropy/_erfa/__init__.py new file mode 100644 index 0000000..35bb1ab --- /dev/null +++ b/astropy/_erfa/__init__.py @@ -0,0 +1,7 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +try: + # The ERFA wrappers are not guaranteed available at setup time + from .core import * +except ImportError: + if not _ASTROPY_SETUP_: + raise diff --git a/astropy/_erfa/core.c b/astropy/_erfa/core.c new file mode 100644 index 0000000..624e74d --- /dev/null +++ b/astropy/_erfa/core.c @@ -0,0 +1,6044 @@ +/* -*- mode: c -*- */ + +/* Licensed under a 3-clause BSD style license - see LICENSE.rst */ + +/* "core.c" is auto-generated by erfa_generator.py from the template + "core.c.templ". Do *not* edit "core.c" directly, instead edit + "core.c.templ" and run erfa_generator.py from the source directory to + update it. 
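+
+   As a rough illustration of the pattern repeated throughout this file:
+   each Py_<name> wrapper below is handed a single pre-built numpy iterator
+   from core.py, walks it with NpyIter_GetIterNext, casts the per-element
+   data pointers to the C types expected by ERFA, and applies the matching
+   era<Name> function element by element.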
*/
+
+
+#include <Python.h>
+#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
+#include <numpy/arrayobject.h>
+#include "erfa.h"
+
+
+#if PY_MAJOR_VERSION >= 3
+#define PY3K 1
+#else
+#define PY3K 0
+#endif
+
+
+typedef struct {
+    PyObject_HEAD
+    NpyIter *iter;
+} _NpyIterObject;
+
+
+#define MODULE_DOCSTRING \
+    "This module contains the C part of the ERFA python wrappers.\n" \
+    "This implements only the inner iterator loops, while the heavy lifting\n" \
+    "happens in Python in core.py\n\n" \
+    "For more about the module and how to use it, see the ``core.py``\n" \
+    "docstrings."
+
+static PyObject *Py_cal2jd(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    int (*_iy);
+    int (*_im);
+    int (*_id);
+    double (*_djm0);
+    double (*_djm);
+    int _c_retval;
+    int stat_ok = 1;
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _iy = ((int (*))(dataptrarray[0]));
+        _im = ((int (*))(dataptrarray[1]));
+        _id = ((int (*))(dataptrarray[2]));
+        _djm0 = ((double (*))(dataptrarray[3]));
+        _djm = ((double (*))(dataptrarray[4]));
+
+        _c_retval = eraCal2jd(*_iy, *_im, *_id, _djm0, _djm);
+        *((int *)(dataptrarray[5])) = _c_retval;
+        if (_c_retval) {
+            stat_ok = 0;
+        }
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    if (stat_ok) {
+        Py_RETURN_TRUE;
+    } else {
+        Py_RETURN_FALSE;
+    }
+}
+
+static PyObject *Py_epb(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_dj1);
+    double (*_dj2);
+    double _c_retval;
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _dj1 = ((double (*))(dataptrarray[0]));
+        _dj2 = ((double (*))(dataptrarray[1]));
+
+        _c_retval = eraEpb(*_dj1, *_dj2);
+        *((double *)(dataptrarray[2])) = _c_retval;
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_epb2jd(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_epb);
+    double (*_djm0);
+    double (*_djm);
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _epb = ((double (*))(dataptrarray[0]));
+        _djm0 = ((double (*))(dataptrarray[1]));
+        _djm = ((double (*))(dataptrarray[2]));
+
+        eraEpb2jd(*_epb, _djm0, _djm);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_epj(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_dj1);
+    double (*_dj2);
+    double _c_retval;
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _dj1 = ((double (*))(dataptrarray[0]));
+        _dj2 = ((double (*))(dataptrarray[1]));
+
+        _c_retval = eraEpj(*_dj1, *_dj2);
+        *((double *)(dataptrarray[2])) = _c_retval;
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_epj2jd(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_epj);
+    double (*_djm0);
+    double (*_djm);
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _epj = ((double (*))(dataptrarray[0]));
+        _djm0 = ((double
(*))(dataptrarray[1])); + _djm = ((double (*))(dataptrarray[2])); + + eraEpj2jd(*_epj, _djm0, _djm); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_jd2cal(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_dj1); + double (*_dj2); + int (*_iy); + int (*_im); + int (*_id); + double (*_fd); + int _c_retval; + int stat_ok = 1; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _dj1 = ((double (*))(dataptrarray[0])); + _dj2 = ((double (*))(dataptrarray[1])); + _iy = ((int (*))(dataptrarray[2])); + _im = ((int (*))(dataptrarray[3])); + _id = ((int (*))(dataptrarray[4])); + _fd = ((double (*))(dataptrarray[5])); + + _c_retval = eraJd2cal(*_dj1, *_dj2, _iy, _im, _id, _fd); + *((int *)(dataptrarray[6])) = _c_retval; + if (_c_retval) { + stat_ok = 0; + } + } while (iternext(it)); + + Py_END_ALLOW_THREADS + if (stat_ok) { + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } +} + +static PyObject *Py_jdcalf(PyObject *self, PyObject *args, PyObject *kwds) +{ + int (*_ndp); + double (*_dj1); + double (*_dj2); + int (*_iymdf)[4]; + int _c_retval; + int stat_ok = 1; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _ndp = ((int (*))(dataptrarray[0])); + _dj1 = ((double (*))(dataptrarray[1])); + _dj2 = ((double (*))(dataptrarray[2])); + _iymdf = ((int (*)[4])(dataptrarray[3])); + + _c_retval = eraJdcalf(*_ndp, *_dj1, *_dj2, *_iymdf); + *((int *)(dataptrarray[4])) = _c_retval; + if (_c_retval) { + stat_ok = 0; + } + } while (iternext(it)); + + Py_END_ALLOW_THREADS + if (stat_ok) { + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } +} + +static PyObject *Py_ab(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_pnat)[3]; + double (*_v)[3]; + double (*_s); + double (*_bm1); + double (*_ppr)[3]; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _pnat = ((double (*)[3])(dataptrarray[0])); + _v = ((double (*)[3])(dataptrarray[1])); + _s = ((double (*))(dataptrarray[2])); + _bm1 = ((double (*))(dataptrarray[3])); + _ppr = ((double (*)[3])(dataptrarray[4])); + + eraAb(*_pnat, *_v, *_s, *_bm1, *_ppr); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_apcg(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_date1); + double (*_date2); + double (*_ebpv)[2][3]; + double (*_ehp)[3]; + eraASTROM (*_astrom); + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _date1 = ((double (*))(dataptrarray[0])); + _date2 = ((double (*))(dataptrarray[1])); + _ebpv = ((double (*)[2][3])(dataptrarray[2])); + _ehp = ((double (*)[3])(dataptrarray[3])); + _astrom = ((eraASTROM (*))(dataptrarray[4])); + + eraApcg(*_date1, *_date2, *_ebpv, *_ehp, _astrom); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_apcg13(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_date1); + double (*_date2); + eraASTROM (*_astrom); + NpyIter *it = ((_NpyIterObject 
*)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _date1 = ((double (*))(dataptrarray[0])); + _date2 = ((double (*))(dataptrarray[1])); + _astrom = ((eraASTROM (*))(dataptrarray[2])); + + eraApcg13(*_date1, *_date2, _astrom); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_apci(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_date1); + double (*_date2); + double (*_ebpv)[2][3]; + double (*_ehp)[3]; + double (*_x); + double (*_y); + double (*_s); + eraASTROM (*_astrom); + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _date1 = ((double (*))(dataptrarray[0])); + _date2 = ((double (*))(dataptrarray[1])); + _ebpv = ((double (*)[2][3])(dataptrarray[2])); + _ehp = ((double (*)[3])(dataptrarray[3])); + _x = ((double (*))(dataptrarray[4])); + _y = ((double (*))(dataptrarray[5])); + _s = ((double (*))(dataptrarray[6])); + _astrom = ((eraASTROM (*))(dataptrarray[7])); + + eraApci(*_date1, *_date2, *_ebpv, *_ehp, *_x, *_y, *_s, _astrom); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_apci13(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_date1); + double (*_date2); + eraASTROM (*_astrom); + double (*_eo); + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _date1 = ((double (*))(dataptrarray[0])); + _date2 = ((double (*))(dataptrarray[1])); + _astrom = ((eraASTROM (*))(dataptrarray[2])); + _eo = ((double (*))(dataptrarray[3])); + + eraApci13(*_date1, *_date2, _astrom, _eo); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_apco(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_date1); + double (*_date2); + double (*_ebpv)[2][3]; + double (*_ehp)[3]; + double (*_x); + double (*_y); + double (*_s); + double (*_theta); + double (*_elong); + double (*_phi); + double (*_hm); + double (*_xp); + double (*_yp); + double (*_sp); + double (*_refa); + double (*_refb); + eraASTROM (*_astrom); + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _date1 = ((double (*))(dataptrarray[0])); + _date2 = ((double (*))(dataptrarray[1])); + _ebpv = ((double (*)[2][3])(dataptrarray[2])); + _ehp = ((double (*)[3])(dataptrarray[3])); + _x = ((double (*))(dataptrarray[4])); + _y = ((double (*))(dataptrarray[5])); + _s = ((double (*))(dataptrarray[6])); + _theta = ((double (*))(dataptrarray[7])); + _elong = ((double (*))(dataptrarray[8])); + _phi = ((double (*))(dataptrarray[9])); + _hm = ((double (*))(dataptrarray[10])); + _xp = ((double (*))(dataptrarray[11])); + _yp = ((double (*))(dataptrarray[12])); + _sp = ((double (*))(dataptrarray[13])); + _refa = ((double (*))(dataptrarray[14])); + _refb = ((double (*))(dataptrarray[15])); + _astrom = ((eraASTROM (*))(dataptrarray[16])); + + eraApco(*_date1, *_date2, *_ebpv, *_ehp, *_x, *_y, *_s, *_theta, *_elong, *_phi, *_hm, *_xp, *_yp, *_sp, *_refa, *_refb, _astrom); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + 
Py_RETURN_NONE; +} + +static PyObject *Py_apco13(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_utc1); + double (*_utc2); + double (*_dut1); + double (*_elong); + double (*_phi); + double (*_hm); + double (*_xp); + double (*_yp); + double (*_phpa); + double (*_tc); + double (*_rh); + double (*_wl); + eraASTROM (*_astrom); + double (*_eo); + int _c_retval; + int stat_ok = 1; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _utc1 = ((double (*))(dataptrarray[0])); + _utc2 = ((double (*))(dataptrarray[1])); + _dut1 = ((double (*))(dataptrarray[2])); + _elong = ((double (*))(dataptrarray[3])); + _phi = ((double (*))(dataptrarray[4])); + _hm = ((double (*))(dataptrarray[5])); + _xp = ((double (*))(dataptrarray[6])); + _yp = ((double (*))(dataptrarray[7])); + _phpa = ((double (*))(dataptrarray[8])); + _tc = ((double (*))(dataptrarray[9])); + _rh = ((double (*))(dataptrarray[10])); + _wl = ((double (*))(dataptrarray[11])); + _astrom = ((eraASTROM (*))(dataptrarray[12])); + _eo = ((double (*))(dataptrarray[13])); + + _c_retval = eraApco13(*_utc1, *_utc2, *_dut1, *_elong, *_phi, *_hm, *_xp, *_yp, *_phpa, *_tc, *_rh, *_wl, _astrom, _eo); + *((int *)(dataptrarray[14])) = _c_retval; + if (_c_retval) { + stat_ok = 0; + } + } while (iternext(it)); + + Py_END_ALLOW_THREADS + if (stat_ok) { + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } +} + +static PyObject *Py_apcs(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_date1); + double (*_date2); + double (*_pv)[2][3]; + double (*_ebpv)[2][3]; + double (*_ehp)[3]; + eraASTROM (*_astrom); + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _date1 = ((double (*))(dataptrarray[0])); + _date2 = ((double (*))(dataptrarray[1])); + _pv = ((double (*)[2][3])(dataptrarray[2])); + _ebpv = ((double (*)[2][3])(dataptrarray[3])); + _ehp = ((double (*)[3])(dataptrarray[4])); + _astrom = ((eraASTROM (*))(dataptrarray[5])); + + eraApcs(*_date1, *_date2, *_pv, *_ebpv, *_ehp, _astrom); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_apcs13(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_date1); + double (*_date2); + double (*_pv)[2][3]; + eraASTROM (*_astrom); + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _date1 = ((double (*))(dataptrarray[0])); + _date2 = ((double (*))(dataptrarray[1])); + _pv = ((double (*)[2][3])(dataptrarray[2])); + _astrom = ((eraASTROM (*))(dataptrarray[3])); + + eraApcs13(*_date1, *_date2, *_pv, _astrom); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_aper(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_theta); + eraASTROM (*_astrom); + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _theta = ((double (*))(dataptrarray[0])); + _astrom = ((eraASTROM (*))(dataptrarray[1])); + + eraAper(*_theta, _astrom); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static 
PyObject *Py_aper13(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_ut11); + double (*_ut12); + eraASTROM (*_astrom); + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _ut11 = ((double (*))(dataptrarray[0])); + _ut12 = ((double (*))(dataptrarray[1])); + _astrom = ((eraASTROM (*))(dataptrarray[2])); + + eraAper13(*_ut11, *_ut12, _astrom); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_apio(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_sp); + double (*_theta); + double (*_elong); + double (*_phi); + double (*_hm); + double (*_xp); + double (*_yp); + double (*_refa); + double (*_refb); + eraASTROM (*_astrom); + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _sp = ((double (*))(dataptrarray[0])); + _theta = ((double (*))(dataptrarray[1])); + _elong = ((double (*))(dataptrarray[2])); + _phi = ((double (*))(dataptrarray[3])); + _hm = ((double (*))(dataptrarray[4])); + _xp = ((double (*))(dataptrarray[5])); + _yp = ((double (*))(dataptrarray[6])); + _refa = ((double (*))(dataptrarray[7])); + _refb = ((double (*))(dataptrarray[8])); + _astrom = ((eraASTROM (*))(dataptrarray[9])); + + eraApio(*_sp, *_theta, *_elong, *_phi, *_hm, *_xp, *_yp, *_refa, *_refb, _astrom); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_apio13(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_utc1); + double (*_utc2); + double (*_dut1); + double (*_elong); + double (*_phi); + double (*_hm); + double (*_xp); + double (*_yp); + double (*_phpa); + double (*_tc); + double (*_rh); + double (*_wl); + eraASTROM (*_astrom); + int _c_retval; + int stat_ok = 1; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _utc1 = ((double (*))(dataptrarray[0])); + _utc2 = ((double (*))(dataptrarray[1])); + _dut1 = ((double (*))(dataptrarray[2])); + _elong = ((double (*))(dataptrarray[3])); + _phi = ((double (*))(dataptrarray[4])); + _hm = ((double (*))(dataptrarray[5])); + _xp = ((double (*))(dataptrarray[6])); + _yp = ((double (*))(dataptrarray[7])); + _phpa = ((double (*))(dataptrarray[8])); + _tc = ((double (*))(dataptrarray[9])); + _rh = ((double (*))(dataptrarray[10])); + _wl = ((double (*))(dataptrarray[11])); + _astrom = ((eraASTROM (*))(dataptrarray[12])); + + _c_retval = eraApio13(*_utc1, *_utc2, *_dut1, *_elong, *_phi, *_hm, *_xp, *_yp, *_phpa, *_tc, *_rh, *_wl, _astrom); + *((int *)(dataptrarray[13])) = _c_retval; + if (_c_retval) { + stat_ok = 0; + } + } while (iternext(it)); + + Py_END_ALLOW_THREADS + if (stat_ok) { + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } +} + +static PyObject *Py_atci13(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_rc); + double (*_dc); + double (*_pr); + double (*_pd); + double (*_px); + double (*_rv); + double (*_date1); + double (*_date2); + double (*_ri); + double (*_di); + double (*_eo); + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do 
{ + _rc = ((double (*))(dataptrarray[0])); + _dc = ((double (*))(dataptrarray[1])); + _pr = ((double (*))(dataptrarray[2])); + _pd = ((double (*))(dataptrarray[3])); + _px = ((double (*))(dataptrarray[4])); + _rv = ((double (*))(dataptrarray[5])); + _date1 = ((double (*))(dataptrarray[6])); + _date2 = ((double (*))(dataptrarray[7])); + _ri = ((double (*))(dataptrarray[8])); + _di = ((double (*))(dataptrarray[9])); + _eo = ((double (*))(dataptrarray[10])); + + eraAtci13(*_rc, *_dc, *_pr, *_pd, *_px, *_rv, *_date1, *_date2, _ri, _di, _eo); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_atciq(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_rc); + double (*_dc); + double (*_pr); + double (*_pd); + double (*_px); + double (*_rv); + eraASTROM (*_astrom); + double (*_ri); + double (*_di); + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _rc = ((double (*))(dataptrarray[0])); + _dc = ((double (*))(dataptrarray[1])); + _pr = ((double (*))(dataptrarray[2])); + _pd = ((double (*))(dataptrarray[3])); + _px = ((double (*))(dataptrarray[4])); + _rv = ((double (*))(dataptrarray[5])); + _astrom = ((eraASTROM (*))(dataptrarray[6])); + _ri = ((double (*))(dataptrarray[7])); + _di = ((double (*))(dataptrarray[8])); + + eraAtciq(*_rc, *_dc, *_pr, *_pd, *_px, *_rv, _astrom, _ri, _di); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_atciqn(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_rc); + double (*_dc); + double (*_pr); + double (*_pd); + double (*_px); + double (*_rv); + eraASTROM (*_astrom); + int (*_n); + eraLDBODY (*_b); + double (*_ri); + double (*_di); + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _rc = ((double (*))(dataptrarray[0])); + _dc = ((double (*))(dataptrarray[1])); + _pr = ((double (*))(dataptrarray[2])); + _pd = ((double (*))(dataptrarray[3])); + _px = ((double (*))(dataptrarray[4])); + _rv = ((double (*))(dataptrarray[5])); + _astrom = ((eraASTROM (*))(dataptrarray[6])); + _n = ((int (*))(dataptrarray[7])); + _b = ((eraLDBODY (*))(dataptrarray[8])); + _ri = ((double (*))(dataptrarray[9])); + _di = ((double (*))(dataptrarray[10])); + + eraAtciqn(*_rc, *_dc, *_pr, *_pd, *_px, *_rv, _astrom, *_n, _b, _ri, _di); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_atciqz(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_rc); + double (*_dc); + eraASTROM (*_astrom); + double (*_ri); + double (*_di); + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _rc = ((double (*))(dataptrarray[0])); + _dc = ((double (*))(dataptrarray[1])); + _astrom = ((eraASTROM (*))(dataptrarray[2])); + _ri = ((double (*))(dataptrarray[3])); + _di = ((double (*))(dataptrarray[4])); + + eraAtciqz(*_rc, *_dc, _astrom, _ri, _di); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_atco13(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_rc); + double (*_dc); + double (*_pr); + double (*_pd); + double (*_px); + double (*_rv); + 
double (*_utc1); + double (*_utc2); + double (*_dut1); + double (*_elong); + double (*_phi); + double (*_hm); + double (*_xp); + double (*_yp); + double (*_phpa); + double (*_tc); + double (*_rh); + double (*_wl); + double (*_aob); + double (*_zob); + double (*_hob); + double (*_dob); + double (*_rob); + double (*_eo); + int _c_retval; + int stat_ok = 1; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _rc = ((double (*))(dataptrarray[0])); + _dc = ((double (*))(dataptrarray[1])); + _pr = ((double (*))(dataptrarray[2])); + _pd = ((double (*))(dataptrarray[3])); + _px = ((double (*))(dataptrarray[4])); + _rv = ((double (*))(dataptrarray[5])); + _utc1 = ((double (*))(dataptrarray[6])); + _utc2 = ((double (*))(dataptrarray[7])); + _dut1 = ((double (*))(dataptrarray[8])); + _elong = ((double (*))(dataptrarray[9])); + _phi = ((double (*))(dataptrarray[10])); + _hm = ((double (*))(dataptrarray[11])); + _xp = ((double (*))(dataptrarray[12])); + _yp = ((double (*))(dataptrarray[13])); + _phpa = ((double (*))(dataptrarray[14])); + _tc = ((double (*))(dataptrarray[15])); + _rh = ((double (*))(dataptrarray[16])); + _wl = ((double (*))(dataptrarray[17])); + _aob = ((double (*))(dataptrarray[18])); + _zob = ((double (*))(dataptrarray[19])); + _hob = ((double (*))(dataptrarray[20])); + _dob = ((double (*))(dataptrarray[21])); + _rob = ((double (*))(dataptrarray[22])); + _eo = ((double (*))(dataptrarray[23])); + + _c_retval = eraAtco13(*_rc, *_dc, *_pr, *_pd, *_px, *_rv, *_utc1, *_utc2, *_dut1, *_elong, *_phi, *_hm, *_xp, *_yp, *_phpa, *_tc, *_rh, *_wl, _aob, _zob, _hob, _dob, _rob, _eo); + *((int *)(dataptrarray[24])) = _c_retval; + if (_c_retval) { + stat_ok = 0; + } + } while (iternext(it)); + + Py_END_ALLOW_THREADS + if (stat_ok) { + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } +} + +static PyObject *Py_atic13(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_ri); + double (*_di); + double (*_date1); + double (*_date2); + double (*_rc); + double (*_dc); + double (*_eo); + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _ri = ((double (*))(dataptrarray[0])); + _di = ((double (*))(dataptrarray[1])); + _date1 = ((double (*))(dataptrarray[2])); + _date2 = ((double (*))(dataptrarray[3])); + _rc = ((double (*))(dataptrarray[4])); + _dc = ((double (*))(dataptrarray[5])); + _eo = ((double (*))(dataptrarray[6])); + + eraAtic13(*_ri, *_di, *_date1, *_date2, _rc, _dc, _eo); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_aticq(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_ri); + double (*_di); + eraASTROM (*_astrom); + double (*_rc); + double (*_dc); + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _ri = ((double (*))(dataptrarray[0])); + _di = ((double (*))(dataptrarray[1])); + _astrom = ((eraASTROM (*))(dataptrarray[2])); + _rc = ((double (*))(dataptrarray[3])); + _dc = ((double (*))(dataptrarray[4])); + + eraAticq(*_ri, *_di, _astrom, _rc, _dc); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_aticqn(PyObject *self, PyObject 
*args, PyObject *kwds) +{ + double (*_ri); + double (*_di); + eraASTROM (*_astrom); + int (*_n); + eraLDBODY (*_b); + double (*_rc); + double (*_dc); + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _ri = ((double (*))(dataptrarray[0])); + _di = ((double (*))(dataptrarray[1])); + _astrom = ((eraASTROM (*))(dataptrarray[2])); + _n = ((int (*))(dataptrarray[3])); + _b = ((eraLDBODY (*))(dataptrarray[4])); + _rc = ((double (*))(dataptrarray[5])); + _dc = ((double (*))(dataptrarray[6])); + + eraAticqn(*_ri, *_di, _astrom, *_n, _b, _rc, _dc); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_atio13(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_ri); + double (*_di); + double (*_utc1); + double (*_utc2); + double (*_dut1); + double (*_elong); + double (*_phi); + double (*_hm); + double (*_xp); + double (*_yp); + double (*_phpa); + double (*_tc); + double (*_rh); + double (*_wl); + double (*_aob); + double (*_zob); + double (*_hob); + double (*_dob); + double (*_rob); + int _c_retval; + int stat_ok = 1; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _ri = ((double (*))(dataptrarray[0])); + _di = ((double (*))(dataptrarray[1])); + _utc1 = ((double (*))(dataptrarray[2])); + _utc2 = ((double (*))(dataptrarray[3])); + _dut1 = ((double (*))(dataptrarray[4])); + _elong = ((double (*))(dataptrarray[5])); + _phi = ((double (*))(dataptrarray[6])); + _hm = ((double (*))(dataptrarray[7])); + _xp = ((double (*))(dataptrarray[8])); + _yp = ((double (*))(dataptrarray[9])); + _phpa = ((double (*))(dataptrarray[10])); + _tc = ((double (*))(dataptrarray[11])); + _rh = ((double (*))(dataptrarray[12])); + _wl = ((double (*))(dataptrarray[13])); + _aob = ((double (*))(dataptrarray[14])); + _zob = ((double (*))(dataptrarray[15])); + _hob = ((double (*))(dataptrarray[16])); + _dob = ((double (*))(dataptrarray[17])); + _rob = ((double (*))(dataptrarray[18])); + + _c_retval = eraAtio13(*_ri, *_di, *_utc1, *_utc2, *_dut1, *_elong, *_phi, *_hm, *_xp, *_yp, *_phpa, *_tc, *_rh, *_wl, _aob, _zob, _hob, _dob, _rob); + *((int *)(dataptrarray[19])) = _c_retval; + if (_c_retval) { + stat_ok = 0; + } + } while (iternext(it)); + + Py_END_ALLOW_THREADS + if (stat_ok) { + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } +} + +static PyObject *Py_atioq(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_ri); + double (*_di); + eraASTROM (*_astrom); + double (*_aob); + double (*_zob); + double (*_hob); + double (*_dob); + double (*_rob); + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _ri = ((double (*))(dataptrarray[0])); + _di = ((double (*))(dataptrarray[1])); + _astrom = ((eraASTROM (*))(dataptrarray[2])); + _aob = ((double (*))(dataptrarray[3])); + _zob = ((double (*))(dataptrarray[4])); + _hob = ((double (*))(dataptrarray[5])); + _dob = ((double (*))(dataptrarray[6])); + _rob = ((double (*))(dataptrarray[7])); + + eraAtioq(*_ri, *_di, _astrom, _aob, _zob, _hob, _dob, _rob); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_atoc13(PyObject *self, 
+static PyObject *Py_atoc13(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    const char (*_type);
+    double (*_ob1);
+    double (*_ob2);
+    double (*_utc1);
+    double (*_utc2);
+    double (*_dut1);
+    double (*_elong);
+    double (*_phi);
+    double (*_hm);
+    double (*_xp);
+    double (*_yp);
+    double (*_phpa);
+    double (*_tc);
+    double (*_rh);
+    double (*_wl);
+    double (*_rc);
+    double (*_dc);
+    int _c_retval;
+    int stat_ok = 1;
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _type = ((const char (*))(dataptrarray[0]));
+        _ob1 = ((double (*))(dataptrarray[1]));
+        _ob2 = ((double (*))(dataptrarray[2]));
+        _utc1 = ((double (*))(dataptrarray[3]));
+        _utc2 = ((double (*))(dataptrarray[4]));
+        _dut1 = ((double (*))(dataptrarray[5]));
+        _elong = ((double (*))(dataptrarray[6]));
+        _phi = ((double (*))(dataptrarray[7]));
+        _hm = ((double (*))(dataptrarray[8]));
+        _xp = ((double (*))(dataptrarray[9]));
+        _yp = ((double (*))(dataptrarray[10]));
+        _phpa = ((double (*))(dataptrarray[11]));
+        _tc = ((double (*))(dataptrarray[12]));
+        _rh = ((double (*))(dataptrarray[13]));
+        _wl = ((double (*))(dataptrarray[14]));
+        _rc = ((double (*))(dataptrarray[15]));
+        _dc = ((double (*))(dataptrarray[16]));
+
+        _c_retval = eraAtoc13(_type, *_ob1, *_ob2, *_utc1, *_utc2, *_dut1, *_elong, *_phi, *_hm, *_xp, *_yp, *_phpa, *_tc, *_rh, *_wl, _rc, _dc);
+        *((int *)(dataptrarray[17])) = _c_retval;
+        if (_c_retval) {
+            stat_ok = 0;
+        }
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    if (stat_ok) {
+        Py_RETURN_TRUE;
+    } else {
+        Py_RETURN_FALSE;
+    }
+}
+
+static PyObject *Py_atoi13(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    const char (*_type);
+    double (*_ob1);
+    double (*_ob2);
+    double (*_utc1);
+    double (*_utc2);
+    double (*_dut1);
+    double (*_elong);
+    double (*_phi);
+    double (*_hm);
+    double (*_xp);
+    double (*_yp);
+    double (*_phpa);
+    double (*_tc);
+    double (*_rh);
+    double (*_wl);
+    double (*_ri);
+    double (*_di);
+    int _c_retval;
+    int stat_ok = 1;
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _type = ((const char (*))(dataptrarray[0]));
+        _ob1 = ((double (*))(dataptrarray[1]));
+        _ob2 = ((double (*))(dataptrarray[2]));
+        _utc1 = ((double (*))(dataptrarray[3]));
+        _utc2 = ((double (*))(dataptrarray[4]));
+        _dut1 = ((double (*))(dataptrarray[5]));
+        _elong = ((double (*))(dataptrarray[6]));
+        _phi = ((double (*))(dataptrarray[7]));
+        _hm = ((double (*))(dataptrarray[8]));
+        _xp = ((double (*))(dataptrarray[9]));
+        _yp = ((double (*))(dataptrarray[10]));
+        _phpa = ((double (*))(dataptrarray[11]));
+        _tc = ((double (*))(dataptrarray[12]));
+        _rh = ((double (*))(dataptrarray[13]));
+        _wl = ((double (*))(dataptrarray[14]));
+        _ri = ((double (*))(dataptrarray[15]));
+        _di = ((double (*))(dataptrarray[16]));
+
+        _c_retval = eraAtoi13(_type, *_ob1, *_ob2, *_utc1, *_utc2, *_dut1, *_elong, *_phi, *_hm, *_xp, *_yp, *_phpa, *_tc, *_rh, *_wl, _ri, _di);
+        *((int *)(dataptrarray[17])) = _c_retval;
+        if (_c_retval) {
+            stat_ok = 0;
+        }
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    if (stat_ok) {
+        Py_RETURN_TRUE;
+    } else {
+        Py_RETURN_FALSE;
+    }
+}
+
+static PyObject *Py_atoiq(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    const char (*_type);
+    double (*_ob1);
+    double (*_ob2);
+    eraASTROM (*_astrom);
+    double (*_ri);
+    double (*_di);
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _type = ((const char (*))(dataptrarray[0]));
+        _ob1 = ((double (*))(dataptrarray[1]));
+        _ob2 = ((double (*))(dataptrarray[2]));
+        _astrom = ((eraASTROM (*))(dataptrarray[3]));
+        _ri = ((double (*))(dataptrarray[4]));
+        _di = ((double (*))(dataptrarray[5]));
+
+        eraAtoiq(_type, *_ob1, *_ob2, _astrom, _ri, _di);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_ld(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_bm);
+    double (*_p)[3];
+    double (*_q)[3];
+    double (*_e)[3];
+    double (*_em);
+    double (*_dlim);
+    double (*_p1)[3];
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _bm = ((double (*))(dataptrarray[0]));
+        _p = ((double (*)[3])(dataptrarray[1]));
+        _q = ((double (*)[3])(dataptrarray[2]));
+        _e = ((double (*)[3])(dataptrarray[3]));
+        _em = ((double (*))(dataptrarray[4]));
+        _dlim = ((double (*))(dataptrarray[5]));
+        _p1 = ((double (*)[3])(dataptrarray[6]));
+
+        eraLd(*_bm, *_p, *_q, *_e, *_em, *_dlim, *_p1);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_ldn(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    int (*_n);
+    eraLDBODY (*_b);
+    double (*_ob)[3];
+    double (*_sc)[3];
+    double (*_sn)[3];
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _n = ((int (*))(dataptrarray[0]));
+        _b = ((eraLDBODY (*))(dataptrarray[1]));
+        _ob = ((double (*)[3])(dataptrarray[2]));
+        _sc = ((double (*)[3])(dataptrarray[3]));
+        _sn = ((double (*)[3])(dataptrarray[4]));
+
+        eraLdn(*_n, _b, *_ob, *_sc, *_sn);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_ldsun(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_p)[3];
+    double (*_e)[3];
+    double (*_em);
+    double (*_p1)[3];
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _p = ((double (*)[3])(dataptrarray[0]));
+        _e = ((double (*)[3])(dataptrarray[1]));
+        _em = ((double (*))(dataptrarray[2]));
+        _p1 = ((double (*)[3])(dataptrarray[3]));
+
+        eraLdsun(*_p, *_e, *_em, *_p1);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
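+/* Fixed-size vector and matrix operands, as in eraLd above, are handled
+   by casting the raw data pointer to a pointer-to-array type (double
+   (*)[3], double (*)[2][3], ...) and dereferencing it, so the ERFA
+   routine receives an ordinary C array of the expected shape. */
+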
+static PyObject *Py_pmpx(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_rc);
+    double (*_dc);
+    double (*_pr);
+    double (*_pd);
+    double (*_px);
+    double (*_rv);
+    double (*_pmt);
+    double (*_pob)[3];
+    double (*_pco)[3];
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _rc = ((double (*))(dataptrarray[0]));
+        _dc = ((double (*))(dataptrarray[1]));
+        _pr = ((double (*))(dataptrarray[2]));
+        _pd = ((double (*))(dataptrarray[3]));
+        _px = ((double (*))(dataptrarray[4]));
+        _rv = ((double (*))(dataptrarray[5]));
+        _pmt = ((double (*))(dataptrarray[6]));
+        _pob = ((double (*)[3])(dataptrarray[7]));
+        _pco = ((double (*)[3])(dataptrarray[8]));
+
+        eraPmpx(*_rc, *_dc, *_pr, *_pd, *_px, *_rv, *_pmt, *_pob, *_pco);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_pmsafe(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_ra1);
+    double (*_dec1);
+    double (*_pmr1);
+    double (*_pmd1);
+    double (*_px1);
+    double (*_rv1);
+    double (*_ep1a);
+    double (*_ep1b);
+    double (*_ep2a);
+    double (*_ep2b);
+    double (*_ra2);
+    double (*_dec2);
+    double (*_pmr2);
+    double (*_pmd2);
+    double (*_px2);
+    double (*_rv2);
+    int _c_retval;
+    int stat_ok = 1;
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _ra1 = ((double (*))(dataptrarray[0]));
+        _dec1 = ((double (*))(dataptrarray[1]));
+        _pmr1 = ((double (*))(dataptrarray[2]));
+        _pmd1 = ((double (*))(dataptrarray[3]));
+        _px1 = ((double (*))(dataptrarray[4]));
+        _rv1 = ((double (*))(dataptrarray[5]));
+        _ep1a = ((double (*))(dataptrarray[6]));
+        _ep1b = ((double (*))(dataptrarray[7]));
+        _ep2a = ((double (*))(dataptrarray[8]));
+        _ep2b = ((double (*))(dataptrarray[9]));
+        _ra2 = ((double (*))(dataptrarray[10]));
+        _dec2 = ((double (*))(dataptrarray[11]));
+        _pmr2 = ((double (*))(dataptrarray[12]));
+        _pmd2 = ((double (*))(dataptrarray[13]));
+        _px2 = ((double (*))(dataptrarray[14]));
+        _rv2 = ((double (*))(dataptrarray[15]));
+
+        _c_retval = eraPmsafe(*_ra1, *_dec1, *_pmr1, *_pmd1, *_px1, *_rv1, *_ep1a, *_ep1b, *_ep2a, *_ep2b, _ra2, _dec2, _pmr2, _pmd2, _px2, _rv2);
+        *((int *)(dataptrarray[16])) = _c_retval;
+        if (_c_retval) {
+            stat_ok = 0;
+        }
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    if (stat_ok) {
+        Py_RETURN_TRUE;
+    } else {
+        Py_RETURN_FALSE;
+    }
+}
+
+static PyObject *Py_pvtob(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_elong);
+    double (*_phi);
+    double (*_hm);
+    double (*_xp);
+    double (*_yp);
+    double (*_sp);
+    double (*_theta);
+    double (*_pv)[2][3];
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _elong = ((double (*))(dataptrarray[0]));
+        _phi = ((double (*))(dataptrarray[1]));
+        _hm = ((double (*))(dataptrarray[2]));
+        _xp = ((double (*))(dataptrarray[3]));
+        _yp = ((double (*))(dataptrarray[4]));
+        _sp = ((double (*))(dataptrarray[5]));
+        _theta = ((double (*))(dataptrarray[6]));
+        _pv = ((double (*)[2][3])(dataptrarray[7]));
+
+        eraPvtob(*_elong, *_phi, *_hm, *_xp, *_yp, *_sp, *_theta, *_pv);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_refco(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_phpa);
+    double (*_tc);
+    double (*_rh);
+    double (*_wl);
+    double (*_refa);
+    double (*_refb);
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _phpa = ((double (*))(dataptrarray[0]));
+        _tc = ((double (*))(dataptrarray[1]));
+        _rh = ((double (*))(dataptrarray[2]));
+        _wl = ((double (*))(dataptrarray[3]));
+        _refa = ((double (*))(dataptrarray[4]));
+        _refb = ((double (*))(dataptrarray[5]));
+
+        eraRefco(*_phpa, *_tc, *_rh, *_wl, _refa, _refb);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
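+/* The ephemeris wrappers eraEpv00 and eraPlan94 follow the same status
+   convention as the transform wrappers above: the int return value goes
+   into the last operand and any non-zero status clears stat_ok. */
+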
+static PyObject *Py_epv00(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_date1);
+    double (*_date2);
+    double (*_pvh)[2][3];
+    double (*_pvb)[2][3];
+    int _c_retval;
+    int stat_ok = 1;
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _date1 = ((double (*))(dataptrarray[0]));
+        _date2 = ((double (*))(dataptrarray[1]));
+        _pvh = ((double (*)[2][3])(dataptrarray[2]));
+        _pvb = ((double (*)[2][3])(dataptrarray[3]));
+
+        _c_retval = eraEpv00(*_date1, *_date2, *_pvh, *_pvb);
+        *((int *)(dataptrarray[4])) = _c_retval;
+        if (_c_retval) {
+            stat_ok = 0;
+        }
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    if (stat_ok) {
+        Py_RETURN_TRUE;
+    } else {
+        Py_RETURN_FALSE;
+    }
+}
+
+static PyObject *Py_plan94(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_date1);
+    double (*_date2);
+    int (*_np);
+    double (*_pv)[2][3];
+    int _c_retval;
+    int stat_ok = 1;
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _date1 = ((double (*))(dataptrarray[0]));
+        _date2 = ((double (*))(dataptrarray[1]));
+        _np = ((int (*))(dataptrarray[2]));
+        _pv = ((double (*)[2][3])(dataptrarray[3]));
+
+        _c_retval = eraPlan94(*_date1, *_date2, *_np, *_pv);
+        *((int *)(dataptrarray[4])) = _c_retval;
+        if (_c_retval) {
+            stat_ok = 0;
+        }
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    if (stat_ok) {
+        Py_RETURN_TRUE;
+    } else {
+        Py_RETURN_FALSE;
+    }
+}
+
+static PyObject *Py_fad03(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_t);
+    double _c_retval;
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _t = ((double (*))(dataptrarray[0]));
+
+        _c_retval = eraFad03(*_t);
+        *((double *)(dataptrarray[1])) = _c_retval;
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_fae03(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_t);
+    double _c_retval;
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _t = ((double (*))(dataptrarray[0]));
+
+        _c_retval = eraFae03(*_t);
+        *((double *)(dataptrarray[1])) = _c_retval;
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_faf03(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_t);
+    double _c_retval;
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _t = ((double (*))(dataptrarray[0]));
+
+        _c_retval = eraFaf03(*_t);
+        *((double *)(dataptrarray[1])) = _c_retval;
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
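+/* The eraFa*03 fundamental-argument wrappers are the simplest case:
+   one double in (operand 0), one double out (operand 1), no status. */
+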
+static PyObject *Py_faju03(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_t);
+    double _c_retval;
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _t = ((double (*))(dataptrarray[0]));
+
+        _c_retval = eraFaju03(*_t);
+        *((double *)(dataptrarray[1])) = _c_retval;
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_fal03(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_t);
+    double _c_retval;
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _t = ((double (*))(dataptrarray[0]));
+
+        _c_retval = eraFal03(*_t);
+        *((double *)(dataptrarray[1])) = _c_retval;
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_falp03(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_t);
+    double _c_retval;
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _t = ((double (*))(dataptrarray[0]));
+
+        _c_retval = eraFalp03(*_t);
+        *((double *)(dataptrarray[1])) = _c_retval;
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_fama03(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_t);
+    double _c_retval;
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _t = ((double (*))(dataptrarray[0]));
+
+        _c_retval = eraFama03(*_t);
+        *((double *)(dataptrarray[1])) = _c_retval;
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_fame03(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_t);
+    double _c_retval;
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _t = ((double (*))(dataptrarray[0]));
+
+        _c_retval = eraFame03(*_t);
+        *((double *)(dataptrarray[1])) = _c_retval;
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_fane03(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_t);
+    double _c_retval;
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _t = ((double (*))(dataptrarray[0]));
+
+        _c_retval = eraFane03(*_t);
+        *((double *)(dataptrarray[1])) = _c_retval;
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_faom03(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_t);
+    double _c_retval;
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _t = ((double (*))(dataptrarray[0]));
+
+        _c_retval = eraFaom03(*_t);
+        *((double *)(dataptrarray[1])) = _c_retval;
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_fapa03(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_t);
+    double _c_retval;
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _t = ((double (*))(dataptrarray[0]));
+
+        _c_retval = eraFapa03(*_t);
+        *((double *)(dataptrarray[1])) = _c_retval;
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_fasa03(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_t);
+    double _c_retval;
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _t = ((double (*))(dataptrarray[0]));
+
+        _c_retval = eraFasa03(*_t);
+        *((double *)(dataptrarray[1])) = _c_retval;
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_faur03(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_t);
+    double _c_retval;
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _t = ((double (*))(dataptrarray[0]));
+
+        _c_retval = eraFaur03(*_t);
+        *((double *)(dataptrarray[1])) = _c_retval;
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_fave03(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_t);
+    double _c_retval;
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _t = ((double (*))(dataptrarray[0]));
+
+        _c_retval = eraFave03(*_t);
+        *((double *)(dataptrarray[1])) = _c_retval;
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_bi00(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_dpsibi);
+    double (*_depsbi);
+    double (*_dra);
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _dpsibi = ((double (*))(dataptrarray[0]));
+        _depsbi = ((double (*))(dataptrarray[1]));
+        _dra = ((double (*))(dataptrarray[2]));
+
+        eraBi00(_dpsibi, _depsbi, _dra);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_bp00(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_date1);
+    double (*_date2);
+    double (*_rb)[3][3];
+    double (*_rp)[3][3];
+    double (*_rbp)[3][3];
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _date1 = ((double (*))(dataptrarray[0]));
+        _date2 = ((double (*))(dataptrarray[1]));
+        _rb = ((double (*)[3][3])(dataptrarray[2]));
+        _rp = ((double (*)[3][3])(dataptrarray[3]));
+        _rbp = ((double (*)[3][3])(dataptrarray[4]));
+
+        eraBp00(*_date1, *_date2, *_rb, *_rp, *_rbp);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
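+/* As with eraBp00 above, the remaining bias/precession and
+   celestial-to-intermediate wrappers write 3x3 rotation matrices
+   through double (*)[3][3] output operands. */
+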
+static PyObject *Py_bp06(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_date1);
+    double (*_date2);
+    double (*_rb)[3][3];
+    double (*_rp)[3][3];
+    double (*_rbp)[3][3];
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _date1 = ((double (*))(dataptrarray[0]));
+        _date2 = ((double (*))(dataptrarray[1]));
+        _rb = ((double (*)[3][3])(dataptrarray[2]));
+        _rp = ((double (*)[3][3])(dataptrarray[3]));
+        _rbp = ((double (*)[3][3])(dataptrarray[4]));
+
+        eraBp06(*_date1, *_date2, *_rb, *_rp, *_rbp);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_bpn2xy(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_rbpn)[3][3];
+    double (*_x);
+    double (*_y);
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _rbpn = ((double (*)[3][3])(dataptrarray[0]));
+        _x = ((double (*))(dataptrarray[1]));
+        _y = ((double (*))(dataptrarray[2]));
+
+        eraBpn2xy(*_rbpn, _x, _y);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_c2i00a(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_date1);
+    double (*_date2);
+    double (*_rc2i)[3][3];
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _date1 = ((double (*))(dataptrarray[0]));
+        _date2 = ((double (*))(dataptrarray[1]));
+        _rc2i = ((double (*)[3][3])(dataptrarray[2]));
+
+        eraC2i00a(*_date1, *_date2, *_rc2i);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_c2i00b(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_date1);
+    double (*_date2);
+    double (*_rc2i)[3][3];
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _date1 = ((double (*))(dataptrarray[0]));
+        _date2 = ((double (*))(dataptrarray[1]));
+        _rc2i = ((double (*)[3][3])(dataptrarray[2]));
+
+        eraC2i00b(*_date1, *_date2, *_rc2i);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_c2i06a(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_date1);
+    double (*_date2);
+    double (*_rc2i)[3][3];
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _date1 = ((double (*))(dataptrarray[0]));
+        _date2 = ((double (*))(dataptrarray[1]));
+        _rc2i = ((double (*)[3][3])(dataptrarray[2]));
+
+        eraC2i06a(*_date1, *_date2, *_rc2i);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_c2ibpn(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_date1);
+    double (*_date2);
+    double (*_rbpn)[3][3];
+    double (*_rc2i)[3][3];
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _date1 = ((double (*))(dataptrarray[0]));
+        _date2 = ((double (*))(dataptrarray[1]));
+        _rbpn = ((double (*)[3][3])(dataptrarray[2]));
+        _rc2i = ((double (*)[3][3])(dataptrarray[3]));
+
+        eraC2ibpn(*_date1, *_date2, *_rbpn, *_rc2i);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_c2ixy(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_date1);
+    double (*_date2);
+    double (*_x);
+    double (*_y);
+    double (*_rc2i)[3][3];
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _date1 = ((double (*))(dataptrarray[0]));
+        _date2 = ((double (*))(dataptrarray[1]));
+        _x = ((double (*))(dataptrarray[2]));
+        _y = ((double (*))(dataptrarray[3]));
+        _rc2i = ((double (*)[3][3])(dataptrarray[4]));
+
+        eraC2ixy(*_date1, *_date2, *_x, *_y, *_rc2i);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_c2ixys(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_x);
+    double (*_y);
+    double (*_s);
+    double (*_rc2i)[3][3];
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _x = ((double (*))(dataptrarray[0]));
+        _y = ((double (*))(dataptrarray[1]));
+        _s = ((double (*))(dataptrarray[2]));
+        _rc2i = ((double (*)[3][3])(dataptrarray[3]));
+
+        eraC2ixys(*_x, *_y, *_s, *_rc2i);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_c2t00a(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_tta);
+    double (*_ttb);
+    double (*_uta);
+    double (*_utb);
+    double (*_xp);
+    double (*_yp);
+    double (*_rc2t)[3][3];
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _tta = ((double (*))(dataptrarray[0]));
+        _ttb = ((double (*))(dataptrarray[1]));
+        _uta = ((double (*))(dataptrarray[2]));
+        _utb = ((double (*))(dataptrarray[3]));
+        _xp = ((double (*))(dataptrarray[4]));
+        _yp = ((double (*))(dataptrarray[5]));
+        _rc2t = ((double (*)[3][3])(dataptrarray[6]));
+
+        eraC2t00a(*_tta, *_ttb, *_uta, *_utb, *_xp, *_yp, *_rc2t);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_c2t00b(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_tta);
+    double (*_ttb);
+    double (*_uta);
+    double (*_utb);
+    double (*_xp);
+    double (*_yp);
+    double (*_rc2t)[3][3];
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _tta = ((double (*))(dataptrarray[0]));
+        _ttb = ((double (*))(dataptrarray[1]));
+        _uta = ((double (*))(dataptrarray[2]));
+        _utb = ((double (*))(dataptrarray[3]));
+        _xp = ((double (*))(dataptrarray[4]));
+        _yp = ((double (*))(dataptrarray[5]));
+        _rc2t = ((double (*)[3][3])(dataptrarray[6]));
+
+        eraC2t00b(*_tta, *_ttb, *_uta, *_utb, *_xp, *_yp, *_rc2t);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_c2t06a(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_tta);
+    double (*_ttb);
+    double (*_uta);
+    double (*_utb);
+    double (*_xp);
+    double (*_yp);
+    double (*_rc2t)[3][3];
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _tta = ((double (*))(dataptrarray[0]));
+        _ttb = ((double (*))(dataptrarray[1]));
+        _uta = ((double (*))(dataptrarray[2]));
+        _utb = ((double (*))(dataptrarray[3]));
+        _xp = ((double (*))(dataptrarray[4]));
+        _yp = ((double (*))(dataptrarray[5]));
+        _rc2t = ((double (*)[3][3])(dataptrarray[6]));
+
+        eraC2t06a(*_tta, *_ttb, *_uta, *_utb, *_xp, *_yp, *_rc2t);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_c2tcio(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_rc2i)[3][3];
+    double (*_era);
+    double (*_rpom)[3][3];
+    double (*_rc2t)[3][3];
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _rc2i = ((double (*)[3][3])(dataptrarray[0]));
+        _era = ((double (*))(dataptrarray[1]));
+        _rpom = ((double (*)[3][3])(dataptrarray[2]));
+        _rc2t = ((double (*)[3][3])(dataptrarray[3]));
+
+        eraC2tcio(*_rc2i, *_era, *_rpom, *_rc2t);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_c2teqx(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_rbpn)[3][3];
+    double (*_gst);
+    double (*_rpom)[3][3];
+    double (*_rc2t)[3][3];
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _rbpn = ((double (*)[3][3])(dataptrarray[0]));
+        _gst = ((double (*))(dataptrarray[1]));
+        _rpom = ((double (*)[3][3])(dataptrarray[2]));
+        _rc2t = ((double (*)[3][3])(dataptrarray[3]));
+
+        eraC2teqx(*_rbpn, *_gst, *_rpom, *_rc2t);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_c2tpe(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_tta);
+    double (*_ttb);
+    double (*_uta);
+    double (*_utb);
+    double (*_dpsi);
+    double (*_deps);
+    double (*_xp);
+    double (*_yp);
+    double (*_rc2t)[3][3];
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _tta = ((double (*))(dataptrarray[0]));
+        _ttb = ((double (*))(dataptrarray[1]));
+        _uta = ((double (*))(dataptrarray[2]));
+        _utb = ((double (*))(dataptrarray[3]));
+        _dpsi = ((double (*))(dataptrarray[4]));
+        _deps = ((double (*))(dataptrarray[5]));
+        _xp = ((double (*))(dataptrarray[6]));
+        _yp = ((double (*))(dataptrarray[7]));
+        _rc2t = ((double (*)[3][3])(dataptrarray[8]));
+
+        eraC2tpe(*_tta, *_ttb, *_uta, *_utb, *_dpsi, *_deps, *_xp, *_yp, *_rc2t);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_c2txy(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_tta);
+    double (*_ttb);
+    double (*_uta);
+    double (*_utb);
+    double (*_x);
+    double (*_y);
+    double (*_xp);
+    double (*_yp);
+    double (*_rc2t)[3][3];
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _tta = ((double (*))(dataptrarray[0]));
+        _ttb = ((double (*))(dataptrarray[1]));
+        _uta = ((double (*))(dataptrarray[2]));
+        _utb = ((double (*))(dataptrarray[3]));
+        _x = ((double (*))(dataptrarray[4]));
+        _y = ((double (*))(dataptrarray[5]));
+        _xp = ((double (*))(dataptrarray[6]));
+        _yp = ((double (*))(dataptrarray[7]));
+        _rc2t = ((double (*)[3][3])(dataptrarray[8]));
+
+        eraC2txy(*_tta, *_ttb, *_uta, *_utb, *_x, *_y, *_xp, *_yp, *_rc2t);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
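+/* Wrappers for double-valued ERFA functions (eraEo06a, eraEors, and the
+   s00/s06/sp00 family further down) store the return value in the last
+   operand and return None rather than a status flag. */
+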
+static PyObject *Py_eo06a(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_date1);
+    double (*_date2);
+    double _c_retval;
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _date1 = ((double (*))(dataptrarray[0]));
+        _date2 = ((double (*))(dataptrarray[1]));
+
+        _c_retval = eraEo06a(*_date1, *_date2);
+        *((double *)(dataptrarray[2])) = _c_retval;
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_eors(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_rnpb)[3][3];
+    double (*_s);
+    double _c_retval;
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _rnpb = ((double (*)[3][3])(dataptrarray[0]));
+        _s = ((double (*))(dataptrarray[1]));
+
+        _c_retval = eraEors(*_rnpb, *_s);
+        *((double *)(dataptrarray[2])) = _c_retval;
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_fw2m(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_gamb);
+    double (*_phib);
+    double (*_psi);
+    double (*_eps);
+    double (*_r)[3][3];
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _gamb = ((double (*))(dataptrarray[0]));
+        _phib = ((double (*))(dataptrarray[1]));
+        _psi = ((double (*))(dataptrarray[2]));
+        _eps = ((double (*))(dataptrarray[3]));
+        _r = ((double (*)[3][3])(dataptrarray[4]));
+
+        eraFw2m(*_gamb, *_phib, *_psi, *_eps, *_r);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_fw2xy(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_gamb);
+    double (*_phib);
+    double (*_psi);
+    double (*_eps);
+    double (*_x);
+    double (*_y);
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _gamb = ((double (*))(dataptrarray[0]));
+        _phib = ((double (*))(dataptrarray[1]));
+        _psi = ((double (*))(dataptrarray[2]));
+        _eps = ((double (*))(dataptrarray[3]));
+        _x = ((double (*))(dataptrarray[4]));
+        _y = ((double (*))(dataptrarray[5]));
+
+        eraFw2xy(*_gamb, *_phib, *_psi, *_eps, _x, _y);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_ltp(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_epj);
+    double (*_rp)[3][3];
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _epj = ((double (*))(dataptrarray[0]));
+        _rp = ((double (*)[3][3])(dataptrarray[1]));
+
+        eraLtp(*_epj, *_rp);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_ltpb(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_epj);
+    double (*_rpb)[3][3];
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _epj = ((double (*))(dataptrarray[0]));
+        _rpb = ((double (*)[3][3])(dataptrarray[1]));
+
+        eraLtpb(*_epj, *_rpb);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_ltpecl(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_epj);
+    double (*_vec)[3];
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _epj = ((double (*))(dataptrarray[0]));
+        _vec = ((double (*)[3])(dataptrarray[1]));
+
+        eraLtpecl(*_epj, *_vec);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_ltpequ(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_epj);
+    double (*_veq)[3];
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _epj = ((double (*))(dataptrarray[0]));
+        _veq = ((double (*)[3])(dataptrarray[1]));
+
+        eraLtpequ(*_epj, *_veq);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_num00a(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_date1);
+    double (*_date2);
+    double (*_rmatn)[3][3];
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _date1 = ((double (*))(dataptrarray[0]));
+        _date2 = ((double (*))(dataptrarray[1]));
+        _rmatn = ((double (*)[3][3])(dataptrarray[2]));
+
+        eraNum00a(*_date1, *_date2, *_rmatn);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_num00b(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_date1);
+    double (*_date2);
+    double (*_rmatn)[3][3];
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _date1 = ((double (*))(dataptrarray[0]));
+        _date2 = ((double (*))(dataptrarray[1]));
+        _rmatn = ((double (*)[3][3])(dataptrarray[2]));
+
+        eraNum00b(*_date1, *_date2, *_rmatn);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_num06a(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_date1);
+    double (*_date2);
+    double (*_rmatn)[3][3];
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _date1 = ((double (*))(dataptrarray[0]));
+        _date2 = ((double (*))(dataptrarray[1]));
+        _rmatn = ((double (*)[3][3])(dataptrarray[2]));
+
+        eraNum06a(*_date1, *_date2, *_rmatn);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_numat(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_epsa);
+    double (*_dpsi);
+    double (*_deps);
+    double (*_rmatn)[3][3];
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _epsa = ((double (*))(dataptrarray[0]));
+        _dpsi = ((double (*))(dataptrarray[1]));
+        _deps = ((double (*))(dataptrarray[2]));
+        _rmatn = ((double (*)[3][3])(dataptrarray[3]));
+
+        eraNumat(*_epsa, *_dpsi, *_deps, *_rmatn);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
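+/* The nutation wrappers below pass _dpsi and _deps without
+   dereferencing: these are pure output scalars that the ERFA routine
+   fills in through the pointers. */
+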
+static PyObject *Py_nut00a(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_date1);
+    double (*_date2);
+    double (*_dpsi);
+    double (*_deps);
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _date1 = ((double (*))(dataptrarray[0]));
+        _date2 = ((double (*))(dataptrarray[1]));
+        _dpsi = ((double (*))(dataptrarray[2]));
+        _deps = ((double (*))(dataptrarray[3]));
+
+        eraNut00a(*_date1, *_date2, _dpsi, _deps);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_nut00b(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_date1);
+    double (*_date2);
+    double (*_dpsi);
+    double (*_deps);
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _date1 = ((double (*))(dataptrarray[0]));
+        _date2 = ((double (*))(dataptrarray[1]));
+        _dpsi = ((double (*))(dataptrarray[2]));
+        _deps = ((double (*))(dataptrarray[3]));
+
+        eraNut00b(*_date1, *_date2, _dpsi, _deps);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_nut06a(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_date1);
+    double (*_date2);
+    double (*_dpsi);
+    double (*_deps);
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _date1 = ((double (*))(dataptrarray[0]));
+        _date2 = ((double (*))(dataptrarray[1]));
+        _dpsi = ((double (*))(dataptrarray[2]));
+        _deps = ((double (*))(dataptrarray[3]));
+
+        eraNut06a(*_date1, *_date2, _dpsi, _deps);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_nut80(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_date1);
+    double (*_date2);
+    double (*_dpsi);
+    double (*_deps);
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _date1 = ((double (*))(dataptrarray[0]));
+        _date2 = ((double (*))(dataptrarray[1]));
+        _dpsi = ((double (*))(dataptrarray[2]));
+        _deps = ((double (*))(dataptrarray[3]));
+
+        eraNut80(*_date1, *_date2, _dpsi, _deps);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_nutm80(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_date1);
+    double (*_date2);
+    double (*_rmatn)[3][3];
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _date1 = ((double (*))(dataptrarray[0]));
+        _date2 = ((double (*))(dataptrarray[1]));
+        _rmatn = ((double (*)[3][3])(dataptrarray[2]));
+
+        eraNutm80(*_date1, *_date2, *_rmatn);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_obl06(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_date1);
+    double (*_date2);
+    double _c_retval;
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _date1 = ((double (*))(dataptrarray[0]));
+        _date2 = ((double (*))(dataptrarray[1]));
+
+        _c_retval = eraObl06(*_date1, *_date2);
+        *((double *)(dataptrarray[2])) = _c_retval;
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_obl80(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_date1);
+    double (*_date2);
+    double _c_retval;
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _date1 = ((double (*))(dataptrarray[0]));
+        _date2 = ((double (*))(dataptrarray[1]));
+
+        _c_retval = eraObl80(*_date1, *_date2);
+        *((double *)(dataptrarray[2])) = _c_retval;
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_p06e(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_date1);
+    double (*_date2);
+    double (*_eps0);
+    double (*_psia);
+    double (*_oma);
+    double (*_bpa);
+    double (*_bqa);
+    double (*_pia);
+    double (*_bpia);
+    double (*_epsa);
+    double (*_chia);
+    double (*_za);
+    double (*_zetaa);
+    double (*_thetaa);
+    double (*_pa);
+    double (*_gam);
+    double (*_phi);
+    double (*_psi);
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _date1 = ((double (*))(dataptrarray[0]));
+        _date2 = ((double (*))(dataptrarray[1]));
+        _eps0 = ((double (*))(dataptrarray[2]));
+        _psia = ((double (*))(dataptrarray[3]));
+        _oma = ((double (*))(dataptrarray[4]));
+        _bpa = ((double (*))(dataptrarray[5]));
+        _bqa = ((double (*))(dataptrarray[6]));
+        _pia = ((double (*))(dataptrarray[7]));
+        _bpia = ((double (*))(dataptrarray[8]));
+        _epsa = ((double (*))(dataptrarray[9]));
+        _chia = ((double (*))(dataptrarray[10]));
+        _za = ((double (*))(dataptrarray[11]));
+        _zetaa = ((double (*))(dataptrarray[12]));
+        _thetaa = ((double (*))(dataptrarray[13]));
+        _pa = ((double (*))(dataptrarray[14]));
+        _gam = ((double (*))(dataptrarray[15]));
+        _phi = ((double (*))(dataptrarray[16]));
+        _psi = ((double (*))(dataptrarray[17]));
+
+        eraP06e(*_date1, *_date2, _eps0, _psia, _oma, _bpa, _bqa, _pia, _bpia, _epsa, _chia, _za, _zetaa, _thetaa, _pa, _gam, _phi, _psi);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_pb06(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_date1);
+    double (*_date2);
+    double (*_bzeta);
+    double (*_bz);
+    double (*_btheta);
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _date1 = ((double (*))(dataptrarray[0]));
+        _date2 = ((double (*))(dataptrarray[1]));
+        _bzeta = ((double (*))(dataptrarray[2]));
+        _bz = ((double (*))(dataptrarray[3]));
+        _btheta = ((double (*))(dataptrarray[4]));
+
+        eraPb06(*_date1, *_date2, _bzeta, _bz, _btheta);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_pfw06(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_date1);
+    double (*_date2);
+    double (*_gamb);
+    double (*_phib);
+    double (*_psib);
+    double (*_epsa);
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _date1 = ((double (*))(dataptrarray[0]));
+        _date2 = ((double (*))(dataptrarray[1]));
+        _gamb = ((double (*))(dataptrarray[2]));
+        _phib = ((double (*))(dataptrarray[3]));
+        _psib = ((double (*))(dataptrarray[4]));
+        _epsa = ((double (*))(dataptrarray[5]));
+
+        eraPfw06(*_date1, *_date2, _gamb, _phib, _psib, _epsa);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_pmat00(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_date1);
+    double (*_date2);
+    double (*_rbp)[3][3];
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _date1 = ((double (*))(dataptrarray[0]));
+        _date2 = ((double (*))(dataptrarray[1]));
+        _rbp = ((double (*)[3][3])(dataptrarray[2]));
+
+        eraPmat00(*_date1, *_date2, *_rbp);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_pmat06(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_date1);
+    double (*_date2);
+    double (*_rbp)[3][3];
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _date1 = ((double (*))(dataptrarray[0]));
+        _date2 = ((double (*))(dataptrarray[1]));
+        _rbp = ((double (*)[3][3])(dataptrarray[2]));
+
+        eraPmat06(*_date1, *_date2, *_rbp);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_pmat76(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_date1);
+    double (*_date2);
+    double (*_rmatp)[3][3];
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _date1 = ((double (*))(dataptrarray[0]));
+        _date2 = ((double (*))(dataptrarray[1]));
+        _rmatp = ((double (*)[3][3])(dataptrarray[2]));
+
+        eraPmat76(*_date1, *_date2, *_rmatp);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_pn00(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_date1);
+    double (*_date2);
+    double (*_dpsi);
+    double (*_deps);
+    double (*_epsa);
+    double (*_rb)[3][3];
+    double (*_rp)[3][3];
+    double (*_rbp)[3][3];
+    double (*_rn)[3][3];
+    double (*_rbpn)[3][3];
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _date1 = ((double (*))(dataptrarray[0]));
+        _date2 = ((double (*))(dataptrarray[1]));
+        _dpsi = ((double (*))(dataptrarray[2]));
+        _deps = ((double (*))(dataptrarray[3]));
+        _epsa = ((double (*))(dataptrarray[4]));
+        _rb = ((double (*)[3][3])(dataptrarray[5]));
+        _rp = ((double (*)[3][3])(dataptrarray[6]));
+        _rbp = ((double (*)[3][3])(dataptrarray[7]));
+        _rn = ((double (*)[3][3])(dataptrarray[8]));
+        _rbpn = ((double (*)[3][3])(dataptrarray[9]));
+
+        eraPn00(*_date1, *_date2, *_dpsi, *_deps, _epsa, *_rb, *_rp, *_rbp, *_rn, *_rbpn);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
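+/* Note the argument difference: eraPn00 and eraPn06 take dpsi and deps
+   as inputs (*_dpsi, *_deps), while the pn00a/pn00b/pn06a variants
+   compute them internally and return them through the same operands
+   (_dpsi, _deps). */
+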
+static PyObject *Py_pn00a(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_date1);
+    double (*_date2);
+    double (*_dpsi);
+    double (*_deps);
+    double (*_epsa);
+    double (*_rb)[3][3];
+    double (*_rp)[3][3];
+    double (*_rbp)[3][3];
+    double (*_rn)[3][3];
+    double (*_rbpn)[3][3];
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _date1 = ((double (*))(dataptrarray[0]));
+        _date2 = ((double (*))(dataptrarray[1]));
+        _dpsi = ((double (*))(dataptrarray[2]));
+        _deps = ((double (*))(dataptrarray[3]));
+        _epsa = ((double (*))(dataptrarray[4]));
+        _rb = ((double (*)[3][3])(dataptrarray[5]));
+        _rp = ((double (*)[3][3])(dataptrarray[6]));
+        _rbp = ((double (*)[3][3])(dataptrarray[7]));
+        _rn = ((double (*)[3][3])(dataptrarray[8]));
+        _rbpn = ((double (*)[3][3])(dataptrarray[9]));
+
+        eraPn00a(*_date1, *_date2, _dpsi, _deps, _epsa, *_rb, *_rp, *_rbp, *_rn, *_rbpn);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_pn00b(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_date1);
+    double (*_date2);
+    double (*_dpsi);
+    double (*_deps);
+    double (*_epsa);
+    double (*_rb)[3][3];
+    double (*_rp)[3][3];
+    double (*_rbp)[3][3];
+    double (*_rn)[3][3];
+    double (*_rbpn)[3][3];
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _date1 = ((double (*))(dataptrarray[0]));
+        _date2 = ((double (*))(dataptrarray[1]));
+        _dpsi = ((double (*))(dataptrarray[2]));
+        _deps = ((double (*))(dataptrarray[3]));
+        _epsa = ((double (*))(dataptrarray[4]));
+        _rb = ((double (*)[3][3])(dataptrarray[5]));
+        _rp = ((double (*)[3][3])(dataptrarray[6]));
+        _rbp = ((double (*)[3][3])(dataptrarray[7]));
+        _rn = ((double (*)[3][3])(dataptrarray[8]));
+        _rbpn = ((double (*)[3][3])(dataptrarray[9]));
+
+        eraPn00b(*_date1, *_date2, _dpsi, _deps, _epsa, *_rb, *_rp, *_rbp, *_rn, *_rbpn);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_pn06(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_date1);
+    double (*_date2);
+    double (*_dpsi);
+    double (*_deps);
+    double (*_epsa);
+    double (*_rb)[3][3];
+    double (*_rp)[3][3];
+    double (*_rbp)[3][3];
+    double (*_rn)[3][3];
+    double (*_rbpn)[3][3];
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _date1 = ((double (*))(dataptrarray[0]));
+        _date2 = ((double (*))(dataptrarray[1]));
+        _dpsi = ((double (*))(dataptrarray[2]));
+        _deps = ((double (*))(dataptrarray[3]));
+        _epsa = ((double (*))(dataptrarray[4]));
+        _rb = ((double (*)[3][3])(dataptrarray[5]));
+        _rp = ((double (*)[3][3])(dataptrarray[6]));
+        _rbp = ((double (*)[3][3])(dataptrarray[7]));
+        _rn = ((double (*)[3][3])(dataptrarray[8]));
+        _rbpn = ((double (*)[3][3])(dataptrarray[9]));
+
+        eraPn06(*_date1, *_date2, *_dpsi, *_deps, _epsa, *_rb, *_rp, *_rbp, *_rn, *_rbpn);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_pn06a(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_date1);
+    double (*_date2);
+    double (*_dpsi);
+    double (*_deps);
+    double (*_epsa);
+    double (*_rb)[3][3];
+    double (*_rp)[3][3];
+    double (*_rbp)[3][3];
+    double (*_rn)[3][3];
+    double (*_rbpn)[3][3];
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _date1 = ((double (*))(dataptrarray[0]));
+        _date2 = ((double (*))(dataptrarray[1]));
+        _dpsi = ((double (*))(dataptrarray[2]));
+        _deps = ((double (*))(dataptrarray[3]));
+        _epsa = ((double (*))(dataptrarray[4]));
+        _rb = ((double (*)[3][3])(dataptrarray[5]));
+        _rp = ((double (*)[3][3])(dataptrarray[6]));
+        _rbp = ((double (*)[3][3])(dataptrarray[7]));
+        _rn = ((double (*)[3][3])(dataptrarray[8]));
+        _rbpn = ((double (*)[3][3])(dataptrarray[9]));
+
+        eraPn06a(*_date1, *_date2, _dpsi, _deps, _epsa, *_rb, *_rp, *_rbp, *_rn, *_rbpn);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_pnm00a(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_date1);
+    double (*_date2);
+    double (*_rbpn)[3][3];
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _date1 = ((double (*))(dataptrarray[0]));
+        _date2 = ((double (*))(dataptrarray[1]));
+        _rbpn = ((double (*)[3][3])(dataptrarray[2]));
+
+        eraPnm00a(*_date1, *_date2, *_rbpn);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_pnm00b(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_date1);
+    double (*_date2);
+    double (*_rbpn)[3][3];
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _date1 = ((double (*))(dataptrarray[0]));
+        _date2 = ((double (*))(dataptrarray[1]));
+        _rbpn = ((double (*)[3][3])(dataptrarray[2]));
+
+        eraPnm00b(*_date1, *_date2, *_rbpn);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_pnm06a(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_date1);
+    double (*_date2);
+    double (*_rnpb)[3][3];
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _date1 = ((double (*))(dataptrarray[0]));
+        _date2 = ((double (*))(dataptrarray[1]));
+        _rnpb = ((double (*)[3][3])(dataptrarray[2]));
+
+        eraPnm06a(*_date1, *_date2, *_rnpb);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_pnm80(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_date1);
+    double (*_date2);
+    double (*_rmatpn)[3][3];
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _date1 = ((double (*))(dataptrarray[0]));
+        _date2 = ((double (*))(dataptrarray[1]));
+        _rmatpn = ((double (*)[3][3])(dataptrarray[2]));
+
+        eraPnm80(*_date1, *_date2, *_rmatpn);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_pom00(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_xp);
+    double (*_yp);
+    double (*_sp);
+    double (*_rpom)[3][3];
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _xp = ((double (*))(dataptrarray[0]));
+        _yp = ((double (*))(dataptrarray[1]));
+        _sp = ((double (*))(dataptrarray[2]));
+        _rpom = ((double (*)[3][3])(dataptrarray[3]));
+
+        eraPom00(*_xp, *_yp, *_sp, *_rpom);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_pr00(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_date1);
+    double (*_date2);
+    double (*_dpsipr);
+    double (*_depspr);
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _date1 = ((double (*))(dataptrarray[0]));
+        _date2 = ((double (*))(dataptrarray[1]));
+        _dpsipr = ((double (*))(dataptrarray[2]));
+        _depspr = ((double (*))(dataptrarray[3]));
+
+        eraPr00(*_date1, *_date2, _dpsipr, _depspr);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_prec76(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_date01);
+    double (*_date02);
+    double (*_date11);
+    double (*_date12);
+    double (*_zeta);
+    double (*_z);
+    double (*_theta);
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _date01 = ((double (*))(dataptrarray[0]));
+        _date02 = ((double (*))(dataptrarray[1]));
+        _date11 = ((double (*))(dataptrarray[2]));
+        _date12 = ((double (*))(dataptrarray[3]));
+        _zeta = ((double (*))(dataptrarray[4]));
+        _z = ((double (*))(dataptrarray[5]));
+        _theta = ((double (*))(dataptrarray[6]));
+
+        eraPrec76(*_date01, *_date02, *_date11, *_date12, _zeta, _z, _theta);
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_s00(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_date1);
+    double (*_date2);
+    double (*_x);
+    double (*_y);
+    double _c_retval;
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _date1 = ((double (*))(dataptrarray[0]));
+        _date2 = ((double (*))(dataptrarray[1]));
+        _x = ((double (*))(dataptrarray[2]));
+        _y = ((double (*))(dataptrarray[3]));
+
+        _c_retval = eraS00(*_date1, *_date2, *_x, *_y);
+        *((double *)(dataptrarray[4])) = _c_retval;
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_s00a(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_date1);
+    double (*_date2);
+    double _c_retval;
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _date1 = ((double (*))(dataptrarray[0]));
+        _date2 = ((double (*))(dataptrarray[1]));
+
+        _c_retval = eraS00a(*_date1, *_date2);
+        *((double *)(dataptrarray[2])) = _c_retval;
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
+static PyObject *Py_s00b(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    double (*_date1);
+    double (*_date2);
+    double _c_retval;
+    NpyIter *it = ((_NpyIterObject *)args)->iter;
+    char **dataptrarray = NpyIter_GetDataPtrArray(it);
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+
+    Py_BEGIN_ALLOW_THREADS
+
+    do {
+        _date1 = ((double (*))(dataptrarray[0]));
+        _date2 = ((double (*))(dataptrarray[1]));
+
+        _c_retval = eraS00b(*_date1, *_date2);
+        *((double *)(dataptrarray[2])) = _c_retval;
+    } while (iternext(it));
+
+    Py_END_ALLOW_THREADS
+    Py_RETURN_NONE;
+}
+
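+/* eraS00 and eraS06 take the CIP X,Y coordinates as inputs; the s00a,
+   s00b and s06a variants below need only the two-part date. */
+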
*self, PyObject *args, PyObject *kwds) +{ + double (*_date1); + double (*_date2); + double (*_x); + double (*_y); + double _c_retval; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _date1 = ((double (*))(dataptrarray[0])); + _date2 = ((double (*))(dataptrarray[1])); + _x = ((double (*))(dataptrarray[2])); + _y = ((double (*))(dataptrarray[3])); + + _c_retval = eraS06(*_date1, *_date2, *_x, *_y); + *((double *)(dataptrarray[4])) = _c_retval; + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_s06a(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_date1); + double (*_date2); + double _c_retval; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _date1 = ((double (*))(dataptrarray[0])); + _date2 = ((double (*))(dataptrarray[1])); + + _c_retval = eraS06a(*_date1, *_date2); + *((double *)(dataptrarray[2])) = _c_retval; + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_sp00(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_date1); + double (*_date2); + double _c_retval; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _date1 = ((double (*))(dataptrarray[0])); + _date2 = ((double (*))(dataptrarray[1])); + + _c_retval = eraSp00(*_date1, *_date2); + *((double *)(dataptrarray[2])) = _c_retval; + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_xy06(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_date1); + double (*_date2); + double (*_x); + double (*_y); + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _date1 = ((double (*))(dataptrarray[0])); + _date2 = ((double (*))(dataptrarray[1])); + _x = ((double (*))(dataptrarray[2])); + _y = ((double (*))(dataptrarray[3])); + + eraXy06(*_date1, *_date2, _x, _y); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_xys00a(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_date1); + double (*_date2); + double (*_x); + double (*_y); + double (*_s); + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _date1 = ((double (*))(dataptrarray[0])); + _date2 = ((double (*))(dataptrarray[1])); + _x = ((double (*))(dataptrarray[2])); + _y = ((double (*))(dataptrarray[3])); + _s = ((double (*))(dataptrarray[4])); + + eraXys00a(*_date1, *_date2, _x, _y, _s); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_xys00b(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_date1); + double (*_date2); + double (*_x); + double (*_y); + double (*_s); + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + 
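+    /*
+     * Generated-wrapper pattern (this file is produced from
+     * core.c.templ by erfa_generator.py): the single METH_O argument
+     * is expected to be the iterator object prepared by the Python
+     * layer in core.py, and dataptrarray[k] points at the current
+     * element of operand k.  Each pass of the do/while below applies
+     * the ERFA routine to one element of the broadcast inputs.
+     */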
do { + _date1 = ((double (*))(dataptrarray[0])); + _date2 = ((double (*))(dataptrarray[1])); + _x = ((double (*))(dataptrarray[2])); + _y = ((double (*))(dataptrarray[3])); + _s = ((double (*))(dataptrarray[4])); + + eraXys00b(*_date1, *_date2, _x, _y, _s); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_xys06a(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_date1); + double (*_date2); + double (*_x); + double (*_y); + double (*_s); + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _date1 = ((double (*))(dataptrarray[0])); + _date2 = ((double (*))(dataptrarray[1])); + _x = ((double (*))(dataptrarray[2])); + _y = ((double (*))(dataptrarray[3])); + _s = ((double (*))(dataptrarray[4])); + + eraXys06a(*_date1, *_date2, _x, _y, _s); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_ee00(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_date1); + double (*_date2); + double (*_epsa); + double (*_dpsi); + double _c_retval; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _date1 = ((double (*))(dataptrarray[0])); + _date2 = ((double (*))(dataptrarray[1])); + _epsa = ((double (*))(dataptrarray[2])); + _dpsi = ((double (*))(dataptrarray[3])); + + _c_retval = eraEe00(*_date1, *_date2, *_epsa, *_dpsi); + *((double *)(dataptrarray[4])) = _c_retval; + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_ee00a(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_date1); + double (*_date2); + double _c_retval; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _date1 = ((double (*))(dataptrarray[0])); + _date2 = ((double (*))(dataptrarray[1])); + + _c_retval = eraEe00a(*_date1, *_date2); + *((double *)(dataptrarray[2])) = _c_retval; + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_ee00b(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_date1); + double (*_date2); + double _c_retval; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _date1 = ((double (*))(dataptrarray[0])); + _date2 = ((double (*))(dataptrarray[1])); + + _c_retval = eraEe00b(*_date1, *_date2); + *((double *)(dataptrarray[2])) = _c_retval; + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_ee06a(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_date1); + double (*_date2); + double _c_retval; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _date1 = ((double (*))(dataptrarray[0])); + _date2 = ((double (*))(dataptrarray[1])); + + _c_retval = eraEe06a(*_date1, *_date2); + *((double *)(dataptrarray[2])) = _c_retval; + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static 
PyObject *Py_eect00(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_date1); + double (*_date2); + double _c_retval; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _date1 = ((double (*))(dataptrarray[0])); + _date2 = ((double (*))(dataptrarray[1])); + + _c_retval = eraEect00(*_date1, *_date2); + *((double *)(dataptrarray[2])) = _c_retval; + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_eqeq94(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_date1); + double (*_date2); + double _c_retval; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _date1 = ((double (*))(dataptrarray[0])); + _date2 = ((double (*))(dataptrarray[1])); + + _c_retval = eraEqeq94(*_date1, *_date2); + *((double *)(dataptrarray[2])) = _c_retval; + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_era00(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_dj1); + double (*_dj2); + double _c_retval; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _dj1 = ((double (*))(dataptrarray[0])); + _dj2 = ((double (*))(dataptrarray[1])); + + _c_retval = eraEra00(*_dj1, *_dj2); + *((double *)(dataptrarray[2])) = _c_retval; + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_gmst00(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_uta); + double (*_utb); + double (*_tta); + double (*_ttb); + double _c_retval; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _uta = ((double (*))(dataptrarray[0])); + _utb = ((double (*))(dataptrarray[1])); + _tta = ((double (*))(dataptrarray[2])); + _ttb = ((double (*))(dataptrarray[3])); + + _c_retval = eraGmst00(*_uta, *_utb, *_tta, *_ttb); + *((double *)(dataptrarray[4])) = _c_retval; + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_gmst06(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_uta); + double (*_utb); + double (*_tta); + double (*_ttb); + double _c_retval; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _uta = ((double (*))(dataptrarray[0])); + _utb = ((double (*))(dataptrarray[1])); + _tta = ((double (*))(dataptrarray[2])); + _ttb = ((double (*))(dataptrarray[3])); + + _c_retval = eraGmst06(*_uta, *_utb, *_tta, *_ttb); + *((double *)(dataptrarray[4])) = _c_retval; + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_gmst82(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_dj1); + double (*_dj2); + double _c_retval; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _dj1 = ((double 
(*))(dataptrarray[0])); + _dj2 = ((double (*))(dataptrarray[1])); + + _c_retval = eraGmst82(*_dj1, *_dj2); + *((double *)(dataptrarray[2])) = _c_retval; + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_gst00a(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_uta); + double (*_utb); + double (*_tta); + double (*_ttb); + double _c_retval; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _uta = ((double (*))(dataptrarray[0])); + _utb = ((double (*))(dataptrarray[1])); + _tta = ((double (*))(dataptrarray[2])); + _ttb = ((double (*))(dataptrarray[3])); + + _c_retval = eraGst00a(*_uta, *_utb, *_tta, *_ttb); + *((double *)(dataptrarray[4])) = _c_retval; + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_gst00b(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_uta); + double (*_utb); + double _c_retval; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _uta = ((double (*))(dataptrarray[0])); + _utb = ((double (*))(dataptrarray[1])); + + _c_retval = eraGst00b(*_uta, *_utb); + *((double *)(dataptrarray[2])) = _c_retval; + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_gst06(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_uta); + double (*_utb); + double (*_tta); + double (*_ttb); + double (*_rnpb)[3][3]; + double _c_retval; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _uta = ((double (*))(dataptrarray[0])); + _utb = ((double (*))(dataptrarray[1])); + _tta = ((double (*))(dataptrarray[2])); + _ttb = ((double (*))(dataptrarray[3])); + _rnpb = ((double (*)[3][3])(dataptrarray[4])); + + _c_retval = eraGst06(*_uta, *_utb, *_tta, *_ttb, *_rnpb); + *((double *)(dataptrarray[5])) = _c_retval; + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_gst06a(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_uta); + double (*_utb); + double (*_tta); + double (*_ttb); + double _c_retval; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _uta = ((double (*))(dataptrarray[0])); + _utb = ((double (*))(dataptrarray[1])); + _tta = ((double (*))(dataptrarray[2])); + _ttb = ((double (*))(dataptrarray[3])); + + _c_retval = eraGst06a(*_uta, *_utb, *_tta, *_ttb); + *((double *)(dataptrarray[4])) = _c_retval; + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_gst94(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_uta); + double (*_utb); + double _c_retval; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _uta = ((double (*))(dataptrarray[0])); + _utb = ((double (*))(dataptrarray[1])); + + _c_retval = eraGst94(*_uta, *_utb); + *((double *)(dataptrarray[2])) = _c_retval; + } 
while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_pvstar(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_pv)[2][3]; + double (*_ra); + double (*_dec); + double (*_pmr); + double (*_pmd); + double (*_px); + double (*_rv); + int _c_retval; + int stat_ok = 1; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _pv = ((double (*)[2][3])(dataptrarray[0])); + _ra = ((double (*))(dataptrarray[1])); + _dec = ((double (*))(dataptrarray[2])); + _pmr = ((double (*))(dataptrarray[3])); + _pmd = ((double (*))(dataptrarray[4])); + _px = ((double (*))(dataptrarray[5])); + _rv = ((double (*))(dataptrarray[6])); + + _c_retval = eraPvstar(*_pv, _ra, _dec, _pmr, _pmd, _px, _rv); + *((int *)(dataptrarray[7])) = _c_retval; + if (_c_retval) { + stat_ok = 0; + } + } while (iternext(it)); + + Py_END_ALLOW_THREADS + if (stat_ok) { + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } +} + +static PyObject *Py_starpv(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_ra); + double (*_dec); + double (*_pmr); + double (*_pmd); + double (*_px); + double (*_rv); + double (*_pv)[2][3]; + int _c_retval; + int stat_ok = 1; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _ra = ((double (*))(dataptrarray[0])); + _dec = ((double (*))(dataptrarray[1])); + _pmr = ((double (*))(dataptrarray[2])); + _pmd = ((double (*))(dataptrarray[3])); + _px = ((double (*))(dataptrarray[4])); + _rv = ((double (*))(dataptrarray[5])); + _pv = ((double (*)[2][3])(dataptrarray[6])); + + _c_retval = eraStarpv(*_ra, *_dec, *_pmr, *_pmd, *_px, *_rv, *_pv); + *((int *)(dataptrarray[7])) = _c_retval; + if (_c_retval) { + stat_ok = 0; + } + } while (iternext(it)); + + Py_END_ALLOW_THREADS + if (stat_ok) { + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } +} + +static PyObject *Py_fk52h(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_r5); + double (*_d5); + double (*_dr5); + double (*_dd5); + double (*_px5); + double (*_rv5); + double (*_rh); + double (*_dh); + double (*_drh); + double (*_ddh); + double (*_pxh); + double (*_rvh); + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _r5 = ((double (*))(dataptrarray[0])); + _d5 = ((double (*))(dataptrarray[1])); + _dr5 = ((double (*))(dataptrarray[2])); + _dd5 = ((double (*))(dataptrarray[3])); + _px5 = ((double (*))(dataptrarray[4])); + _rv5 = ((double (*))(dataptrarray[5])); + _rh = ((double (*))(dataptrarray[6])); + _dh = ((double (*))(dataptrarray[7])); + _drh = ((double (*))(dataptrarray[8])); + _ddh = ((double (*))(dataptrarray[9])); + _pxh = ((double (*))(dataptrarray[10])); + _rvh = ((double (*))(dataptrarray[11])); + + eraFk52h(*_r5, *_d5, *_dr5, *_dd5, *_px5, *_rv5, _rh, _dh, _drh, _ddh, _pxh, _rvh); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_fk5hip(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_r5h)[3][3]; + double (*_s5h)[3]; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + 
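+    /*
+     * The GIL is released around the whole loop: the body only touches
+     * the element buffers exposed through the NpyIter and calls into
+     * ERFA, never the Python C API, so other Python threads can run
+     * while large arrays are processed.
+     */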
+ Py_BEGIN_ALLOW_THREADS + + do { + _r5h = ((double (*)[3][3])(dataptrarray[0])); + _s5h = ((double (*)[3])(dataptrarray[1])); + + eraFk5hip(*_r5h, *_s5h); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_fk5hz(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_r5); + double (*_d5); + double (*_date1); + double (*_date2); + double (*_rh); + double (*_dh); + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _r5 = ((double (*))(dataptrarray[0])); + _d5 = ((double (*))(dataptrarray[1])); + _date1 = ((double (*))(dataptrarray[2])); + _date2 = ((double (*))(dataptrarray[3])); + _rh = ((double (*))(dataptrarray[4])); + _dh = ((double (*))(dataptrarray[5])); + + eraFk5hz(*_r5, *_d5, *_date1, *_date2, _rh, _dh); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_h2fk5(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_rh); + double (*_dh); + double (*_drh); + double (*_ddh); + double (*_pxh); + double (*_rvh); + double (*_r5); + double (*_d5); + double (*_dr5); + double (*_dd5); + double (*_px5); + double (*_rv5); + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _rh = ((double (*))(dataptrarray[0])); + _dh = ((double (*))(dataptrarray[1])); + _drh = ((double (*))(dataptrarray[2])); + _ddh = ((double (*))(dataptrarray[3])); + _pxh = ((double (*))(dataptrarray[4])); + _rvh = ((double (*))(dataptrarray[5])); + _r5 = ((double (*))(dataptrarray[6])); + _d5 = ((double (*))(dataptrarray[7])); + _dr5 = ((double (*))(dataptrarray[8])); + _dd5 = ((double (*))(dataptrarray[9])); + _px5 = ((double (*))(dataptrarray[10])); + _rv5 = ((double (*))(dataptrarray[11])); + + eraH2fk5(*_rh, *_dh, *_drh, *_ddh, *_pxh, *_rvh, _r5, _d5, _dr5, _dd5, _px5, _rv5); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_hfk5z(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_rh); + double (*_dh); + double (*_date1); + double (*_date2); + double (*_r5); + double (*_d5); + double (*_dr5); + double (*_dd5); + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _rh = ((double (*))(dataptrarray[0])); + _dh = ((double (*))(dataptrarray[1])); + _date1 = ((double (*))(dataptrarray[2])); + _date2 = ((double (*))(dataptrarray[3])); + _r5 = ((double (*))(dataptrarray[4])); + _d5 = ((double (*))(dataptrarray[5])); + _dr5 = ((double (*))(dataptrarray[6])); + _dd5 = ((double (*))(dataptrarray[7])); + + eraHfk5z(*_rh, *_dh, *_date1, *_date2, _r5, _d5, _dr5, _dd5); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_starpm(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_ra1); + double (*_dec1); + double (*_pmr1); + double (*_pmd1); + double (*_px1); + double (*_rv1); + double (*_ep1a); + double (*_ep1b); + double (*_ep2a); + double (*_ep2b); + double (*_ra2); + double (*_dec2); + double (*_pmr2); + double (*_pmd2); + double (*_px2); + double (*_rv2); + int _c_retval; + int stat_ok = 1; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = 
NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _ra1 = ((double (*))(dataptrarray[0])); + _dec1 = ((double (*))(dataptrarray[1])); + _pmr1 = ((double (*))(dataptrarray[2])); + _pmd1 = ((double (*))(dataptrarray[3])); + _px1 = ((double (*))(dataptrarray[4])); + _rv1 = ((double (*))(dataptrarray[5])); + _ep1a = ((double (*))(dataptrarray[6])); + _ep1b = ((double (*))(dataptrarray[7])); + _ep2a = ((double (*))(dataptrarray[8])); + _ep2b = ((double (*))(dataptrarray[9])); + _ra2 = ((double (*))(dataptrarray[10])); + _dec2 = ((double (*))(dataptrarray[11])); + _pmr2 = ((double (*))(dataptrarray[12])); + _pmd2 = ((double (*))(dataptrarray[13])); + _px2 = ((double (*))(dataptrarray[14])); + _rv2 = ((double (*))(dataptrarray[15])); + + _c_retval = eraStarpm(*_ra1, *_dec1, *_pmr1, *_pmd1, *_px1, *_rv1, *_ep1a, *_ep1b, *_ep2a, *_ep2b, _ra2, _dec2, _pmr2, _pmd2, _px2, _rv2); + *((int *)(dataptrarray[16])) = _c_retval; + if (_c_retval) { + stat_ok = 0; + } + } while (iternext(it)); + + Py_END_ALLOW_THREADS + if (stat_ok) { + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } +} + +static PyObject *Py_eceq06(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_date1); + double (*_date2); + double (*_dl); + double (*_db); + double (*_dr); + double (*_dd); + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _date1 = ((double (*))(dataptrarray[0])); + _date2 = ((double (*))(dataptrarray[1])); + _dl = ((double (*))(dataptrarray[2])); + _db = ((double (*))(dataptrarray[3])); + _dr = ((double (*))(dataptrarray[4])); + _dd = ((double (*))(dataptrarray[5])); + + eraEceq06(*_date1, *_date2, *_dl, *_db, _dr, _dd); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_ecm06(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_date1); + double (*_date2); + double (*_rm)[3][3]; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _date1 = ((double (*))(dataptrarray[0])); + _date2 = ((double (*))(dataptrarray[1])); + _rm = ((double (*)[3][3])(dataptrarray[2])); + + eraEcm06(*_date1, *_date2, *_rm); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_eqec06(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_date1); + double (*_date2); + double (*_dr); + double (*_dd); + double (*_dl); + double (*_db); + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _date1 = ((double (*))(dataptrarray[0])); + _date2 = ((double (*))(dataptrarray[1])); + _dr = ((double (*))(dataptrarray[2])); + _dd = ((double (*))(dataptrarray[3])); + _dl = ((double (*))(dataptrarray[4])); + _db = ((double (*))(dataptrarray[5])); + + eraEqec06(*_date1, *_date2, *_dr, *_dd, _dl, _db); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_lteceq(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_epj); + double (*_dl); + double (*_db); + double (*_dr); + double (*_dd); + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = 
NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _epj = ((double (*))(dataptrarray[0])); + _dl = ((double (*))(dataptrarray[1])); + _db = ((double (*))(dataptrarray[2])); + _dr = ((double (*))(dataptrarray[3])); + _dd = ((double (*))(dataptrarray[4])); + + eraLteceq(*_epj, *_dl, *_db, _dr, _dd); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_ltecm(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_epj); + double (*_rm)[3][3]; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _epj = ((double (*))(dataptrarray[0])); + _rm = ((double (*)[3][3])(dataptrarray[1])); + + eraLtecm(*_epj, *_rm); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_lteqec(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_epj); + double (*_dr); + double (*_dd); + double (*_dl); + double (*_db); + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _epj = ((double (*))(dataptrarray[0])); + _dr = ((double (*))(dataptrarray[1])); + _dd = ((double (*))(dataptrarray[2])); + _dl = ((double (*))(dataptrarray[3])); + _db = ((double (*))(dataptrarray[4])); + + eraLteqec(*_epj, *_dr, *_dd, _dl, _db); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_g2icrs(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_dl); + double (*_db); + double (*_dr); + double (*_dd); + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _dl = ((double (*))(dataptrarray[0])); + _db = ((double (*))(dataptrarray[1])); + _dr = ((double (*))(dataptrarray[2])); + _dd = ((double (*))(dataptrarray[3])); + + eraG2icrs(*_dl, *_db, _dr, _dd); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_icrs2g(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_dr); + double (*_dd); + double (*_dl); + double (*_db); + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _dr = ((double (*))(dataptrarray[0])); + _dd = ((double (*))(dataptrarray[1])); + _dl = ((double (*))(dataptrarray[2])); + _db = ((double (*))(dataptrarray[3])); + + eraIcrs2g(*_dr, *_dd, _dl, _db); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_eform(PyObject *self, PyObject *args, PyObject *kwds) +{ + int (*_n); + double (*_a); + double (*_f); + int _c_retval; + int stat_ok = 1; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _n = ((int (*))(dataptrarray[0])); + _a = ((double (*))(dataptrarray[1])); + _f = ((double (*))(dataptrarray[2])); + + _c_retval = eraEform(*_n, _a, _f); + *((int *)(dataptrarray[3])) = _c_retval; + if (_c_retval) { + stat_ok = 0; + } + } while (iternext(it)); + + 
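+    /*
+     * Each per-element ERFA status code has already been written to
+     * the status operand above; stat_ok only records whether any code
+     * was non-zero.  Returning Py_True/Py_False presumably lets the
+     * Python caller skip a per-element status scan in the common
+     * all-OK case.
+     */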
Py_END_ALLOW_THREADS + if (stat_ok) { + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } +} + +static PyObject *Py_gc2gd(PyObject *self, PyObject *args, PyObject *kwds) +{ + int (*_n); + double (*_xyz)[3]; + double (*_elong); + double (*_phi); + double (*_height); + int _c_retval; + int stat_ok = 1; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _n = ((int (*))(dataptrarray[0])); + _xyz = ((double (*)[3])(dataptrarray[1])); + _elong = ((double (*))(dataptrarray[2])); + _phi = ((double (*))(dataptrarray[3])); + _height = ((double (*))(dataptrarray[4])); + + _c_retval = eraGc2gd(*_n, *_xyz, _elong, _phi, _height); + *((int *)(dataptrarray[5])) = _c_retval; + if (_c_retval) { + stat_ok = 0; + } + } while (iternext(it)); + + Py_END_ALLOW_THREADS + if (stat_ok) { + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } +} + +static PyObject *Py_gc2gde(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_a); + double (*_f); + double (*_xyz)[3]; + double (*_elong); + double (*_phi); + double (*_height); + int _c_retval; + int stat_ok = 1; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _a = ((double (*))(dataptrarray[0])); + _f = ((double (*))(dataptrarray[1])); + _xyz = ((double (*)[3])(dataptrarray[2])); + _elong = ((double (*))(dataptrarray[3])); + _phi = ((double (*))(dataptrarray[4])); + _height = ((double (*))(dataptrarray[5])); + + _c_retval = eraGc2gde(*_a, *_f, *_xyz, _elong, _phi, _height); + *((int *)(dataptrarray[6])) = _c_retval; + if (_c_retval) { + stat_ok = 0; + } + } while (iternext(it)); + + Py_END_ALLOW_THREADS + if (stat_ok) { + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } +} + +static PyObject *Py_gd2gc(PyObject *self, PyObject *args, PyObject *kwds) +{ + int (*_n); + double (*_elong); + double (*_phi); + double (*_height); + double (*_xyz)[3]; + int _c_retval; + int stat_ok = 1; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _n = ((int (*))(dataptrarray[0])); + _elong = ((double (*))(dataptrarray[1])); + _phi = ((double (*))(dataptrarray[2])); + _height = ((double (*))(dataptrarray[3])); + _xyz = ((double (*)[3])(dataptrarray[4])); + + _c_retval = eraGd2gc(*_n, *_elong, *_phi, *_height, *_xyz); + *((int *)(dataptrarray[5])) = _c_retval; + if (_c_retval) { + stat_ok = 0; + } + } while (iternext(it)); + + Py_END_ALLOW_THREADS + if (stat_ok) { + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } +} + +static PyObject *Py_gd2gce(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_a); + double (*_f); + double (*_elong); + double (*_phi); + double (*_height); + double (*_xyz)[3]; + int _c_retval; + int stat_ok = 1; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _a = ((double (*))(dataptrarray[0])); + _f = ((double (*))(dataptrarray[1])); + _elong = ((double (*))(dataptrarray[2])); + _phi = ((double (*))(dataptrarray[3])); + _height = ((double (*))(dataptrarray[4])); + _xyz = ((double (*)[3])(dataptrarray[5])); + + _c_retval = eraGd2gce(*_a, 
*_f, *_elong, *_phi, *_height, *_xyz); + *((int *)(dataptrarray[6])) = _c_retval; + if (_c_retval) { + stat_ok = 0; + } + } while (iternext(it)); + + Py_END_ALLOW_THREADS + if (stat_ok) { + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } +} + +static PyObject *Py_d2dtf(PyObject *self, PyObject *args, PyObject *kwds) +{ + const char (*_scale); + int (*_ndp); + double (*_d1); + double (*_d2); + int (*_iy); + int (*_im); + int (*_id); + int (*_ihmsf)[4]; + int _c_retval; + int stat_ok = 1; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _scale = ((const char (*))(dataptrarray[0])); + _ndp = ((int (*))(dataptrarray[1])); + _d1 = ((double (*))(dataptrarray[2])); + _d2 = ((double (*))(dataptrarray[3])); + _iy = ((int (*))(dataptrarray[4])); + _im = ((int (*))(dataptrarray[5])); + _id = ((int (*))(dataptrarray[6])); + _ihmsf = ((int (*)[4])(dataptrarray[7])); + + _c_retval = eraD2dtf(_scale, *_ndp, *_d1, *_d2, _iy, _im, _id, *_ihmsf); + *((int *)(dataptrarray[8])) = _c_retval; + if (_c_retval) { + stat_ok = 0; + } + } while (iternext(it)); + + Py_END_ALLOW_THREADS + if (stat_ok) { + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } +} + +static PyObject *Py_dat(PyObject *self, PyObject *args, PyObject *kwds) +{ + int (*_iy); + int (*_im); + int (*_id); + double (*_fd); + double (*_deltat); + int _c_retval; + int stat_ok = 1; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _iy = ((int (*))(dataptrarray[0])); + _im = ((int (*))(dataptrarray[1])); + _id = ((int (*))(dataptrarray[2])); + _fd = ((double (*))(dataptrarray[3])); + _deltat = ((double (*))(dataptrarray[4])); + + _c_retval = eraDat(*_iy, *_im, *_id, *_fd, _deltat); + *((int *)(dataptrarray[5])) = _c_retval; + if (_c_retval) { + stat_ok = 0; + } + } while (iternext(it)); + + Py_END_ALLOW_THREADS + if (stat_ok) { + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } +} + +static PyObject *Py_dtdb(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_date1); + double (*_date2); + double (*_ut); + double (*_elong); + double (*_u); + double (*_v); + double _c_retval; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _date1 = ((double (*))(dataptrarray[0])); + _date2 = ((double (*))(dataptrarray[1])); + _ut = ((double (*))(dataptrarray[2])); + _elong = ((double (*))(dataptrarray[3])); + _u = ((double (*))(dataptrarray[4])); + _v = ((double (*))(dataptrarray[5])); + + _c_retval = eraDtdb(*_date1, *_date2, *_ut, *_elong, *_u, *_v); + *((double *)(dataptrarray[6])) = _c_retval; + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_dtf2d(PyObject *self, PyObject *args, PyObject *kwds) +{ + const char (*_scale); + int (*_iy); + int (*_im); + int (*_id); + int (*_ihr); + int (*_imn); + double (*_sec); + double (*_d1); + double (*_d2); + int _c_retval; + int stat_ok = 1; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _scale = ((const char (*))(dataptrarray[0])); + _iy = ((int 
(*))(dataptrarray[1])); + _im = ((int (*))(dataptrarray[2])); + _id = ((int (*))(dataptrarray[3])); + _ihr = ((int (*))(dataptrarray[4])); + _imn = ((int (*))(dataptrarray[5])); + _sec = ((double (*))(dataptrarray[6])); + _d1 = ((double (*))(dataptrarray[7])); + _d2 = ((double (*))(dataptrarray[8])); + + _c_retval = eraDtf2d(_scale, *_iy, *_im, *_id, *_ihr, *_imn, *_sec, _d1, _d2); + *((int *)(dataptrarray[9])) = _c_retval; + if (_c_retval) { + stat_ok = 0; + } + } while (iternext(it)); + + Py_END_ALLOW_THREADS + if (stat_ok) { + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } +} + +static PyObject *Py_taitt(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_tai1); + double (*_tai2); + double (*_tt1); + double (*_tt2); + int _c_retval; + int stat_ok = 1; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _tai1 = ((double (*))(dataptrarray[0])); + _tai2 = ((double (*))(dataptrarray[1])); + _tt1 = ((double (*))(dataptrarray[2])); + _tt2 = ((double (*))(dataptrarray[3])); + + _c_retval = eraTaitt(*_tai1, *_tai2, _tt1, _tt2); + *((int *)(dataptrarray[4])) = _c_retval; + if (_c_retval) { + stat_ok = 0; + } + } while (iternext(it)); + + Py_END_ALLOW_THREADS + if (stat_ok) { + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } +} + +static PyObject *Py_taiut1(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_tai1); + double (*_tai2); + double (*_dta); + double (*_ut11); + double (*_ut12); + int _c_retval; + int stat_ok = 1; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _tai1 = ((double (*))(dataptrarray[0])); + _tai2 = ((double (*))(dataptrarray[1])); + _dta = ((double (*))(dataptrarray[2])); + _ut11 = ((double (*))(dataptrarray[3])); + _ut12 = ((double (*))(dataptrarray[4])); + + _c_retval = eraTaiut1(*_tai1, *_tai2, *_dta, _ut11, _ut12); + *((int *)(dataptrarray[5])) = _c_retval; + if (_c_retval) { + stat_ok = 0; + } + } while (iternext(it)); + + Py_END_ALLOW_THREADS + if (stat_ok) { + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } +} + +static PyObject *Py_taiutc(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_tai1); + double (*_tai2); + double (*_utc1); + double (*_utc2); + int _c_retval; + int stat_ok = 1; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _tai1 = ((double (*))(dataptrarray[0])); + _tai2 = ((double (*))(dataptrarray[1])); + _utc1 = ((double (*))(dataptrarray[2])); + _utc2 = ((double (*))(dataptrarray[3])); + + _c_retval = eraTaiutc(*_tai1, *_tai2, _utc1, _utc2); + *((int *)(dataptrarray[4])) = _c_retval; + if (_c_retval) { + stat_ok = 0; + } + } while (iternext(it)); + + Py_END_ALLOW_THREADS + if (stat_ok) { + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } +} + +static PyObject *Py_tcbtdb(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_tcb1); + double (*_tcb2); + double (*_tdb1); + double (*_tdb2); + int _c_retval; + int stat_ok = 1; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _tcb1 = ((double 
(*))(dataptrarray[0])); + _tcb2 = ((double (*))(dataptrarray[1])); + _tdb1 = ((double (*))(dataptrarray[2])); + _tdb2 = ((double (*))(dataptrarray[3])); + + _c_retval = eraTcbtdb(*_tcb1, *_tcb2, _tdb1, _tdb2); + *((int *)(dataptrarray[4])) = _c_retval; + if (_c_retval) { + stat_ok = 0; + } + } while (iternext(it)); + + Py_END_ALLOW_THREADS + if (stat_ok) { + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } +} + +static PyObject *Py_tcgtt(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_tcg1); + double (*_tcg2); + double (*_tt1); + double (*_tt2); + int _c_retval; + int stat_ok = 1; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _tcg1 = ((double (*))(dataptrarray[0])); + _tcg2 = ((double (*))(dataptrarray[1])); + _tt1 = ((double (*))(dataptrarray[2])); + _tt2 = ((double (*))(dataptrarray[3])); + + _c_retval = eraTcgtt(*_tcg1, *_tcg2, _tt1, _tt2); + *((int *)(dataptrarray[4])) = _c_retval; + if (_c_retval) { + stat_ok = 0; + } + } while (iternext(it)); + + Py_END_ALLOW_THREADS + if (stat_ok) { + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } +} + +static PyObject *Py_tdbtcb(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_tdb1); + double (*_tdb2); + double (*_tcb1); + double (*_tcb2); + int _c_retval; + int stat_ok = 1; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _tdb1 = ((double (*))(dataptrarray[0])); + _tdb2 = ((double (*))(dataptrarray[1])); + _tcb1 = ((double (*))(dataptrarray[2])); + _tcb2 = ((double (*))(dataptrarray[3])); + + _c_retval = eraTdbtcb(*_tdb1, *_tdb2, _tcb1, _tcb2); + *((int *)(dataptrarray[4])) = _c_retval; + if (_c_retval) { + stat_ok = 0; + } + } while (iternext(it)); + + Py_END_ALLOW_THREADS + if (stat_ok) { + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } +} + +static PyObject *Py_tdbtt(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_tdb1); + double (*_tdb2); + double (*_dtr); + double (*_tt1); + double (*_tt2); + int _c_retval; + int stat_ok = 1; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _tdb1 = ((double (*))(dataptrarray[0])); + _tdb2 = ((double (*))(dataptrarray[1])); + _dtr = ((double (*))(dataptrarray[2])); + _tt1 = ((double (*))(dataptrarray[3])); + _tt2 = ((double (*))(dataptrarray[4])); + + _c_retval = eraTdbtt(*_tdb1, *_tdb2, *_dtr, _tt1, _tt2); + *((int *)(dataptrarray[5])) = _c_retval; + if (_c_retval) { + stat_ok = 0; + } + } while (iternext(it)); + + Py_END_ALLOW_THREADS + if (stat_ok) { + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } +} + +static PyObject *Py_tttai(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_tt1); + double (*_tt2); + double (*_tai1); + double (*_tai2); + int _c_retval; + int stat_ok = 1; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _tt1 = ((double (*))(dataptrarray[0])); + _tt2 = ((double (*))(dataptrarray[1])); + _tai1 = ((double (*))(dataptrarray[2])); + _tai2 = ((double (*))(dataptrarray[3])); + + _c_retval = eraTttai(*_tt1, *_tt2, 
_tai1, _tai2); + *((int *)(dataptrarray[4])) = _c_retval; + if (_c_retval) { + stat_ok = 0; + } + } while (iternext(it)); + + Py_END_ALLOW_THREADS + if (stat_ok) { + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } +} + +static PyObject *Py_tttcg(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_tt1); + double (*_tt2); + double (*_tcg1); + double (*_tcg2); + int _c_retval; + int stat_ok = 1; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _tt1 = ((double (*))(dataptrarray[0])); + _tt2 = ((double (*))(dataptrarray[1])); + _tcg1 = ((double (*))(dataptrarray[2])); + _tcg2 = ((double (*))(dataptrarray[3])); + + _c_retval = eraTttcg(*_tt1, *_tt2, _tcg1, _tcg2); + *((int *)(dataptrarray[4])) = _c_retval; + if (_c_retval) { + stat_ok = 0; + } + } while (iternext(it)); + + Py_END_ALLOW_THREADS + if (stat_ok) { + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } +} + +static PyObject *Py_tttdb(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_tt1); + double (*_tt2); + double (*_dtr); + double (*_tdb1); + double (*_tdb2); + int _c_retval; + int stat_ok = 1; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _tt1 = ((double (*))(dataptrarray[0])); + _tt2 = ((double (*))(dataptrarray[1])); + _dtr = ((double (*))(dataptrarray[2])); + _tdb1 = ((double (*))(dataptrarray[3])); + _tdb2 = ((double (*))(dataptrarray[4])); + + _c_retval = eraTttdb(*_tt1, *_tt2, *_dtr, _tdb1, _tdb2); + *((int *)(dataptrarray[5])) = _c_retval; + if (_c_retval) { + stat_ok = 0; + } + } while (iternext(it)); + + Py_END_ALLOW_THREADS + if (stat_ok) { + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } +} + +static PyObject *Py_ttut1(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_tt1); + double (*_tt2); + double (*_dt); + double (*_ut11); + double (*_ut12); + int _c_retval; + int stat_ok = 1; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _tt1 = ((double (*))(dataptrarray[0])); + _tt2 = ((double (*))(dataptrarray[1])); + _dt = ((double (*))(dataptrarray[2])); + _ut11 = ((double (*))(dataptrarray[3])); + _ut12 = ((double (*))(dataptrarray[4])); + + _c_retval = eraTtut1(*_tt1, *_tt2, *_dt, _ut11, _ut12); + *((int *)(dataptrarray[5])) = _c_retval; + if (_c_retval) { + stat_ok = 0; + } + } while (iternext(it)); + + Py_END_ALLOW_THREADS + if (stat_ok) { + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } +} + +static PyObject *Py_ut1tai(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_ut11); + double (*_ut12); + double (*_dta); + double (*_tai1); + double (*_tai2); + int _c_retval; + int stat_ok = 1; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _ut11 = ((double (*))(dataptrarray[0])); + _ut12 = ((double (*))(dataptrarray[1])); + _dta = ((double (*))(dataptrarray[2])); + _tai1 = ((double (*))(dataptrarray[3])); + _tai2 = ((double (*))(dataptrarray[4])); + + _c_retval = eraUt1tai(*_ut11, *_ut12, *_dta, _tai1, _tai2); + *((int *)(dataptrarray[5])) = _c_retval; + if 
(_c_retval) { + stat_ok = 0; + } + } while (iternext(it)); + + Py_END_ALLOW_THREADS + if (stat_ok) { + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } +} + +static PyObject *Py_ut1tt(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_ut11); + double (*_ut12); + double (*_dt); + double (*_tt1); + double (*_tt2); + int _c_retval; + int stat_ok = 1; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _ut11 = ((double (*))(dataptrarray[0])); + _ut12 = ((double (*))(dataptrarray[1])); + _dt = ((double (*))(dataptrarray[2])); + _tt1 = ((double (*))(dataptrarray[3])); + _tt2 = ((double (*))(dataptrarray[4])); + + _c_retval = eraUt1tt(*_ut11, *_ut12, *_dt, _tt1, _tt2); + *((int *)(dataptrarray[5])) = _c_retval; + if (_c_retval) { + stat_ok = 0; + } + } while (iternext(it)); + + Py_END_ALLOW_THREADS + if (stat_ok) { + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } +} + +static PyObject *Py_ut1utc(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_ut11); + double (*_ut12); + double (*_dut1); + double (*_utc1); + double (*_utc2); + int _c_retval; + int stat_ok = 1; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _ut11 = ((double (*))(dataptrarray[0])); + _ut12 = ((double (*))(dataptrarray[1])); + _dut1 = ((double (*))(dataptrarray[2])); + _utc1 = ((double (*))(dataptrarray[3])); + _utc2 = ((double (*))(dataptrarray[4])); + + _c_retval = eraUt1utc(*_ut11, *_ut12, *_dut1, _utc1, _utc2); + *((int *)(dataptrarray[5])) = _c_retval; + if (_c_retval) { + stat_ok = 0; + } + } while (iternext(it)); + + Py_END_ALLOW_THREADS + if (stat_ok) { + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } +} + +static PyObject *Py_utctai(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_utc1); + double (*_utc2); + double (*_tai1); + double (*_tai2); + int _c_retval; + int stat_ok = 1; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _utc1 = ((double (*))(dataptrarray[0])); + _utc2 = ((double (*))(dataptrarray[1])); + _tai1 = ((double (*))(dataptrarray[2])); + _tai2 = ((double (*))(dataptrarray[3])); + + _c_retval = eraUtctai(*_utc1, *_utc2, _tai1, _tai2); + *((int *)(dataptrarray[4])) = _c_retval; + if (_c_retval) { + stat_ok = 0; + } + } while (iternext(it)); + + Py_END_ALLOW_THREADS + if (stat_ok) { + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } +} + +static PyObject *Py_utcut1(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_utc1); + double (*_utc2); + double (*_dut1); + double (*_ut11); + double (*_ut12); + int _c_retval; + int stat_ok = 1; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _utc1 = ((double (*))(dataptrarray[0])); + _utc2 = ((double (*))(dataptrarray[1])); + _dut1 = ((double (*))(dataptrarray[2])); + _ut11 = ((double (*))(dataptrarray[3])); + _ut12 = ((double (*))(dataptrarray[4])); + + _c_retval = eraUtcut1(*_utc1, *_utc2, *_dut1, _ut11, _ut12); + *((int *)(dataptrarray[5])) = _c_retval; + if (_c_retval) { + stat_ok = 0; + } + } while 
(iternext(it)); + + Py_END_ALLOW_THREADS + if (stat_ok) { + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } +} + +static PyObject *Py_a2af(PyObject *self, PyObject *args, PyObject *kwds) +{ + int (*_ndp); + double (*_angle); + char (*_sign); + int (*_idmsf)[4]; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _ndp = ((int (*))(dataptrarray[0])); + _angle = ((double (*))(dataptrarray[1])); + _sign = ((char (*))(dataptrarray[2])); + _idmsf = ((int (*)[4])(dataptrarray[3])); + + eraA2af(*_ndp, *_angle, _sign, *_idmsf); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_a2tf(PyObject *self, PyObject *args, PyObject *kwds) +{ + int (*_ndp); + double (*_angle); + char (*_sign); + int (*_ihmsf)[4]; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _ndp = ((int (*))(dataptrarray[0])); + _angle = ((double (*))(dataptrarray[1])); + _sign = ((char (*))(dataptrarray[2])); + _ihmsf = ((int (*)[4])(dataptrarray[3])); + + eraA2tf(*_ndp, *_angle, _sign, *_ihmsf); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_af2a(PyObject *self, PyObject *args, PyObject *kwds) +{ + char (*_s); + int (*_ideg); + int (*_iamin); + double (*_asec); + double (*_rad); + int _c_retval; + int stat_ok = 1; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _s = ((char (*))(dataptrarray[0])); + _ideg = ((int (*))(dataptrarray[1])); + _iamin = ((int (*))(dataptrarray[2])); + _asec = ((double (*))(dataptrarray[3])); + _rad = ((double (*))(dataptrarray[4])); + + _c_retval = eraAf2a(*_s, *_ideg, *_iamin, *_asec, _rad); + *((int *)(dataptrarray[5])) = _c_retval; + if (_c_retval) { + stat_ok = 0; + } + } while (iternext(it)); + + Py_END_ALLOW_THREADS + if (stat_ok) { + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } +} + +static PyObject *Py_anp(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_a); + double _c_retval; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _a = ((double (*))(dataptrarray[0])); + + _c_retval = eraAnp(*_a); + *((double *)(dataptrarray[1])) = _c_retval; + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_anpm(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_a); + double _c_retval; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _a = ((double (*))(dataptrarray[0])); + + _c_retval = eraAnpm(*_a); + *((double *)(dataptrarray[1])) = _c_retval; + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_d2tf(PyObject *self, PyObject *args, PyObject *kwds) +{ + int (*_ndp); + double (*_days); + char (*_sign); + int (*_ihmsf)[4]; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc 
*iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _ndp = ((int (*))(dataptrarray[0])); + _days = ((double (*))(dataptrarray[1])); + _sign = ((char (*))(dataptrarray[2])); + _ihmsf = ((int (*)[4])(dataptrarray[3])); + + eraD2tf(*_ndp, *_days, _sign, *_ihmsf); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_tf2a(PyObject *self, PyObject *args, PyObject *kwds) +{ + char (*_s); + int (*_ihour); + int (*_imin); + double (*_sec); + double (*_rad); + int _c_retval; + int stat_ok = 1; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _s = ((char (*))(dataptrarray[0])); + _ihour = ((int (*))(dataptrarray[1])); + _imin = ((int (*))(dataptrarray[2])); + _sec = ((double (*))(dataptrarray[3])); + _rad = ((double (*))(dataptrarray[4])); + + _c_retval = eraTf2a(*_s, *_ihour, *_imin, *_sec, _rad); + *((int *)(dataptrarray[5])) = _c_retval; + if (_c_retval) { + stat_ok = 0; + } + } while (iternext(it)); + + Py_END_ALLOW_THREADS + if (stat_ok) { + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } +} + +static PyObject *Py_tf2d(PyObject *self, PyObject *args, PyObject *kwds) +{ + char (*_s); + int (*_ihour); + int (*_imin); + double (*_sec); + double (*_days); + int _c_retval; + int stat_ok = 1; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _s = ((char (*))(dataptrarray[0])); + _ihour = ((int (*))(dataptrarray[1])); + _imin = ((int (*))(dataptrarray[2])); + _sec = ((double (*))(dataptrarray[3])); + _days = ((double (*))(dataptrarray[4])); + + _c_retval = eraTf2d(*_s, *_ihour, *_imin, *_sec, _days); + *((int *)(dataptrarray[5])) = _c_retval; + if (_c_retval) { + stat_ok = 0; + } + } while (iternext(it)); + + Py_END_ALLOW_THREADS + if (stat_ok) { + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } +} + +static PyObject *Py_rxp(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_r)[3][3]; + double (*_p)[3]; + double (*_rp)[3]; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _r = ((double (*)[3][3])(dataptrarray[0])); + _p = ((double (*)[3])(dataptrarray[1])); + _rp = ((double (*)[3])(dataptrarray[2])); + + eraRxp(*_r, *_p, *_rp); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_rxpv(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_r)[3][3]; + double (*_pv)[2][3]; + double (*_rpv)[2][3]; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _r = ((double (*)[3][3])(dataptrarray[0])); + _pv = ((double (*)[2][3])(dataptrarray[1])); + _rpv = ((double (*)[2][3])(dataptrarray[2])); + + eraRxpv(*_r, *_pv, *_rpv); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_trxp(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_r)[3][3]; + double (*_p)[3]; + double (*_trp)[3]; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = 
NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _r = ((double (*)[3][3])(dataptrarray[0])); + _p = ((double (*)[3])(dataptrarray[1])); + _trp = ((double (*)[3])(dataptrarray[2])); + + eraTrxp(*_r, *_p, *_trp); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_trxpv(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_r)[3][3]; + double (*_pv)[2][3]; + double (*_trpv)[2][3]; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _r = ((double (*)[3][3])(dataptrarray[0])); + _pv = ((double (*)[2][3])(dataptrarray[1])); + _trpv = ((double (*)[2][3])(dataptrarray[2])); + + eraTrxpv(*_r, *_pv, *_trpv); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_c2s(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_p)[3]; + double (*_theta); + double (*_phi); + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _p = ((double (*)[3])(dataptrarray[0])); + _theta = ((double (*))(dataptrarray[1])); + _phi = ((double (*))(dataptrarray[2])); + + eraC2s(*_p, _theta, _phi); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_p2s(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_p)[3]; + double (*_theta); + double (*_phi); + double (*_r); + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _p = ((double (*)[3])(dataptrarray[0])); + _theta = ((double (*))(dataptrarray[1])); + _phi = ((double (*))(dataptrarray[2])); + _r = ((double (*))(dataptrarray[3])); + + eraP2s(*_p, _theta, _phi, _r); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_pv2s(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_pv)[2][3]; + double (*_theta); + double (*_phi); + double (*_r); + double (*_td); + double (*_pd); + double (*_rd); + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _pv = ((double (*)[2][3])(dataptrarray[0])); + _theta = ((double (*))(dataptrarray[1])); + _phi = ((double (*))(dataptrarray[2])); + _r = ((double (*))(dataptrarray[3])); + _td = ((double (*))(dataptrarray[4])); + _pd = ((double (*))(dataptrarray[5])); + _rd = ((double (*))(dataptrarray[6])); + + eraPv2s(*_pv, _theta, _phi, _r, _td, _pd, _rd); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_s2c(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_theta); + double (*_phi); + double (*_c)[3]; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _theta = ((double (*))(dataptrarray[0])); + _phi = ((double (*))(dataptrarray[1])); + _c = ((double (*)[3])(dataptrarray[2])); + + eraS2c(*_theta, *_phi, *_c); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_s2p(PyObject *self, 
PyObject *args, PyObject *kwds) +{ + double (*_theta); + double (*_phi); + double (*_r); + double (*_p)[3]; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _theta = ((double (*))(dataptrarray[0])); + _phi = ((double (*))(dataptrarray[1])); + _r = ((double (*))(dataptrarray[2])); + _p = ((double (*)[3])(dataptrarray[3])); + + eraS2p(*_theta, *_phi, *_r, *_p); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyObject *Py_s2pv(PyObject *self, PyObject *args, PyObject *kwds) +{ + double (*_theta); + double (*_phi); + double (*_r); + double (*_td); + double (*_pd); + double (*_rd); + double (*_pv)[2][3]; + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + _theta = ((double (*))(dataptrarray[0])); + _phi = ((double (*))(dataptrarray[1])); + _r = ((double (*))(dataptrarray[2])); + _td = ((double (*))(dataptrarray[3])); + _pd = ((double (*))(dataptrarray[4])); + _rd = ((double (*))(dataptrarray[5])); + _pv = ((double (*)[2][3])(dataptrarray[6])); + + eraS2pv(*_theta, *_phi, *_r, *_td, *_pd, *_rd, *_pv); + } while (iternext(it)); + + Py_END_ALLOW_THREADS + Py_RETURN_NONE; +} + +static PyMethodDef module_functions[] = { + { "_" "cal2jd", (PyCFunction)Py_cal2jd, METH_O, NULL }, + { "_" "epb", (PyCFunction)Py_epb, METH_O, NULL }, + { "_" "epb2jd", (PyCFunction)Py_epb2jd, METH_O, NULL }, + { "_" "epj", (PyCFunction)Py_epj, METH_O, NULL }, + { "_" "epj2jd", (PyCFunction)Py_epj2jd, METH_O, NULL }, + { "_" "jd2cal", (PyCFunction)Py_jd2cal, METH_O, NULL }, + { "_" "jdcalf", (PyCFunction)Py_jdcalf, METH_O, NULL }, + { "_" "ab", (PyCFunction)Py_ab, METH_O, NULL }, + { "_" "apcg", (PyCFunction)Py_apcg, METH_O, NULL }, + { "_" "apcg13", (PyCFunction)Py_apcg13, METH_O, NULL }, + { "_" "apci", (PyCFunction)Py_apci, METH_O, NULL }, + { "_" "apci13", (PyCFunction)Py_apci13, METH_O, NULL }, + { "_" "apco", (PyCFunction)Py_apco, METH_O, NULL }, + { "_" "apco13", (PyCFunction)Py_apco13, METH_O, NULL }, + { "_" "apcs", (PyCFunction)Py_apcs, METH_O, NULL }, + { "_" "apcs13", (PyCFunction)Py_apcs13, METH_O, NULL }, + { "_" "aper", (PyCFunction)Py_aper, METH_O, NULL }, + { "_" "aper13", (PyCFunction)Py_aper13, METH_O, NULL }, + { "_" "apio", (PyCFunction)Py_apio, METH_O, NULL }, + { "_" "apio13", (PyCFunction)Py_apio13, METH_O, NULL }, + { "_" "atci13", (PyCFunction)Py_atci13, METH_O, NULL }, + { "_" "atciq", (PyCFunction)Py_atciq, METH_O, NULL }, + { "_" "atciqn", (PyCFunction)Py_atciqn, METH_O, NULL }, + { "_" "atciqz", (PyCFunction)Py_atciqz, METH_O, NULL }, + { "_" "atco13", (PyCFunction)Py_atco13, METH_O, NULL }, + { "_" "atic13", (PyCFunction)Py_atic13, METH_O, NULL }, + { "_" "aticq", (PyCFunction)Py_aticq, METH_O, NULL }, + { "_" "aticqn", (PyCFunction)Py_aticqn, METH_O, NULL }, + { "_" "atio13", (PyCFunction)Py_atio13, METH_O, NULL }, + { "_" "atioq", (PyCFunction)Py_atioq, METH_O, NULL }, + { "_" "atoc13", (PyCFunction)Py_atoc13, METH_O, NULL }, + { "_" "atoi13", (PyCFunction)Py_atoi13, METH_O, NULL }, + { "_" "atoiq", (PyCFunction)Py_atoiq, METH_O, NULL }, + { "_" "ld", (PyCFunction)Py_ld, METH_O, NULL }, + { "_" "ldn", (PyCFunction)Py_ldn, METH_O, NULL }, + { "_" "ldsun", (PyCFunction)Py_ldsun, METH_O, NULL }, + { "_" "pmpx", (PyCFunction)Py_pmpx, METH_O, NULL }, + { "_" 
"pmsafe", (PyCFunction)Py_pmsafe, METH_O, NULL }, + { "_" "pvtob", (PyCFunction)Py_pvtob, METH_O, NULL }, + { "_" "refco", (PyCFunction)Py_refco, METH_O, NULL }, + { "_" "epv00", (PyCFunction)Py_epv00, METH_O, NULL }, + { "_" "plan94", (PyCFunction)Py_plan94, METH_O, NULL }, + { "_" "fad03", (PyCFunction)Py_fad03, METH_O, NULL }, + { "_" "fae03", (PyCFunction)Py_fae03, METH_O, NULL }, + { "_" "faf03", (PyCFunction)Py_faf03, METH_O, NULL }, + { "_" "faju03", (PyCFunction)Py_faju03, METH_O, NULL }, + { "_" "fal03", (PyCFunction)Py_fal03, METH_O, NULL }, + { "_" "falp03", (PyCFunction)Py_falp03, METH_O, NULL }, + { "_" "fama03", (PyCFunction)Py_fama03, METH_O, NULL }, + { "_" "fame03", (PyCFunction)Py_fame03, METH_O, NULL }, + { "_" "fane03", (PyCFunction)Py_fane03, METH_O, NULL }, + { "_" "faom03", (PyCFunction)Py_faom03, METH_O, NULL }, + { "_" "fapa03", (PyCFunction)Py_fapa03, METH_O, NULL }, + { "_" "fasa03", (PyCFunction)Py_fasa03, METH_O, NULL }, + { "_" "faur03", (PyCFunction)Py_faur03, METH_O, NULL }, + { "_" "fave03", (PyCFunction)Py_fave03, METH_O, NULL }, + { "_" "bi00", (PyCFunction)Py_bi00, METH_O, NULL }, + { "_" "bp00", (PyCFunction)Py_bp00, METH_O, NULL }, + { "_" "bp06", (PyCFunction)Py_bp06, METH_O, NULL }, + { "_" "bpn2xy", (PyCFunction)Py_bpn2xy, METH_O, NULL }, + { "_" "c2i00a", (PyCFunction)Py_c2i00a, METH_O, NULL }, + { "_" "c2i00b", (PyCFunction)Py_c2i00b, METH_O, NULL }, + { "_" "c2i06a", (PyCFunction)Py_c2i06a, METH_O, NULL }, + { "_" "c2ibpn", (PyCFunction)Py_c2ibpn, METH_O, NULL }, + { "_" "c2ixy", (PyCFunction)Py_c2ixy, METH_O, NULL }, + { "_" "c2ixys", (PyCFunction)Py_c2ixys, METH_O, NULL }, + { "_" "c2t00a", (PyCFunction)Py_c2t00a, METH_O, NULL }, + { "_" "c2t00b", (PyCFunction)Py_c2t00b, METH_O, NULL }, + { "_" "c2t06a", (PyCFunction)Py_c2t06a, METH_O, NULL }, + { "_" "c2tcio", (PyCFunction)Py_c2tcio, METH_O, NULL }, + { "_" "c2teqx", (PyCFunction)Py_c2teqx, METH_O, NULL }, + { "_" "c2tpe", (PyCFunction)Py_c2tpe, METH_O, NULL }, + { "_" "c2txy", (PyCFunction)Py_c2txy, METH_O, NULL }, + { "_" "eo06a", (PyCFunction)Py_eo06a, METH_O, NULL }, + { "_" "eors", (PyCFunction)Py_eors, METH_O, NULL }, + { "_" "fw2m", (PyCFunction)Py_fw2m, METH_O, NULL }, + { "_" "fw2xy", (PyCFunction)Py_fw2xy, METH_O, NULL }, + { "_" "ltp", (PyCFunction)Py_ltp, METH_O, NULL }, + { "_" "ltpb", (PyCFunction)Py_ltpb, METH_O, NULL }, + { "_" "ltpecl", (PyCFunction)Py_ltpecl, METH_O, NULL }, + { "_" "ltpequ", (PyCFunction)Py_ltpequ, METH_O, NULL }, + { "_" "num00a", (PyCFunction)Py_num00a, METH_O, NULL }, + { "_" "num00b", (PyCFunction)Py_num00b, METH_O, NULL }, + { "_" "num06a", (PyCFunction)Py_num06a, METH_O, NULL }, + { "_" "numat", (PyCFunction)Py_numat, METH_O, NULL }, + { "_" "nut00a", (PyCFunction)Py_nut00a, METH_O, NULL }, + { "_" "nut00b", (PyCFunction)Py_nut00b, METH_O, NULL }, + { "_" "nut06a", (PyCFunction)Py_nut06a, METH_O, NULL }, + { "_" "nut80", (PyCFunction)Py_nut80, METH_O, NULL }, + { "_" "nutm80", (PyCFunction)Py_nutm80, METH_O, NULL }, + { "_" "obl06", (PyCFunction)Py_obl06, METH_O, NULL }, + { "_" "obl80", (PyCFunction)Py_obl80, METH_O, NULL }, + { "_" "p06e", (PyCFunction)Py_p06e, METH_O, NULL }, + { "_" "pb06", (PyCFunction)Py_pb06, METH_O, NULL }, + { "_" "pfw06", (PyCFunction)Py_pfw06, METH_O, NULL }, + { "_" "pmat00", (PyCFunction)Py_pmat00, METH_O, NULL }, + { "_" "pmat06", (PyCFunction)Py_pmat06, METH_O, NULL }, + { "_" "pmat76", (PyCFunction)Py_pmat76, METH_O, NULL }, + { "_" "pn00", (PyCFunction)Py_pn00, METH_O, NULL }, + { "_" "pn00a", (PyCFunction)Py_pn00a, 
METH_O, NULL }, + { "_" "pn00b", (PyCFunction)Py_pn00b, METH_O, NULL }, + { "_" "pn06", (PyCFunction)Py_pn06, METH_O, NULL }, + { "_" "pn06a", (PyCFunction)Py_pn06a, METH_O, NULL }, + { "_" "pnm00a", (PyCFunction)Py_pnm00a, METH_O, NULL }, + { "_" "pnm00b", (PyCFunction)Py_pnm00b, METH_O, NULL }, + { "_" "pnm06a", (PyCFunction)Py_pnm06a, METH_O, NULL }, + { "_" "pnm80", (PyCFunction)Py_pnm80, METH_O, NULL }, + { "_" "pom00", (PyCFunction)Py_pom00, METH_O, NULL }, + { "_" "pr00", (PyCFunction)Py_pr00, METH_O, NULL }, + { "_" "prec76", (PyCFunction)Py_prec76, METH_O, NULL }, + { "_" "s00", (PyCFunction)Py_s00, METH_O, NULL }, + { "_" "s00a", (PyCFunction)Py_s00a, METH_O, NULL }, + { "_" "s00b", (PyCFunction)Py_s00b, METH_O, NULL }, + { "_" "s06", (PyCFunction)Py_s06, METH_O, NULL }, + { "_" "s06a", (PyCFunction)Py_s06a, METH_O, NULL }, + { "_" "sp00", (PyCFunction)Py_sp00, METH_O, NULL }, + { "_" "xy06", (PyCFunction)Py_xy06, METH_O, NULL }, + { "_" "xys00a", (PyCFunction)Py_xys00a, METH_O, NULL }, + { "_" "xys00b", (PyCFunction)Py_xys00b, METH_O, NULL }, + { "_" "xys06a", (PyCFunction)Py_xys06a, METH_O, NULL }, + { "_" "ee00", (PyCFunction)Py_ee00, METH_O, NULL }, + { "_" "ee00a", (PyCFunction)Py_ee00a, METH_O, NULL }, + { "_" "ee00b", (PyCFunction)Py_ee00b, METH_O, NULL }, + { "_" "ee06a", (PyCFunction)Py_ee06a, METH_O, NULL }, + { "_" "eect00", (PyCFunction)Py_eect00, METH_O, NULL }, + { "_" "eqeq94", (PyCFunction)Py_eqeq94, METH_O, NULL }, + { "_" "era00", (PyCFunction)Py_era00, METH_O, NULL }, + { "_" "gmst00", (PyCFunction)Py_gmst00, METH_O, NULL }, + { "_" "gmst06", (PyCFunction)Py_gmst06, METH_O, NULL }, + { "_" "gmst82", (PyCFunction)Py_gmst82, METH_O, NULL }, + { "_" "gst00a", (PyCFunction)Py_gst00a, METH_O, NULL }, + { "_" "gst00b", (PyCFunction)Py_gst00b, METH_O, NULL }, + { "_" "gst06", (PyCFunction)Py_gst06, METH_O, NULL }, + { "_" "gst06a", (PyCFunction)Py_gst06a, METH_O, NULL }, + { "_" "gst94", (PyCFunction)Py_gst94, METH_O, NULL }, + { "_" "pvstar", (PyCFunction)Py_pvstar, METH_O, NULL }, + { "_" "starpv", (PyCFunction)Py_starpv, METH_O, NULL }, + { "_" "fk52h", (PyCFunction)Py_fk52h, METH_O, NULL }, + { "_" "fk5hip", (PyCFunction)Py_fk5hip, METH_O, NULL }, + { "_" "fk5hz", (PyCFunction)Py_fk5hz, METH_O, NULL }, + { "_" "h2fk5", (PyCFunction)Py_h2fk5, METH_O, NULL }, + { "_" "hfk5z", (PyCFunction)Py_hfk5z, METH_O, NULL }, + { "_" "starpm", (PyCFunction)Py_starpm, METH_O, NULL }, + { "_" "eceq06", (PyCFunction)Py_eceq06, METH_O, NULL }, + { "_" "ecm06", (PyCFunction)Py_ecm06, METH_O, NULL }, + { "_" "eqec06", (PyCFunction)Py_eqec06, METH_O, NULL }, + { "_" "lteceq", (PyCFunction)Py_lteceq, METH_O, NULL }, + { "_" "ltecm", (PyCFunction)Py_ltecm, METH_O, NULL }, + { "_" "lteqec", (PyCFunction)Py_lteqec, METH_O, NULL }, + { "_" "g2icrs", (PyCFunction)Py_g2icrs, METH_O, NULL }, + { "_" "icrs2g", (PyCFunction)Py_icrs2g, METH_O, NULL }, + { "_" "eform", (PyCFunction)Py_eform, METH_O, NULL }, + { "_" "gc2gd", (PyCFunction)Py_gc2gd, METH_O, NULL }, + { "_" "gc2gde", (PyCFunction)Py_gc2gde, METH_O, NULL }, + { "_" "gd2gc", (PyCFunction)Py_gd2gc, METH_O, NULL }, + { "_" "gd2gce", (PyCFunction)Py_gd2gce, METH_O, NULL }, + { "_" "d2dtf", (PyCFunction)Py_d2dtf, METH_O, NULL }, + { "_" "dat", (PyCFunction)Py_dat, METH_O, NULL }, + { "_" "dtdb", (PyCFunction)Py_dtdb, METH_O, NULL }, + { "_" "dtf2d", (PyCFunction)Py_dtf2d, METH_O, NULL }, + { "_" "taitt", (PyCFunction)Py_taitt, METH_O, NULL }, + { "_" "taiut1", (PyCFunction)Py_taiut1, METH_O, NULL }, + { "_" "taiutc", 
(PyCFunction)Py_taiutc, METH_O, NULL }, + { "_" "tcbtdb", (PyCFunction)Py_tcbtdb, METH_O, NULL }, + { "_" "tcgtt", (PyCFunction)Py_tcgtt, METH_O, NULL }, + { "_" "tdbtcb", (PyCFunction)Py_tdbtcb, METH_O, NULL }, + { "_" "tdbtt", (PyCFunction)Py_tdbtt, METH_O, NULL }, + { "_" "tttai", (PyCFunction)Py_tttai, METH_O, NULL }, + { "_" "tttcg", (PyCFunction)Py_tttcg, METH_O, NULL }, + { "_" "tttdb", (PyCFunction)Py_tttdb, METH_O, NULL }, + { "_" "ttut1", (PyCFunction)Py_ttut1, METH_O, NULL }, + { "_" "ut1tai", (PyCFunction)Py_ut1tai, METH_O, NULL }, + { "_" "ut1tt", (PyCFunction)Py_ut1tt, METH_O, NULL }, + { "_" "ut1utc", (PyCFunction)Py_ut1utc, METH_O, NULL }, + { "_" "utctai", (PyCFunction)Py_utctai, METH_O, NULL }, + { "_" "utcut1", (PyCFunction)Py_utcut1, METH_O, NULL }, + { "_" "a2af", (PyCFunction)Py_a2af, METH_O, NULL }, + { "_" "a2tf", (PyCFunction)Py_a2tf, METH_O, NULL }, + { "_" "af2a", (PyCFunction)Py_af2a, METH_O, NULL }, + { "_" "anp", (PyCFunction)Py_anp, METH_O, NULL }, + { "_" "anpm", (PyCFunction)Py_anpm, METH_O, NULL }, + { "_" "d2tf", (PyCFunction)Py_d2tf, METH_O, NULL }, + { "_" "tf2a", (PyCFunction)Py_tf2a, METH_O, NULL }, + { "_" "tf2d", (PyCFunction)Py_tf2d, METH_O, NULL }, + { "_" "rxp", (PyCFunction)Py_rxp, METH_O, NULL }, + { "_" "rxpv", (PyCFunction)Py_rxpv, METH_O, NULL }, + { "_" "trxp", (PyCFunction)Py_trxp, METH_O, NULL }, + { "_" "trxpv", (PyCFunction)Py_trxpv, METH_O, NULL }, + { "_" "c2s", (PyCFunction)Py_c2s, METH_O, NULL }, + { "_" "p2s", (PyCFunction)Py_p2s, METH_O, NULL }, + { "_" "pv2s", (PyCFunction)Py_pv2s, METH_O, NULL }, + { "_" "s2c", (PyCFunction)Py_s2c, METH_O, NULL }, + { "_" "s2p", (PyCFunction)Py_s2p, METH_O, NULL }, + { "_" "s2pv", (PyCFunction)Py_s2pv, METH_O, NULL }, + { NULL } +}; + +struct module_state +{ + int _dummy; +}; + +#if PY3K + +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, + "_core", + MODULE_DOCSTRING, + sizeof(struct module_state), + module_functions, + NULL, + NULL, + NULL, + NULL +}; + +#define INITERROR return NULL + +PyMODINIT_FUNC PyInit__core(void) + +#else +#define INITERROR return + +PyMODINIT_FUNC init_core(void) +#endif + +{ + PyObject *m; + +#if PY3K + m = PyModule_Create(&moduledef); +#else + m = Py_InitModule3("_core", module_functions, MODULE_DOCSTRING); +#endif + + if (m == NULL) { + INITERROR; + } + + import_array(); + +#if PY3K + return m; +#endif +} \ No newline at end of file diff --git a/astropy/_erfa/core.c.templ b/astropy/_erfa/core.c.templ new file mode 100644 index 0000000..75f9b58 --- /dev/null +++ b/astropy/_erfa/core.c.templ @@ -0,0 +1,144 @@ +/* -*- mode: c -*- */ + +/* Licensed under a 3-clause BSD style license - see LICENSE.rst */ + +/* "core.c" is auto-generated by erfa_generator.py from the template + "core.c.templ". Do *not* edit "core.c" directly, instead edit + "core.c.templ" and run erfa_generator.py from the source directory to + update it. */ + + +#include <Python.h> +#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION +#include <numpy/arrayobject.h> +#include "erfa.h" + + +#if PY_MAJOR_VERSION >= 3 +#define PY3K 1 +#else +#define PY3K 0 +#endif + + +typedef struct { + PyObject_HEAD + NpyIter *iter; +} _NpyIterObject; + + +#define MODULE_DOCSTRING \ + "This module contains the C part of the ERFA python wrappers.\n" \ + "This implements only the inner iterator loops, while the heavy lifting\n" \ + "happens in Python in core.py\n\n" \ + "For more about the module and how to use it, see the ``core.py``\n" \ + "docstrings."
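+ +/* The wrappers generated by the loop below are registered with METH_O and + receive a single argument: a numpy nditer instance that core.py has + already set up to broadcast over the wrapped function's inputs and + outputs. The cast to _NpyIterObject above relies on numpy's nditer + Python object beginning with PyObject_HEAD followed by the NpyIter + pointer. Each wrapper releases the GIL and calls the ERFA function + once per broadcast element, so only the inner loop lives in C. */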
+ + +{%- for func in funcs %} + +static PyObject *Py_{{ func.pyname }}(PyObject *self, PyObject *args, PyObject *kwds) +{ + {%- for arg in func.args_by_inout('in|inout|out') %} + {{ arg.ctype }} (*_{{ arg.name }}){{ arg.cshape }}; + {%- endfor %} + {%- for arg in func.args_by_inout('ret|stat') %} + {{ arg.ctype_ptr }} _{{ arg.name }}; + {%- endfor %} + {%- if func.args_by_inout('stat')|length > 0 %} + int stat_ok = 1; + {%- endif %} + NpyIter *it = ((_NpyIterObject *)args)->iter; + char **dataptrarray = NpyIter_GetDataPtrArray(it); + NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL); + + Py_BEGIN_ALLOW_THREADS + + do { + {%- for arg in func.args_by_inout('in|inout|out') %} + _{{ arg.name }} = (({{ arg.ctype }} (*){{ arg.cshape }})(dataptrarray[{{ func.args.index(arg) }}])); + {%- endfor %} + + {{ func.args_by_inout('ret|stat')|map(attribute='name')|surround('_', ' = ')|join }}{{func.name}}({{ func.args_by_inout('in|inout|out')|map(attribute='name_for_call')|join(', ') }}); + + {%- for arg in func.args_by_inout('ret|stat') %} + *(({{ arg.ctype_ptr }} *)(dataptrarray[{{ func.args.index(arg) }}])) = _{{ arg.name }}; + {%- endfor %} + + {%- for arg in func.args_by_inout('stat') %} + if (_{{ arg.name }}) { + stat_ok = 0; + } + {%- endfor %} + } while (iternext(it)); + + Py_END_ALLOW_THREADS + + {%- if func.args_by_inout('stat')|length > 0 %} + if (stat_ok) { + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } + {%- else %} + Py_RETURN_NONE; + {%- endif %} +} + +{%- endfor %} + +static PyMethodDef module_functions[] = { + {%- for func in funcs %} + { "_" "{{ func.pyname }}", (PyCFunction)Py_{{ func.pyname }}, METH_O, NULL }, + {%- endfor %} + { NULL } +}; + +struct module_state +{ + int _dummy; +}; + +#if PY3K + +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, + "_core", + MODULE_DOCSTRING, + sizeof(struct module_state), + module_functions, + NULL, + NULL, + NULL, + NULL +}; + +#define INITERROR return NULL + +PyMODINIT_FUNC PyInit__core(void) + +#else +#define INITERROR return + +PyMODINIT_FUNC init_core(void) +#endif + +{ + PyObject *m; + +#if PY3K + m = PyModule_Create(&moduledef); +#else + m = Py_InitModule3("_core", module_functions, MODULE_DOCSTRING); +#endif + + if (m == NULL) { + INITERROR; + } + + import_array(); + +#if PY3K + return m; +#endif +} diff --git a/astropy/_erfa/core.py b/astropy/_erfa/core.py new file mode 100644 index 0000000..d3edeb4 --- /dev/null +++ b/astropy/_erfa/core.py @@ -0,0 +1,22631 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +# "core.py" is auto-generated by erfa_generator.py from the template +# "core.py.templ". Do *not* edit "core.py" directly, instead edit +# "core.py.templ" and run erfa_generator.py from the source directory to +# update it. + +""" +This module uses the Python/C API to wrap the ERFA library in +numpy-vectorized equivalents. + +.. warning:: + This is currently *not* part of the public Astropy API, and may change in + the future. + + +The key idea is that any function can be called with inputs that are arrays, +and the wrappers will automatically vectorize and call the ERFA functions for +each item using broadcasting rules for numpy. So the return values are always +numpy arrays of some sort. + +For ERFA functions that take/return vectors or matrices, the vector/matrix +dimension(s) are always the *last* dimension(s). For example, if you +want to give ten matrices (i.e., the ERFA input type is double[3][3]), +you would pass in a (10, 3, 3) numpy array.
If the output of the ERFA +function is scalar, you'll get back a length-10 1D array. + +Note that the C parts of these functions are implemented in a separate +module (compiled as ``_core``), derived from the ``core.c`` file. +Splitting the wrappers into separate pure-python and C portions +dramatically reduces compilation time without notably impacting +performance. (See issue [#3063] on the github repository for more +about this.) +""" +from __future__ import absolute_import, division, print_function + +import warnings + +from ..utils.exceptions import AstropyUserWarning + +import numpy +from . import _core + +# TODO: remove make_outputs_scalar and the scalar-reshaping code that uses it +# in the wrappers below; it was only needed for numpy < 1.8, which is no +# longer supported + +__all__ = ['ErfaError', 'ErfaWarning', + 'cal2jd', 'epb', 'epb2jd', 'epj', 'epj2jd', 'jd2cal', 'jdcalf', 'ab', 'apcg', 'apcg13', 'apci', 'apci13', 'apco', 'apco13', 'apcs', 'apcs13', 'aper', 'aper13', 'apio', 'apio13', 'atci13', 'atciq', 'atciqn', 'atciqz', 'atco13', 'atic13', 'aticq', 'aticqn', 'atio13', 'atioq', 'atoc13', 'atoi13', 'atoiq', 'ld', 'ldn', 'ldsun', 'pmpx', 'pmsafe', 'pvtob', 'refco', 'epv00', 'plan94', 'fad03', 'fae03', 'faf03', 'faju03', 'fal03', 'falp03', 'fama03', 'fame03', 'fane03', 'faom03', 'fapa03', 'fasa03', 'faur03', 'fave03', 'bi00', 'bp00', 'bp06', 'bpn2xy', 'c2i00a', 'c2i00b', 'c2i06a', 'c2ibpn', 'c2ixy', 'c2ixys', 'c2t00a', 'c2t00b', 'c2t06a', 'c2tcio', 'c2teqx', 'c2tpe', 'c2txy', 'eo06a', 'eors', 'fw2m', 'fw2xy', 'ltp', 'ltpb', 'ltpecl', 'ltpequ', 'num00a', 'num00b', 'num06a', 'numat', 'nut00a', 'nut00b', 'nut06a', 'nut80', 'nutm80', 'obl06', 'obl80', 'p06e', 'pb06', 'pfw06', 'pmat00', 'pmat06', 'pmat76', 'pn00', 'pn00a', 'pn00b', 'pn06', 'pn06a', 'pnm00a', 'pnm00b', 'pnm06a', 'pnm80', 'pom00', 'pr00', 'prec76', 's00', 's00a', 's00b', 's06', 's06a', 'sp00', 'xy06', 'xys00a', 'xys00b', 'xys06a', 'ee00', 'ee00a', 'ee00b', 'ee06a', 'eect00', 'eqeq94', 'era00', 'gmst00', 'gmst06', 'gmst82', 'gst00a', 'gst00b', 'gst06', 'gst06a', 'gst94', 'pvstar', 'starpv', 'fk52h', 'fk5hip', 'fk5hz', 'h2fk5', 'hfk5z', 'starpm', 'eceq06', 'ecm06', 'eqec06', 'lteceq', 'ltecm', 'lteqec', 'g2icrs', 'icrs2g', 'eform', 'gc2gd', 'gc2gde', 'gd2gc', 'gd2gce', 'd2dtf', 'dat', 'dtdb', 'dtf2d', 'taitt', 'taiut1', 'taiutc', 'tcbtdb', 'tcgtt', 'tdbtcb', 'tdbtt', 'tttai', 'tttcg', 'tttdb', 'ttut1', 'ut1tai', 'ut1tt', 'ut1utc', 'utctai', 'utcut1', 'a2af', 'a2tf', 'af2a', 'anp', 'anpm', 'd2tf', 'tf2a', 'tf2d', 'rxp', 'rxpv', 'trxp', 'trxpv', 'c2s', 'p2s', 'pv2s', 's2c', 's2p', 's2pv', + 'DPI', 'D2PI', 'DR2D', 'DD2R', 'DR2AS', 'DAS2R', 'DS2R', 'TURNAS', 'DMAS2R', 'DTY', 'DAYSEC', 'DJY', 'DJC', 'DJM', 'DJ00', 'DJM0', 'DJM00', 'DJM77', 'TTMTAI', 'DAU', 'CMPS', 'AULT', 'DC', 'ELG', 'ELB', 'TDB0', 'SRS', 'WGS84', 'GRS80', 'WGS72', + # TODO: delete the functions below when they can get auto-generated + 'version', 'version_major', 'version_minor', 'version_micro', 'sofa_version', + 'dt_eraASTROM', 'dt_eraLDBODY'] + + +# <---------------------------------Error-handling----------------------------> + +class ErfaError(ValueError): + """ + A class for errors triggered by ERFA functions (status codes < 0) + """ + + +class ErfaWarning(AstropyUserWarning): + """ + A class for warnings triggered by ERFA functions (status codes > 0) + """ + + +STATUS_CODES = {} # populated below, after each function that returns a status int + +# This is a hard-coded list of status codes that need to be remapped, +# such as to turn errors into warnings.
+STATUS_CODES_REMAP = { + 'cal2jd': {-3: 3} +} + + +def check_errwarn(statcodes, func_name): + # Remap any errors into warnings in the STATUS_CODES_REMAP dict. + if func_name in STATUS_CODES_REMAP: + for before, after in STATUS_CODES_REMAP[func_name].items(): + statcodes[statcodes == before] = after + STATUS_CODES[func_name][after] = STATUS_CODES[func_name][before] + + if numpy.any(statcodes<0): + # errors present - only report the errors. + if statcodes.shape: + statcodes = statcodes[statcodes<0] + + errcodes = numpy.unique(statcodes) + + errcounts = dict([(e, numpy.sum(statcodes==e)) for e in errcodes]) + + elsemsg = STATUS_CODES[func_name].get('else', None) + if elsemsg is None: + errmsgs = dict([(e, STATUS_CODES[func_name].get(e, 'Return code ' + str(e))) for e in errcodes]) + else: + errmsgs = dict([(e, STATUS_CODES[func_name].get(e, elsemsg)) for e in errcodes]) + + emsg = ', '.join(['{0} of "{1}"'.format(errcounts[e], errmsgs[e]) for e in errcodes]) + raise ErfaError('ERFA function "' + func_name + '" yielded ' + emsg) + + elif numpy.any(statcodes>0): + # only warnings present + if statcodes.shape: + statcodes = statcodes[statcodes>0] + + warncodes = numpy.unique(statcodes) + + warncounts = dict([(w, numpy.sum(statcodes==w)) for w in warncodes]) + + elsemsg = STATUS_CODES[func_name].get('else', None) + if elsemsg is None: + warnmsgs = dict([(w, STATUS_CODES[func_name].get(w, 'Return code ' + str(w))) for w in warncodes]) + else: + warnmsgs = dict([(w, STATUS_CODES[func_name].get(w, elsemsg)) for w in warncodes]) + + wmsg = ', '.join(['{0} of "{1}"'.format(warncounts[w], warnmsgs[w]) for w in warncodes]) + warnings.warn('ERFA function "' + func_name + '" yielded ' + wmsg, ErfaWarning) + + +# <-------------------------trailing shape verification-----------------------> + +def check_trailing_shape(arr, shape, name): + # arr is always a numpy array by the time this is called, so the + # trailing dimensions can be compared directly. + if arr.shape[-len(shape):] != shape: + raise ValueError("{0} must be of trailing dimensions {1}".format(name, shape)) + +# <--------------------------Actual ERFA-wrapping code------------------------> + +dt_eraASTROM = numpy.dtype([('pmt','d'), + ('eb','d',(3,)), + ('eh','d',(3,)), + ('em','d'), + ('v','d',(3,)), + ('bm1','d'), + ('bpn','d',(3,3)), + ('along','d'), + ('phi','d'), + ('xpl','d'), + ('ypl','d'), + ('sphi','d'), + ('cphi','d'), + ('diurab','d'), + ('eral','d'), + ('refa','d'), + ('refb','d')], align=True) + +dt_eraLDBODY = numpy.dtype([('bm','d'), + ('dl','d'), + ('pv','d',(2,3))], align=True) + + + +DPI = (3.141592653589793238462643) +"""Pi""" +D2PI = (6.283185307179586476925287) +"""2Pi""" +DR2D = (57.29577951308232087679815) +"""Radians to degrees""" +DD2R = (1.745329251994329576923691e-2) +"""Degrees to radians""" +DR2AS = (206264.8062470963551564734) +"""Radians to arcseconds""" +DAS2R = (4.848136811095359935899141e-6) +"""Arcseconds to radians""" +DS2R = (7.272205216643039903848712e-5) +"""Seconds of time to radians""" +TURNAS = (1296000.0) +"""Arcseconds in a full circle""" +DMAS2R = (DAS2R / 1e3) +"""Milliarcseconds to radians""" +DTY = (365.242198781) +"""Length of tropical year B1900 (days)""" +DAYSEC = (86400.0) +"""Seconds per day.""" +DJY = (365.25) +"""Days per Julian year""" +DJC = (36525.0) +"""Days per Julian century""" +DJM = (365250.0) +"""Days per Julian millennium""" +DJ00 = (2451545.0) +"""Reference epoch (J2000.0), Julian Date""" +DJM0 = (2400000.5) +"""Julian Date of Modified Julian Date zero""" +DJM00 = (51544.5) +"""Reference epoch (J2000.0), Modified Julian Date""" +DJM77 = (43144.0) +"""1977 Jan
1.0 as MJD""" +TTMTAI = (32.184) +"""TT minus TAI (s)""" +DAU = (149597870.7e3) +"""Astronomical unit (m, IAU 2012)""" +CMPS = 299792458.0 +"""Speed of light (m/s)""" +AULT = (DAU/CMPS) +"""Light time for 1 au (s)""" +DC = (DAYSEC/AULT) +"""Speed of light (au per day)""" +ELG = (6.969290134e-10) +"""L_G = 1 - d(TT)/d(TCG)""" +ELB = (1.550519768e-8) +"""L_B = 1 - d(TDB)/d(TCB)""" +TDB0 = (-6.55e-5) +"""TDB (s) at TAI 1977/1/1.0""" +SRS = 1.97412574336e-8 +"""Schwarzschild radius of the Sun (au) = 2 * 1.32712440041e20 / (2.99792458e8)^2 / 1.49597870700e11""" +WGS84 = 1 +"""Reference ellipsoid identifier: WGS84""" +GRS80 = 2 +"""Reference ellipsoid identifier: GRS80""" +WGS72 = 3 +"""Reference ellipsoid identifier: WGS72""" + + +def cal2jd(iy, im, id): + """ + Wrapper for ERFA function ``eraCal2jd``. + + Parameters + ---------- + iy : int array + im : int array + id : int array + + Returns + ------- + djm0 : double array + djm : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a C a l 2 j d + - - - - - - - - - - + + Gregorian Calendar to Julian Date. + + Given: + iy,im,id int year, month, day in Gregorian calendar (Note 1) + + Returned: + djm0 double MJD zero-point: always 2400000.5 + djm double Modified Julian Date for 0 hrs + + Returned (function value): + int status: + 0 = OK + -1 = bad year (Note 3: JD not computed) + -2 = bad month (JD not computed) + -3 = bad day (JD computed) + + Notes: + + 1) The algorithm used is valid from -4800 March 1, but this + implementation rejects dates before -4799 January 1. + + 2) The Julian Date is returned in two pieces, in the usual ERFA + manner, which is designed to preserve time resolution. The + Julian Date is available as a single number by adding djm0 and + djm. + + 3) In early eras the conversion is from the "Proleptic Gregorian + Calendar"; no account is taken of the date(s) of adoption of + the Gregorian Calendar, nor is the AD/BC numbering convention + observed. + + Reference: + + Explanatory Supplement to the Astronomical Almanac, + P. Kenneth Seidelmann (ed), University Science Books (1992), + Section 12.92 (p604). + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file.
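+ + An illustrative example (not part of the upstream ERFA documentation; + scalar inputs give 0-d array outputs): + + >>> from astropy._erfa import cal2jd + >>> djm0, djm = cal2jd(2017, 12, 19) + >>> float(djm0 + djm) # Julian Date for 2017 Dec 19, 0h + 2458106.5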
+ + """ + + #Turn all inputs into arrays + iy_in = numpy.array(iy, dtype=numpy.intc, order="C", copy=False, subok=True) + im_in = numpy.array(im, dtype=numpy.intc, order="C", copy=False, subok=True) + id_in = numpy.array(id, dtype=numpy.intc, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), iy_in, im_in, id_in) + djm0_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + djm_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [iy_in, im_in, id_in, djm0_out, djm_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._cal2jd(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'cal2jd') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(djm0_out.shape) > 0 and djm0_out.shape[0] == 1 + djm0_out = djm0_out.reshape(djm0_out.shape[1:]) + assert len(djm_out.shape) > 0 and djm_out.shape[0] == 1 + djm_out = djm_out.reshape(djm_out.shape[1:]) + + return djm0_out, djm_out +STATUS_CODES['cal2jd'] = {0: 'OK', -1: 'bad year (Note 3: JD not computed)', -2: 'bad month (JD not computed)', -3: 'bad day (JD computed)'} + + + +def epb(dj1, dj2): + """ + Wrapper for ERFA function ``eraEpb``. + + Parameters + ---------- + dj1 : double array + dj2 : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - + e r a E p b + - - - - - - - + + Julian Date to Besselian Epoch. + + Given: + dj1,dj2 double Julian Date (see note) + + Returned (function value): + double Besselian Epoch. + + Note: + + The Julian Date is supplied in two pieces, in the usual ERFA + manner, which is designed to preserve time resolution. The + Julian Date is available as a single number by adding dj1 and + dj2. The maximum resolution is achieved if dj1 is 2451545.0 + (J2000.0). + + Reference: + + Lieske, J.H., 1979. Astron.Astrophys., 73, 282. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + dj1_in = numpy.array(dj1, dtype=numpy.double, order="C", copy=False, subok=True) + dj2_in = numpy.array(dj2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), dj1_in, dj2_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [dj1_in, dj2_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._epb(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def epb2jd(epb): + """ + Wrapper for ERFA function ``eraEpb2jd``. + + Parameters + ---------- + epb : double array + + Returns + ------- + djm0 : double array + djm : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a E p b 2 j d + - - - - - - - - - - + + Besselian Epoch to Julian Date. + + Given: + epb double Besselian Epoch (e.g. 1957.3) + + Returned: + djm0 double MJD zero-point: always 2400000.5 + djm double Modified Julian Date + + Note: + + The Julian Date is returned in two pieces, in the usual ERFA + manner, which is designed to preserve time resolution. The + Julian Date is available as a single number by adding djm0 and + djm. + + Reference: + + Lieske, J.H., 1979, Astron.Astrophys. 73, 282. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + epb_in = numpy.array(epb, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), epb_in) + djm0_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + djm_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [epb_in, djm0_out, djm_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._epb2jd(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(djm0_out.shape) > 0 and djm0_out.shape[0] == 1 + djm0_out = djm0_out.reshape(djm0_out.shape[1:]) + assert len(djm_out.shape) > 0 and djm_out.shape[0] == 1 + djm_out = djm_out.reshape(djm_out.shape[1:]) + + return djm0_out, djm_out + + +def epj(dj1, dj2): + """ + Wrapper for ERFA function ``eraEpj``. + + Parameters + ---------- + dj1 : double array + dj2 : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - + e r a E p j + - - - - - - - + + Julian Date to Julian Epoch. 
+ + Given: + dj1,dj2 double Julian Date (see note) + + Returned (function value): + double Julian Epoch + + Note: + + The Julian Date is supplied in two pieces, in the usual ERFA + manner, which is designed to preserve time resolution. The + Julian Date is available as a single number by adding dj1 and + dj2. The maximum resolution is achieved if dj1 is 2451545.0 + (J2000.0). + + Reference: + + Lieske, J.H., 1979, Astron.Astrophys. 73, 282. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + dj1_in = numpy.array(dj1, dtype=numpy.double, order="C", copy=False, subok=True) + dj2_in = numpy.array(dj2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), dj1_in, dj2_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [dj1_in, dj2_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._epj(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def epj2jd(epj): + """ + Wrapper for ERFA function ``eraEpj2jd``. + + Parameters + ---------- + epj : double array + + Returns + ------- + djm0 : double array + djm : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a E p j 2 j d + - - - - - - - - - - + + Julian Epoch to Julian Date. + + Given: + epj double Julian Epoch (e.g. 1996.8) + + Returned: + djm0 double MJD zero-point: always 2400000.5 + djm double Modified Julian Date + + Note: + + The Julian Date is returned in two pieces, in the usual ERFA + manner, which is designed to preserve time resolution. The + Julian Date is available as a single number by adding djm0 and + djm. + + Reference: + + Lieske, J.H., 1979, Astron.Astrophys. 73, 282. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
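+ + An illustrative example (not part of the upstream ERFA documentation): + + >>> from astropy._erfa import epj2jd + >>> djm0, djm = epj2jd(2000.0) + >>> float(djm0), float(djm) # J2000.0 as MJD zero-point and MJD + (2400000.5, 51544.5)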
+ + """ + + #Turn all inputs into arrays + epj_in = numpy.array(epj, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), epj_in) + djm0_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + djm_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [epj_in, djm0_out, djm_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._epj2jd(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(djm0_out.shape) > 0 and djm0_out.shape[0] == 1 + djm0_out = djm0_out.reshape(djm0_out.shape[1:]) + assert len(djm_out.shape) > 0 and djm_out.shape[0] == 1 + djm_out = djm_out.reshape(djm_out.shape[1:]) + + return djm0_out, djm_out + + +def jd2cal(dj1, dj2): + """ + Wrapper for ERFA function ``eraJd2cal``. + + Parameters + ---------- + dj1 : double array + dj2 : double array + + Returns + ------- + iy : int array + im : int array + id : int array + fd : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a J d 2 c a l + - - - - - - - - - - + + Julian Date to Gregorian year, month, day, and fraction of a day. + + Given: + dj1,dj2 double Julian Date (Notes 1, 2) + + Returned (arguments): + iy int year + im int month + id int day + fd double fraction of day + + Returned (function value): + int status: + 0 = OK + -1 = unacceptable date (Note 1) + + Notes: + + 1) The earliest valid date is -68569.5 (-4900 March 1). The + largest value accepted is 1e9. + + 2) The Julian Date is apportioned in any convenient way between + the arguments dj1 and dj2. For example, JD=2450123.7 could + be expressed in any of these ways, among others: + + dj1 dj2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + 3) In early eras the conversion is from the "proleptic Gregorian + calendar"; no account is taken of the date(s) of adoption of + the Gregorian calendar, nor is the AD/BC numbering convention + observed. + + Reference: + + Explanatory Supplement to the Astronomical Almanac, + P. Kenneth Seidelmann (ed), University Science Books (1992), + Section 12.92 (p604). + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + dj1_in = numpy.array(dj1, dtype=numpy.double, order="C", copy=False, subok=True) + dj2_in = numpy.array(dj2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), dj1_in, dj2_in) + iy_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + im_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + id_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + fd_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [dj1_in, dj2_in, iy_out, im_out, id_out, fd_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*5 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._jd2cal(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'jd2cal') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(iy_out.shape) > 0 and iy_out.shape[0] == 1 + iy_out = iy_out.reshape(iy_out.shape[1:]) + assert len(im_out.shape) > 0 and im_out.shape[0] == 1 + im_out = im_out.reshape(im_out.shape[1:]) + assert len(id_out.shape) > 0 and id_out.shape[0] == 1 + id_out = id_out.reshape(id_out.shape[1:]) + assert len(fd_out.shape) > 0 and fd_out.shape[0] == 1 + fd_out = fd_out.reshape(fd_out.shape[1:]) + + return iy_out, im_out, id_out, fd_out +STATUS_CODES['jd2cal'] = {0: 'OK', -1: 'unacceptable date (Note 1)'} + + + +def jdcalf(ndp, dj1, dj2): + """ + Wrapper for ERFA function ``eraJdcalf``. + + Parameters + ---------- + ndp : int array + dj1 : double array + dj2 : double array + + Returns + ------- + iymdf : int array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a J d c a l f + - - - - - - - - - - + + Julian Date to Gregorian Calendar, expressed in a form convenient + for formatting messages: rounded to a specified precision. + + Given: + ndp int number of decimal places of days in fraction + dj1,dj2 double dj1+dj2 = Julian Date (Note 1) + + Returned: + iymdf int[4] year, month, day, fraction in Gregorian + calendar + + Returned (function value): + int status: + -1 = date out of range + 0 = OK + +1 = NDP not 0-9 (interpreted as 0) + + Notes: + + 1) The Julian Date is apportioned in any convenient way between + the arguments dj1 and dj2. For example, JD=2450123.7 could + be expressed in any of these ways, among others: + + dj1 dj2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + 2) In early eras the conversion is from the "Proleptic Gregorian + Calendar"; no account is taken of the date(s) of adoption of + the Gregorian Calendar, nor is the AD/BC numbering convention + observed. + + 3) Refer to the function eraJd2cal. + + 4) NDP should be 4 or less if internal overflows are to be + avoided on machines which use 16-bit integers. + + Called: + eraJd2cal JD to Gregorian calendar + + Reference: + + Explanatory Supplement to the Astronomical Almanac, + P. Kenneth Seidelmann (ed), University Science Books (1992), + Section 12.92 (p604). + + Copyright (C) 2013-2017, NumFOCUS Foundation. 
+ Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + ndp_in = numpy.array(ndp, dtype=numpy.intc, order="C", copy=False, subok=True) + dj1_in = numpy.array(dj1, dtype=numpy.double, order="C", copy=False, subok=True) + dj2_in = numpy.array(dj2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), ndp_in, dj1_in, dj2_in) + iymdf_out = numpy.empty(broadcast.shape + (4,), dtype=numpy.intc) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [ndp_in, dj1_in, dj2_in, iymdf_out[...,0], c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._jdcalf(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'jdcalf') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(iymdf_out.shape) > 0 and iymdf_out.shape[0] == 1 + iymdf_out = iymdf_out.reshape(iymdf_out.shape[1:]) + + return iymdf_out +STATUS_CODES['jdcalf'] = {-1: 'date out of range', 0: 'OK', 1: 'NDP not 0-9 (interpreted as 0)'} + + + +def ab(pnat, v, s, bm1): + """ + Wrapper for ERFA function ``eraAb``. + + Parameters + ---------- + pnat : double array + v : double array + s : double array + bm1 : double array + + Returns + ------- + ppr : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - + e r a A b + - - - - - - + + Apply aberration to transform natural direction into proper + direction. + + Given: + pnat double[3] natural direction to the source (unit vector) + v double[3] observer barycentric velocity in units of c + s double distance between the Sun and the observer (au) + bm1 double sqrt(1-|v|^2): reciprocal of Lorenz factor + + Returned: + ppr double[3] proper direction to source (unit vector) + + Notes: + + 1) The algorithm is based on Expr. (7.40) in the Explanatory + Supplement (Urban & Seidelmann 2013), but with the following + changes: + + o Rigorous rather than approximate normalization is applied. + + o The gravitational potential term from Expr. (7) in + Klioner (2003) is added, taking into account only the Sun's + contribution. This has a maximum effect of about + 0.4 microarcsecond. + + 2) In almost all cases, the maximum accuracy will be limited by the + supplied velocity. For example, if the ERFA eraEpv00 function is + used, errors of up to 5 microarcseconds could occur. + + References: + + Urban, S. & Seidelmann, P. K. (eds), Explanatory Supplement to + the Astronomical Almanac, 3rd ed., University Science Books + (2013). + + Klioner, Sergei A., "A practical relativistic model for micro- + arcsecond astrometry in space", Astr. J. 125, 1580-1597 (2003). + + Called: + eraPdp scalar product of two p-vectors + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
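+ + An illustrative example (not part of the upstream ERFA documentation): + for a stationary observer (zero velocity, unit Lorenz factor) the + proper direction equals the natural direction: + + >>> import numpy as np + >>> from astropy._erfa import ab + >>> pnat = np.array([1.0, 0.0, 0.0]) + >>> ppr = ab(pnat, np.zeros(3), 1.0, 1.0) # ppr equals pnat here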
+ + """ + + #Turn all inputs into arrays + pnat_in = numpy.array(pnat, dtype=numpy.double, order="C", copy=False, subok=True) + v_in = numpy.array(v, dtype=numpy.double, order="C", copy=False, subok=True) + s_in = numpy.array(s, dtype=numpy.double, order="C", copy=False, subok=True) + bm1_in = numpy.array(bm1, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(pnat_in, (3,), "pnat") + check_trailing_shape(v_in, (3,), "v") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), pnat_in[...,0], v_in[...,0], s_in, bm1_in) + ppr_out = numpy.empty(broadcast.shape + (3,), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [pnat_in[...,0], v_in[...,0], s_in, bm1_in, ppr_out[...,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._ab(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(ppr_out.shape) > 0 and ppr_out.shape[0] == 1 + ppr_out = ppr_out.reshape(ppr_out.shape[1:]) + + return ppr_out + + +def apcg(date1, date2, ebpv, ehp): + """ + Wrapper for ERFA function ``eraApcg``. + + Parameters + ---------- + date1 : double array + date2 : double array + ebpv : double array + ehp : double array + + Returns + ------- + astrom : eraASTROM array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a A p c g + - - - - - - - - + + For a geocentric observer, prepare star-independent astrometry + parameters for transformations between ICRS and GCRS coordinates. + The Earth ephemeris is supplied by the caller. + + The parameters produced by this function are required in the + parallax, light deflection and aberration parts of the astrometric + transformation chain. + + Given: + date1 double TDB as a 2-part... + date2 double ...Julian Date (Note 1) + ebpv double[2][3] Earth barycentric pos/vel (au, au/day) + ehp double[3] Earth heliocentric position (au) + + Returned: + astrom eraASTROM* star-independent astrometry parameters: + pmt double PM time interval (SSB, Julian years) + eb double[3] SSB to observer (vector, au) + eh double[3] Sun to observer (unit vector) + em double distance from Sun to observer (au) + v double[3] barycentric observer velocity (vector, c) + bm1 double sqrt(1-|v|^2): reciprocal of Lorenz factor + bpn double[3][3] bias-precession-nutation matrix + along double unchanged + xpl double unchanged + ypl double unchanged + sphi double unchanged + cphi double unchanged + diurab double unchanged + eral double unchanged + refa double unchanged + refb double unchanged + + Notes: + + 1) The TDB date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TDB)=2450123.7 could be expressed in any of these ways, among + others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in cases + where the loss of several decimal digits of resolution is + acceptable. The J2000 method is best matched to the way the + argument is handled internally and will deliver the optimum + resolution. 
The MJD method and the date & time methods are both + good compromises between resolution and convenience. For most + applications of this function the choice will not be at all + critical. + + TT can be used instead of TDB without any significant impact on + accuracy. + + 2) All the vectors are with respect to BCRS axes. + + 3) This is one of several functions that inserts into the astrom + structure star-independent parameters needed for the chain of + astrometric transformations ICRS <-> GCRS <-> CIRS <-> observed. + + The various functions support different classes of observer and + portions of the transformation chain: + + functions observer transformation + + eraApcg eraApcg13 geocentric ICRS <-> GCRS + eraApci eraApci13 terrestrial ICRS <-> CIRS + eraApco eraApco13 terrestrial ICRS <-> observed + eraApcs eraApcs13 space ICRS <-> GCRS + eraAper eraAper13 terrestrial update Earth rotation + eraApio eraApio13 terrestrial CIRS <-> observed + + Those with names ending in "13" use contemporary ERFA models to + compute the various ephemerides. The others accept ephemerides + supplied by the caller. + + The transformation from ICRS to GCRS covers space motion, + parallax, light deflection, and aberration. From GCRS to CIRS + comprises frame bias and precession-nutation. From CIRS to + observed takes account of Earth rotation, polar motion, diurnal + aberration and parallax (unless subsumed into the ICRS <-> GCRS + transformation), and atmospheric refraction. + + 4) The context structure astrom produced by this function is used by + eraAtciq* and eraAticq*. + + Called: + eraApcs astrometry parameters, ICRS-GCRS, space observer + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + ebpv_in = numpy.array(ebpv, dtype=numpy.double, order="C", copy=False, subok=True) + ehp_in = numpy.array(ehp, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(ebpv_in, (2, 3), "ebpv") + check_trailing_shape(ehp_in, (3,), "ehp") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in, ebpv_in[...,0,0], ehp_in[...,0]) + astrom_out = numpy.empty(broadcast.shape + (), dtype=dt_eraASTROM) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, ebpv_in[...,0,0], ehp_in[...,0], astrom_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._apcg(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(astrom_out.shape) > 0 and astrom_out.shape[0] == 1 + astrom_out = astrom_out.reshape(astrom_out.shape[1:]) + + return astrom_out + + +def apcg13(date1, date2): + """ + Wrapper for ERFA function ``eraApcg13``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + astrom : eraASTROM array + + Notes + ----- + The ERFA documentation is below. 
+ + - - - - - - - - - - + e r a A p c g 1 3 + - - - - - - - - - - + + For a geocentric observer, prepare star-independent astrometry + parameters for transformations between ICRS and GCRS coordinates. + The caller supplies the date, and ERFA models are used to predict + the Earth ephemeris. + + The parameters produced by this function are required in the + parallax, light deflection and aberration parts of the astrometric + transformation chain. + + Given: + date1 double TDB as a 2-part... + date2 double ...Julian Date (Note 1) + + Returned: + astrom eraASTROM* star-independent astrometry parameters: + pmt double PM time interval (SSB, Julian years) + eb double[3] SSB to observer (vector, au) + eh double[3] Sun to observer (unit vector) + em double distance from Sun to observer (au) + v double[3] barycentric observer velocity (vector, c) + bm1 double sqrt(1-|v|^2): reciprocal of Lorenz factor + bpn double[3][3] bias-precession-nutation matrix + along double unchanged + xpl double unchanged + ypl double unchanged + sphi double unchanged + cphi double unchanged + diurab double unchanged + eral double unchanged + refa double unchanged + refb double unchanged + + Notes: + + 1) The TDB date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TDB)=2450123.7 could be expressed in any of these ways, among + others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in cases + where the loss of several decimal digits of resolution is + acceptable. The J2000 method is best matched to the way the + argument is handled internally and will deliver the optimum + resolution. The MJD method and the date & time methods are both + good compromises between resolution and convenience. For most + applications of this function the choice will not be at all + critical. + + TT can be used instead of TDB without any significant impact on + accuracy. + + 2) All the vectors are with respect to BCRS axes. + + 3) In cases where the caller wishes to supply his own Earth + ephemeris, the function eraApcg can be used instead of the present + function. + + 4) This is one of several functions that inserts into the astrom + structure star-independent parameters needed for the chain of + astrometric transformations ICRS <-> GCRS <-> CIRS <-> observed. + + The various functions support different classes of observer and + portions of the transformation chain: + + functions observer transformation + + eraApcg eraApcg13 geocentric ICRS <-> GCRS + eraApci eraApci13 terrestrial ICRS <-> CIRS + eraApco eraApco13 terrestrial ICRS <-> observed + eraApcs eraApcs13 space ICRS <-> GCRS + eraAper eraAper13 terrestrial update Earth rotation + eraApio eraApio13 terrestrial CIRS <-> observed + + Those with names ending in "13" use contemporary ERFA models to + compute the various ephemerides. The others accept ephemerides + supplied by the caller. + + The transformation from ICRS to GCRS covers space motion, + parallax, light deflection, and aberration. From GCRS to CIRS + comprises frame bias and precession-nutation. From CIRS to + observed takes account of Earth rotation, polar motion, diurnal + aberration and parallax (unless subsumed into the ICRS <-> GCRS + transformation), and atmospheric refraction. + + 5) The context structure astrom produced by this function is used by + eraAtciq* and eraAticq*. 
+ + Called: + eraEpv00 Earth position and velocity + eraApcg astrometry parameters, ICRS-GCRS, geocenter + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + astrom_out = numpy.empty(broadcast.shape + (), dtype=dt_eraASTROM) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, astrom_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._apcg13(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(astrom_out.shape) > 0 and astrom_out.shape[0] == 1 + astrom_out = astrom_out.reshape(astrom_out.shape[1:]) + + return astrom_out + + +def apci(date1, date2, ebpv, ehp, x, y, s): + """ + Wrapper for ERFA function ``eraApci``. + + Parameters + ---------- + date1 : double array + date2 : double array + ebpv : double array + ehp : double array + x : double array + y : double array + s : double array + + Returns + ------- + astrom : eraASTROM array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a A p c i + - - - - - - - - + + For a terrestrial observer, prepare star-independent astrometry + parameters for transformations between ICRS and geocentric CIRS + coordinates. The Earth ephemeris and CIP/CIO are supplied by the + caller. + + The parameters produced by this function are required in the + parallax, light deflection, aberration, and bias-precession-nutation + parts of the astrometric transformation chain. + + Given: + date1 double TDB as a 2-part... + date2 double ...Julian Date (Note 1) + ebpv double[2][3] Earth barycentric position/velocity (au, au/day) + ehp double[3] Earth heliocentric position (au) + x,y double CIP X,Y (components of unit vector) + s double the CIO locator s (radians) + + Returned: + astrom eraASTROM* star-independent astrometry parameters: + pmt double PM time interval (SSB, Julian years) + eb double[3] SSB to observer (vector, au) + eh double[3] Sun to observer (unit vector) + em double distance from Sun to observer (au) + v double[3] barycentric observer velocity (vector, c) + bm1 double sqrt(1-|v|^2): reciprocal of Lorenz factor + bpn double[3][3] bias-precession-nutation matrix + along double unchanged + xpl double unchanged + ypl double unchanged + sphi double unchanged + cphi double unchanged + diurab double unchanged + eral double unchanged + refa double unchanged + refb double unchanged + + Notes: + + 1) The TDB date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. 
For example, + JD(TDB)=2450123.7 could be expressed in any of these ways, among + others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in cases + where the loss of several decimal digits of resolution is + acceptable. The J2000 method is best matched to the way the + argument is handled internally and will deliver the optimum + resolution. The MJD method and the date & time methods are both + good compromises between resolution and convenience. For most + applications of this function the choice will not be at all + critical. + + TT can be used instead of TDB without any significant impact on + accuracy. + + 2) All the vectors are with respect to BCRS axes. + + 3) In cases where the caller does not wish to provide the Earth + ephemeris and CIP/CIO, the function eraApci13 can be used instead + of the present function. This computes the required quantities + using other ERFA functions. + + 4) This is one of several functions that inserts into the astrom + structure star-independent parameters needed for the chain of + astrometric transformations ICRS <-> GCRS <-> CIRS <-> observed. + + The various functions support different classes of observer and + portions of the transformation chain: + + functions observer transformation + + eraApcg eraApcg13 geocentric ICRS <-> GCRS + eraApci eraApci13 terrestrial ICRS <-> CIRS + eraApco eraApco13 terrestrial ICRS <-> observed + eraApcs eraApcs13 space ICRS <-> GCRS + eraAper eraAper13 terrestrial update Earth rotation + eraApio eraApio13 terrestrial CIRS <-> observed + + Those with names ending in "13" use contemporary ERFA models to + compute the various ephemerides. The others accept ephemerides + supplied by the caller. + + The transformation from ICRS to GCRS covers space motion, + parallax, light deflection, and aberration. From GCRS to CIRS + comprises frame bias and precession-nutation. From CIRS to + observed takes account of Earth rotation, polar motion, diurnal + aberration and parallax (unless subsumed into the ICRS <-> GCRS + transformation), and atmospheric refraction. + + 5) The context structure astrom produced by this function is used by + eraAtciq* and eraAticq*. + + Called: + eraApcg astrometry parameters, ICRS-GCRS, geocenter + eraC2ixys celestial-to-intermediate matrix, given X,Y and s + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
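+
+    Example (added for this Python wrapper; not part of the ERFA
+    documentation).  An illustrative call with made-up ephemeris
+    numbers, chosen only to show the required trailing shapes
+    (ebpv is (2, 3), ehp is (3,)); the module is assumed to be
+    imported as ``erfa``:
+
+    >>> import numpy as np
+    >>> from astropy import _erfa as erfa
+    >>> ebpv = np.array([[-0.974, -0.212, -0.0918],
+    ...                  [0.00364, -0.01549, -0.00672]])
+    >>> ehp = np.array([-0.973, -0.209, -0.0906])
+    >>> astrom = erfa.apci(2456165.5, 0.401182685, ebpv, ehp,
+    ...                    5.42e-4, 3.02e-5, -8.5e-8)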
+ + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + ebpv_in = numpy.array(ebpv, dtype=numpy.double, order="C", copy=False, subok=True) + ehp_in = numpy.array(ehp, dtype=numpy.double, order="C", copy=False, subok=True) + x_in = numpy.array(x, dtype=numpy.double, order="C", copy=False, subok=True) + y_in = numpy.array(y, dtype=numpy.double, order="C", copy=False, subok=True) + s_in = numpy.array(s, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(ebpv_in, (2, 3), "ebpv") + check_trailing_shape(ehp_in, (3,), "ehp") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in, ebpv_in[...,0,0], ehp_in[...,0], x_in, y_in, s_in) + astrom_out = numpy.empty(broadcast.shape + (), dtype=dt_eraASTROM) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, ebpv_in[...,0,0], ehp_in[...,0], x_in, y_in, s_in, astrom_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*7 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._apci(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(astrom_out.shape) > 0 and astrom_out.shape[0] == 1 + astrom_out = astrom_out.reshape(astrom_out.shape[1:]) + + return astrom_out + + +def apci13(date1, date2): + """ + Wrapper for ERFA function ``eraApci13``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + astrom : eraASTROM array + eo : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a A p c i 1 3 + - - - - - - - - - - + + For a terrestrial observer, prepare star-independent astrometry + parameters for transformations between ICRS and geocentric CIRS + coordinates. The caller supplies the date, and ERFA models are used + to predict the Earth ephemeris and CIP/CIO. + + The parameters produced by this function are required in the + parallax, light deflection, aberration, and bias-precession-nutation + parts of the astrometric transformation chain. + + Given: + date1 double TDB as a 2-part... + date2 double ...Julian Date (Note 1) + + Returned: + astrom eraASTROM* star-independent astrometry parameters: + pmt double PM time interval (SSB, Julian years) + eb double[3] SSB to observer (vector, au) + eh double[3] Sun to observer (unit vector) + em double distance from Sun to observer (au) + v double[3] barycentric observer velocity (vector, c) + bm1 double sqrt(1-|v|^2): reciprocal of Lorenz factor + bpn double[3][3] bias-precession-nutation matrix + along double unchanged + xpl double unchanged + ypl double unchanged + sphi double unchanged + cphi double unchanged + diurab double unchanged + eral double unchanged + refa double unchanged + refb double unchanged + eo double* equation of the origins (ERA-GST) + + Notes: + + 1) The TDB date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. 
For example, + JD(TDB)=2450123.7 could be expressed in any of these ways, among + others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in cases + where the loss of several decimal digits of resolution is + acceptable. The J2000 method is best matched to the way the + argument is handled internally and will deliver the optimum + resolution. The MJD method and the date & time methods are both + good compromises between resolution and convenience. For most + applications of this function the choice will not be at all + critical. + + TT can be used instead of TDB without any significant impact on + accuracy. + + 2) All the vectors are with respect to BCRS axes. + + 3) In cases where the caller wishes to supply his own Earth + ephemeris and CIP/CIO, the function eraApci can be used instead + of the present function. + + 4) This is one of several functions that inserts into the astrom + structure star-independent parameters needed for the chain of + astrometric transformations ICRS <-> GCRS <-> CIRS <-> observed. + + The various functions support different classes of observer and + portions of the transformation chain: + + functions observer transformation + + eraApcg eraApcg13 geocentric ICRS <-> GCRS + eraApci eraApci13 terrestrial ICRS <-> CIRS + eraApco eraApco13 terrestrial ICRS <-> observed + eraApcs eraApcs13 space ICRS <-> GCRS + eraAper eraAper13 terrestrial update Earth rotation + eraApio eraApio13 terrestrial CIRS <-> observed + + Those with names ending in "13" use contemporary ERFA models to + compute the various ephemerides. The others accept ephemerides + supplied by the caller. + + The transformation from ICRS to GCRS covers space motion, + parallax, light deflection, and aberration. From GCRS to CIRS + comprises frame bias and precession-nutation. From CIRS to + observed takes account of Earth rotation, polar motion, diurnal + aberration and parallax (unless subsumed into the ICRS <-> GCRS + transformation), and atmospheric refraction. + + 5) The context structure astrom produced by this function is used by + eraAtciq* and eraAticq*. + + Called: + eraEpv00 Earth position and velocity + eraPnm06a classical NPB matrix, IAU 2006/2000A + eraBpn2xy extract CIP X,Y coordinates from NPB matrix + eraS06 the CIO locator s, given X,Y, IAU 2006 + eraApci astrometry parameters, ICRS-CIRS + eraEors equation of the origins, given NPB matrix and s + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
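+
+    Example (added for this Python wrapper; not part of the ERFA
+    documentation).  The wrapper returns the astrom structure and the
+    equation of the origins as a pair; the date values are arbitrary:
+
+    >>> from astropy import _erfa as erfa
+    >>> astrom, eo = erfa.apci13(2456165.5, 0.401182685)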
+ + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + astrom_out = numpy.empty(broadcast.shape + (), dtype=dt_eraASTROM) + eo_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, astrom_out, eo_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._apci13(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(astrom_out.shape) > 0 and astrom_out.shape[0] == 1 + astrom_out = astrom_out.reshape(astrom_out.shape[1:]) + assert len(eo_out.shape) > 0 and eo_out.shape[0] == 1 + eo_out = eo_out.reshape(eo_out.shape[1:]) + + return astrom_out, eo_out + + +def apco(date1, date2, ebpv, ehp, x, y, s, theta, elong, phi, hm, xp, yp, sp, refa, refb): + """ + Wrapper for ERFA function ``eraApco``. + + Parameters + ---------- + date1 : double array + date2 : double array + ebpv : double array + ehp : double array + x : double array + y : double array + s : double array + theta : double array + elong : double array + phi : double array + hm : double array + xp : double array + yp : double array + sp : double array + refa : double array + refb : double array + + Returns + ------- + refa : double array + refb : double array + astrom : eraASTROM array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a A p c o + - - - - - - - - + + For a terrestrial observer, prepare star-independent astrometry + parameters for transformations between ICRS and observed + coordinates. The caller supplies the Earth ephemeris, the Earth + rotation information and the refraction constants as well as the + site coordinates. + + Given: + date1 double TDB as a 2-part... 
+ date2 double ...Julian Date (Note 1) + ebpv double[2][3] Earth barycentric PV (au, au/day, Note 2) + ehp double[3] Earth heliocentric P (au, Note 2) + x,y double CIP X,Y (components of unit vector) + s double the CIO locator s (radians) + theta double Earth rotation angle (radians) + elong double longitude (radians, east +ve, Note 3) + phi double latitude (geodetic, radians, Note 3) + hm double height above ellipsoid (m, geodetic, Note 3) + xp,yp double polar motion coordinates (radians, Note 4) + sp double the TIO locator s' (radians, Note 4) + refa double refraction constant A (radians, Note 5) + refb double refraction constant B (radians, Note 5) + + Returned: + astrom eraASTROM* star-independent astrometry parameters: + pmt double PM time interval (SSB, Julian years) + eb double[3] SSB to observer (vector, au) + eh double[3] Sun to observer (unit vector) + em double distance from Sun to observer (au) + v double[3] barycentric observer velocity (vector, c) + bm1 double sqrt(1-|v|^2): reciprocal of Lorenz factor + bpn double[3][3] bias-precession-nutation matrix + along double longitude + s' (radians) + xpl double polar motion xp wrt local meridian (radians) + ypl double polar motion yp wrt local meridian (radians) + sphi double sine of geodetic latitude + cphi double cosine of geodetic latitude + diurab double magnitude of diurnal aberration vector + eral double "local" Earth rotation angle (radians) + refa double refraction constant A (radians) + refb double refraction constant B (radians) + + Notes: + + 1) The TDB date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TDB)=2450123.7 could be expressed in any of these ways, among + others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in cases + where the loss of several decimal digits of resolution is + acceptable. The J2000 method is best matched to the way the + argument is handled internally and will deliver the optimum + resolution. The MJD method and the date & time methods are both + good compromises between resolution and convenience. For most + applications of this function the choice will not be at all + critical. + + TT can be used instead of TDB without any significant impact on + accuracy. + + 2) The vectors eb, eh, and all the astrom vectors, are with respect + to BCRS axes. + + 3) The geographical coordinates are with respect to the ERFA_WGS84 + reference ellipsoid. TAKE CARE WITH THE LONGITUDE SIGN + CONVENTION: the longitude required by the present function is + right-handed, i.e. east-positive, in accordance with geographical + convention. + + 4) xp and yp are the coordinates (in radians) of the Celestial + Intermediate Pole with respect to the International Terrestrial + Reference System (see IERS Conventions), measured along the + meridians 0 and 90 deg west respectively. sp is the TIO locator + s', in radians, which positions the Terrestrial Intermediate + Origin on the equator. For many applications, xp, yp and + (especially) sp can be set to zero. + + Internally, the polar motion is stored in a form rotated onto the + local meridian. + + 5) The refraction constants refa and refb are for use in a + dZ = A*tan(Z)+B*tan^3(Z) model, where Z is the observed + (i.e. refracted) zenith distance and dZ is the amount of + refraction. 
+ + 6) It is advisable to take great care with units, as even unlikely + values of the input parameters are accepted and processed in + accordance with the models used. + + 7) In cases where the caller does not wish to provide the Earth + Ephemeris, the Earth rotation information and refraction + constants, the function eraApco13 can be used instead of the + present function. This starts from UTC and weather readings etc. + and computes suitable values using other ERFA functions. + + 8) This is one of several functions that inserts into the astrom + structure star-independent parameters needed for the chain of + astrometric transformations ICRS <-> GCRS <-> CIRS <-> observed. + + The various functions support different classes of observer and + portions of the transformation chain: + + functions observer transformation + + eraApcg eraApcg13 geocentric ICRS <-> GCRS + eraApci eraApci13 terrestrial ICRS <-> CIRS + eraApco eraApco13 terrestrial ICRS <-> observed + eraApcs eraApcs13 space ICRS <-> GCRS + eraAper eraAper13 terrestrial update Earth rotation + eraApio eraApio13 terrestrial CIRS <-> observed + + Those with names ending in "13" use contemporary ERFA models to + compute the various ephemerides. The others accept ephemerides + supplied by the caller. + + The transformation from ICRS to GCRS covers space motion, + parallax, light deflection, and aberration. From GCRS to CIRS + comprises frame bias and precession-nutation. From CIRS to + observed takes account of Earth rotation, polar motion, diurnal + aberration and parallax (unless subsumed into the ICRS <-> GCRS + transformation), and atmospheric refraction. + + 9) The context structure astrom produced by this function is used by + eraAtioq, eraAtoiq, eraAtciq* and eraAticq*. + + Called: + eraAper astrometry parameters: update ERA + eraC2ixys celestial-to-intermediate matrix, given X,Y and s + eraPvtob position/velocity of terrestrial station + eraTrxpv product of transpose of r-matrix and pv-vector + eraApcs astrometry parameters, ICRS-GCRS, space observer + eraCr copy r-matrix + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
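+
+    Example (added for this Python wrapper; not part of the ERFA
+    documentation).  All sixteen inputs are positional, and the
+    wrapper passes refa and refb through, returning them alongside
+    astrom.  The values below are arbitrary but dimensionally
+    sensible, and the module is assumed to be imported as ``erfa``:
+
+    >>> import numpy as np
+    >>> from astropy import _erfa as erfa
+    >>> ebpv = np.array([[-0.974, -0.212, -0.0918],
+    ...                  [0.00364, -0.01549, -0.00672]])
+    >>> ehp = np.array([-0.973, -0.209, -0.0906])
+    >>> refa, refb, astrom = erfa.apco(
+    ...     2456384.5, 0.970031644, ebpv, ehp,
+    ...     1.31e-3, -2.93e-5, 3.06e-8,              # x, y, s
+    ...     3.14540971,                              # theta
+    ...     -0.527800806, -1.2345051, 2738.0,        # elong, phi, hm
+    ...     2.47e-7, 1.83e-6, -3.02e-11,             # xp, yp, sp
+    ...     2.014e-4, -2.36e-7)                      # refa, refb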
+ + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + ebpv_in = numpy.array(ebpv, dtype=numpy.double, order="C", copy=False, subok=True) + ehp_in = numpy.array(ehp, dtype=numpy.double, order="C", copy=False, subok=True) + x_in = numpy.array(x, dtype=numpy.double, order="C", copy=False, subok=True) + y_in = numpy.array(y, dtype=numpy.double, order="C", copy=False, subok=True) + s_in = numpy.array(s, dtype=numpy.double, order="C", copy=False, subok=True) + theta_in = numpy.array(theta, dtype=numpy.double, order="C", copy=False, subok=True) + elong_in = numpy.array(elong, dtype=numpy.double, order="C", copy=False, subok=True) + phi_in = numpy.array(phi, dtype=numpy.double, order="C", copy=False, subok=True) + hm_in = numpy.array(hm, dtype=numpy.double, order="C", copy=False, subok=True) + xp_in = numpy.array(xp, dtype=numpy.double, order="C", copy=False, subok=True) + yp_in = numpy.array(yp, dtype=numpy.double, order="C", copy=False, subok=True) + sp_in = numpy.array(sp, dtype=numpy.double, order="C", copy=False, subok=True) + refa_in = numpy.array(refa, dtype=numpy.double, order="C", copy=False, subok=True) + refb_in = numpy.array(refb, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(ebpv_in, (2, 3), "ebpv") + check_trailing_shape(ehp_in, (3,), "ehp") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in, ebpv_in[...,0,0], ehp_in[...,0], x_in, y_in, s_in, theta_in, elong_in, phi_in, hm_in, xp_in, yp_in, sp_in, refa_in, refb_in) + refa_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + refb_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + astrom_out = numpy.empty(broadcast.shape + (), dtype=dt_eraASTROM) + numpy.copyto(refa_out, refa_in) + numpy.copyto(refb_out, refb_in) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, ebpv_in[...,0,0], ehp_in[...,0], x_in, y_in, s_in, theta_in, elong_in, phi_in, hm_in, xp_in, yp_in, sp_in, refa_out, refb_out, astrom_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*14 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._apco(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(refa_out.shape) > 0 and refa_out.shape[0] == 1 + refa_out = refa_out.reshape(refa_out.shape[1:]) + assert len(refb_out.shape) > 0 and refb_out.shape[0] == 1 + refb_out = refb_out.reshape(refb_out.shape[1:]) + assert len(astrom_out.shape) > 0 and astrom_out.shape[0] == 1 + astrom_out = astrom_out.reshape(astrom_out.shape[1:]) + + return refa_out, refb_out, astrom_out + + +def apco13(utc1, utc2, dut1, elong, phi, hm, xp, yp, phpa, tc, rh, wl): + """ + Wrapper for ERFA function ``eraApco13``. 
+ + Parameters + ---------- + utc1 : double array + utc2 : double array + dut1 : double array + elong : double array + phi : double array + hm : double array + xp : double array + yp : double array + phpa : double array + tc : double array + rh : double array + wl : double array + + Returns + ------- + astrom : eraASTROM array + eo : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a A p c o 1 3 + - - - - - - - - - - + + For a terrestrial observer, prepare star-independent astrometry + parameters for transformations between ICRS and observed + coordinates. The caller supplies UTC, site coordinates, ambient air + conditions and observing wavelength, and ERFA models are used to + obtain the Earth ephemeris, CIP/CIO and refraction constants. + + The parameters produced by this function are required in the + parallax, light deflection, aberration, and bias-precession-nutation + parts of the ICRS/CIRS transformations. + + Given: + utc1 double UTC as a 2-part... + utc2 double ...quasi Julian Date (Notes 1,2) + dut1 double UT1-UTC (seconds, Note 3) + elong double longitude (radians, east +ve, Note 4) + phi double latitude (geodetic, radians, Note 4) + hm double height above ellipsoid (m, geodetic, Notes 4,6) + xp,yp double polar motion coordinates (radians, Note 5) + phpa double pressure at the observer (hPa = mB, Note 6) + tc double ambient temperature at the observer (deg C) + rh double relative humidity at the observer (range 0-1) + wl double wavelength (micrometers, Note 7) + + Returned: + astrom eraASTROM* star-independent astrometry parameters: + pmt double PM time interval (SSB, Julian years) + eb double[3] SSB to observer (vector, au) + eh double[3] Sun to observer (unit vector) + em double distance from Sun to observer (au) + v double[3] barycentric observer velocity (vector, c) + bm1 double sqrt(1-|v|^2): reciprocal of Lorenz factor + bpn double[3][3] bias-precession-nutation matrix + along double longitude + s' (radians) + xpl double polar motion xp wrt local meridian (radians) + ypl double polar motion yp wrt local meridian (radians) + sphi double sine of geodetic latitude + cphi double cosine of geodetic latitude + diurab double magnitude of diurnal aberration vector + eral double "local" Earth rotation angle (radians) + refa double refraction constant A (radians) + refb double refraction constant B (radians) + eo double* equation of the origins (ERA-GST) + + Returned (function value): + int status: +1 = dubious year (Note 2) + 0 = OK + -1 = unacceptable date + + Notes: + + 1) utc1+utc2 is quasi Julian Date (see Note 2), apportioned in any + convenient way between the two arguments, for example where utc1 + is the Julian Day Number and utc2 is the fraction of a day. + + However, JD cannot unambiguously represent UTC during a leap + second unless special measures are taken. The convention in the + present function is that the JD day represents UTC days whether + the length is 86399, 86400 or 86401 SI seconds. + + Applications should use the function eraDtf2d to convert from + calendar date and time of day into 2-part quasi Julian Date, as + it implements the leap-second-ambiguity convention just + described. + + 2) The warning status "dubious year" flags UTCs that predate the + introduction of the time scale or that are too far in the + future to be trusted. See eraDat for further details. + + 3) UT1-UTC is tabulated in IERS bulletins. 
It increases by exactly + one second at the end of each positive UTC leap second, + introduced in order to keep UT1-UTC within +/- 0.9s. n.b. This + practice is under review, and in the future UT1-UTC may grow + essentially without limit. + + 4) The geographical coordinates are with respect to the ERFA_WGS84 + reference ellipsoid. TAKE CARE WITH THE LONGITUDE SIGN: the + longitude required by the present function is east-positive + (i.e. right-handed), in accordance with geographical convention. + + 5) The polar motion xp,yp can be obtained from IERS bulletins. The + values are the coordinates (in radians) of the Celestial + Intermediate Pole with respect to the International Terrestrial + Reference System (see IERS Conventions 2003), measured along the + meridians 0 and 90 deg west respectively. For many + applications, xp and yp can be set to zero. + + Internally, the polar motion is stored in a form rotated onto + the local meridian. + + 6) If hm, the height above the ellipsoid of the observing station + in meters, is not known but phpa, the pressure in hPa (=mB), is + available, an adequate estimate of hm can be obtained from the + expression + + hm = -29.3 * tsl * log ( phpa / 1013.25 ); + + where tsl is the approximate sea-level air temperature in K + (See Astrophysical Quantities, C.W.Allen, 3rd edition, section + 52). Similarly, if the pressure phpa is not known, it can be + estimated from the height of the observing station, hm, as + follows: + + phpa = 1013.25 * exp ( -hm / ( 29.3 * tsl ) ); + + Note, however, that the refraction is nearly proportional to + the pressure and that an accurate phpa value is important for + precise work. + + 7) The argument wl specifies the observing wavelength in + micrometers. The transition from optical to radio is assumed to + occur at 100 micrometers (about 3000 GHz). + + 8) It is advisable to take great care with units, as even unlikely + values of the input parameters are accepted and processed in + accordance with the models used. + + 9) In cases where the caller wishes to supply his own Earth + ephemeris, Earth rotation information and refraction constants, + the function eraApco can be used instead of the present function. + + 10) This is one of several functions that inserts into the astrom + structure star-independent parameters needed for the chain of + astrometric transformations ICRS <-> GCRS <-> CIRS <-> observed. + + The various functions support different classes of observer and + portions of the transformation chain: + + functions observer transformation + + eraApcg eraApcg13 geocentric ICRS <-> GCRS + eraApci eraApci13 terrestrial ICRS <-> CIRS + eraApco eraApco13 terrestrial ICRS <-> observed + eraApcs eraApcs13 space ICRS <-> GCRS + eraAper eraAper13 terrestrial update Earth rotation + eraApio eraApio13 terrestrial CIRS <-> observed + + Those with names ending in "13" use contemporary ERFA models to + compute the various ephemerides. The others accept ephemerides + supplied by the caller. + + The transformation from ICRS to GCRS covers space motion, + parallax, light deflection, and aberration. From GCRS to CIRS + comprises frame bias and precession-nutation. From CIRS to + observed takes account of Earth rotation, polar motion, diurnal + aberration and parallax (unless subsumed into the ICRS <-> GCRS + transformation), and atmospheric refraction. + + 11) The context structure astrom produced by this function is used + by eraAtioq, eraAtoiq, eraAtciq* and eraAticq*. 
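+
+    Example (added for this Python wrapper; not part of the ERFA
+    documentation).  The wrapper returns (astrom, eo) and, via the
+    status codes listed above, warns for a dubious year and raises for
+    an unacceptable date; the values here are arbitrary but plausible:
+
+    >>> from astropy import _erfa as erfa
+    >>> astrom, eo = erfa.apco13(2456384.5, 0.969254051, 0.1550675,
+    ...                          -0.527800806, -1.2345051, 2738.0,
+    ...                          2.47e-7, 1.83e-6, 731.0, 12.8,
+    ...                          0.59, 0.55)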
+ + Called: + eraUtctai UTC to TAI + eraTaitt TAI to TT + eraUtcut1 UTC to UT1 + eraEpv00 Earth position and velocity + eraPnm06a classical NPB matrix, IAU 2006/2000A + eraBpn2xy extract CIP X,Y coordinates from NPB matrix + eraS06 the CIO locator s, given X,Y, IAU 2006 + eraEra00 Earth rotation angle, IAU 2000 + eraSp00 the TIO locator s', IERS 2000 + eraRefco refraction constants for given ambient conditions + eraApco astrometry parameters, ICRS-observed + eraEors equation of the origins, given NPB matrix and s + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + utc1_in = numpy.array(utc1, dtype=numpy.double, order="C", copy=False, subok=True) + utc2_in = numpy.array(utc2, dtype=numpy.double, order="C", copy=False, subok=True) + dut1_in = numpy.array(dut1, dtype=numpy.double, order="C", copy=False, subok=True) + elong_in = numpy.array(elong, dtype=numpy.double, order="C", copy=False, subok=True) + phi_in = numpy.array(phi, dtype=numpy.double, order="C", copy=False, subok=True) + hm_in = numpy.array(hm, dtype=numpy.double, order="C", copy=False, subok=True) + xp_in = numpy.array(xp, dtype=numpy.double, order="C", copy=False, subok=True) + yp_in = numpy.array(yp, dtype=numpy.double, order="C", copy=False, subok=True) + phpa_in = numpy.array(phpa, dtype=numpy.double, order="C", copy=False, subok=True) + tc_in = numpy.array(tc, dtype=numpy.double, order="C", copy=False, subok=True) + rh_in = numpy.array(rh, dtype=numpy.double, order="C", copy=False, subok=True) + wl_in = numpy.array(wl, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), utc1_in, utc2_in, dut1_in, elong_in, phi_in, hm_in, xp_in, yp_in, phpa_in, tc_in, rh_in, wl_in) + astrom_out = numpy.empty(broadcast.shape + (), dtype=dt_eraASTROM) + eo_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [utc1_in, utc2_in, dut1_in, elong_in, phi_in, hm_in, xp_in, yp_in, phpa_in, tc_in, rh_in, wl_in, astrom_out, eo_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*12 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._apco13(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'apco13') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(astrom_out.shape) > 0 and astrom_out.shape[0] == 1 + astrom_out = astrom_out.reshape(astrom_out.shape[1:]) + assert len(eo_out.shape) > 0 and eo_out.shape[0] == 1 + eo_out = eo_out.reshape(eo_out.shape[1:]) + + return astrom_out, eo_out +STATUS_CODES['apco13'] = {1: 'dubious year (Note 2)', 0: 'OK', -1: 'unacceptable date'} + + + +def apcs(date1, date2, pv, ebpv, ehp): + """ + Wrapper for ERFA function ``eraApcs``. + + Parameters + ---------- + date1 : double array + date2 : double array + pv : double array + ebpv : double array + ehp : double array + + Returns + ------- + astrom : eraASTROM array + + Notes + ----- + The ERFA documentation is below. 
+ + - - - - - - - - + e r a A p c s + - - - - - - - - + + For an observer whose geocentric position and velocity are known, + prepare star-independent astrometry parameters for transformations + between ICRS and GCRS. The Earth ephemeris is supplied by the + caller. + + The parameters produced by this function are required in the space + motion, parallax, light deflection and aberration parts of the + astrometric transformation chain. + + Given: + date1 double TDB as a 2-part... + date2 double ...Julian Date (Note 1) + pv double[2][3] observer's geocentric pos/vel (m, m/s) + ebpv double[2][3] Earth barycentric PV (au, au/day) + ehp double[3] Earth heliocentric P (au) + + Returned: + astrom eraASTROM* star-independent astrometry parameters: + pmt double PM time interval (SSB, Julian years) + eb double[3] SSB to observer (vector, au) + eh double[3] Sun to observer (unit vector) + em double distance from Sun to observer (au) + v double[3] barycentric observer velocity (vector, c) + bm1 double sqrt(1-|v|^2): reciprocal of Lorenz factor + bpn double[3][3] bias-precession-nutation matrix + along double unchanged + xpl double unchanged + ypl double unchanged + sphi double unchanged + cphi double unchanged + diurab double unchanged + eral double unchanged + refa double unchanged + refb double unchanged + + Notes: + + 1) The TDB date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TDB)=2450123.7 could be expressed in any of these ways, among + others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in cases + where the loss of several decimal digits of resolution is + acceptable. The J2000 method is best matched to the way the + argument is handled internally and will deliver the optimum + resolution. The MJD method and the date & time methods are both + good compromises between resolution and convenience. For most + applications of this function the choice will not be at all + critical. + + TT can be used instead of TDB without any significant impact on + accuracy. + + 2) All the vectors are with respect to BCRS axes. + + 3) Providing separate arguments for (i) the observer's geocentric + position and velocity and (ii) the Earth ephemeris is done for + convenience in the geocentric, terrestrial and Earth orbit cases. + For deep space applications it maybe more convenient to specify + zero geocentric position and velocity and to supply the + observer's position and velocity information directly instead of + with respect to the Earth. However, note the different units: + m and m/s for the geocentric vectors, au and au/day for the + heliocentric and barycentric vectors. + + 4) In cases where the caller does not wish to provide the Earth + ephemeris, the function eraApcs13 can be used instead of the + present function. This computes the Earth ephemeris using the + ERFA function eraEpv00. + + 5) This is one of several functions that inserts into the astrom + structure star-independent parameters needed for the chain of + astrometric transformations ICRS <-> GCRS <-> CIRS <-> observed. 
+ + The various functions support different classes of observer and + portions of the transformation chain: + + functions observer transformation + + eraApcg eraApcg13 geocentric ICRS <-> GCRS + eraApci eraApci13 terrestrial ICRS <-> CIRS + eraApco eraApco13 terrestrial ICRS <-> observed + eraApcs eraApcs13 space ICRS <-> GCRS + eraAper eraAper13 terrestrial update Earth rotation + eraApio eraApio13 terrestrial CIRS <-> observed + + Those with names ending in "13" use contemporary ERFA models to + compute the various ephemerides. The others accept ephemerides + supplied by the caller. + + The transformation from ICRS to GCRS covers space motion, + parallax, light deflection, and aberration. From GCRS to CIRS + comprises frame bias and precession-nutation. From CIRS to + observed takes account of Earth rotation, polar motion, diurnal + aberration and parallax (unless subsumed into the ICRS <-> GCRS + transformation), and atmospheric refraction. + + 6) The context structure astrom produced by this function is used by + eraAtciq* and eraAticq*. + + Called: + eraCp copy p-vector + eraPm modulus of p-vector + eraPn decompose p-vector into modulus and direction + eraIr initialize r-matrix to identity + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + pv_in = numpy.array(pv, dtype=numpy.double, order="C", copy=False, subok=True) + ebpv_in = numpy.array(ebpv, dtype=numpy.double, order="C", copy=False, subok=True) + ehp_in = numpy.array(ehp, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(pv_in, (2, 3), "pv") + check_trailing_shape(ebpv_in, (2, 3), "ebpv") + check_trailing_shape(ehp_in, (3,), "ehp") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in, pv_in[...,0,0], ebpv_in[...,0,0], ehp_in[...,0]) + astrom_out = numpy.empty(broadcast.shape + (), dtype=dt_eraASTROM) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, pv_in[...,0,0], ebpv_in[...,0,0], ehp_in[...,0], astrom_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*5 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._apcs(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(astrom_out.shape) > 0 and astrom_out.shape[0] == 1 + astrom_out = astrom_out.reshape(astrom_out.shape[1:]) + + return astrom_out + + +def apcs13(date1, date2, pv): + """ + Wrapper for ERFA function ``eraApcs13``. + + Parameters + ---------- + date1 : double array + date2 : double array + pv : double array + + Returns + ------- + astrom : eraASTROM array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a A p c s 1 3 + - - - - - - - - - - + + For an observer whose geocentric position and velocity are known, + prepare star-independent astrometry parameters for transformations + between ICRS and GCRS. The Earth ephemeris is from ERFA models. 
+ + The parameters produced by this function are required in the space + motion, parallax, light deflection and aberration parts of the + astrometric transformation chain. + + Given: + date1 double TDB as a 2-part... + date2 double ...Julian Date (Note 1) + pv double[2][3] observer's geocentric pos/vel (Note 3) + + Returned: + astrom eraASTROM* star-independent astrometry parameters: + pmt double PM time interval (SSB, Julian years) + eb double[3] SSB to observer (vector, au) + eh double[3] Sun to observer (unit vector) + em double distance from Sun to observer (au) + v double[3] barycentric observer velocity (vector, c) + bm1 double sqrt(1-|v|^2): reciprocal of Lorenz factor + bpn double[3][3] bias-precession-nutation matrix + along double unchanged + xpl double unchanged + ypl double unchanged + sphi double unchanged + cphi double unchanged + diurab double unchanged + eral double unchanged + refa double unchanged + refb double unchanged + + Notes: + + 1) The TDB date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TDB)=2450123.7 could be expressed in any of these ways, among + others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in cases + where the loss of several decimal digits of resolution is + acceptable. The J2000 method is best matched to the way the + argument is handled internally and will deliver the optimum + resolution. The MJD method and the date & time methods are both + good compromises between resolution and convenience. For most + applications of this function the choice will not be at all + critical. + + TT can be used instead of TDB without any significant impact on + accuracy. + + 2) All the vectors are with respect to BCRS axes. + + 3) The observer's position and velocity pv are geocentric but with + respect to BCRS axes, and in units of m and m/s. No assumptions + are made about proximity to the Earth, and the function can be + used for deep space applications as well as Earth orbit and + terrestrial. + + 4) In cases where the caller wishes to supply his own Earth + ephemeris, the function eraApcs can be used instead of the present + function. + + 5) This is one of several functions that inserts into the astrom + structure star-independent parameters needed for the chain of + astrometric transformations ICRS <-> GCRS <-> CIRS <-> observed. + + The various functions support different classes of observer and + portions of the transformation chain: + + functions observer transformation + + eraApcg eraApcg13 geocentric ICRS <-> GCRS + eraApci eraApci13 terrestrial ICRS <-> CIRS + eraApco eraApco13 terrestrial ICRS <-> observed + eraApcs eraApcs13 space ICRS <-> GCRS + eraAper eraAper13 terrestrial update Earth rotation + eraApio eraApio13 terrestrial CIRS <-> observed + + Those with names ending in "13" use contemporary ERFA models to + compute the various ephemerides. The others accept ephemerides + supplied by the caller. + + The transformation from ICRS to GCRS covers space motion, + parallax, light deflection, and aberration. From GCRS to CIRS + comprises frame bias and precession-nutation. From CIRS to + observed takes account of Earth rotation, polar motion, diurnal + aberration and parallax (unless subsumed into the ICRS <-> GCRS + transformation), and atmospheric refraction. 
+ + 6) The context structure astrom produced by this function is used by + eraAtciq* and eraAticq*. + + Called: + eraEpv00 Earth position and velocity + eraApcs astrometry parameters, ICRS-GCRS, space observer + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + pv_in = numpy.array(pv, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(pv_in, (2, 3), "pv") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in, pv_in[...,0,0]) + astrom_out = numpy.empty(broadcast.shape + (), dtype=dt_eraASTROM) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, pv_in[...,0,0], astrom_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._apcs13(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(astrom_out.shape) > 0 and astrom_out.shape[0] == 1 + astrom_out = astrom_out.reshape(astrom_out.shape[1:]) + + return astrom_out + + +def aper(theta, astrom): + """ + Wrapper for ERFA function ``eraAper``. + + Parameters + ---------- + theta : double array + astrom : eraASTROM array + + Returns + ------- + astrom : eraASTROM array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a A p e r + - - - - - - - - + + In the star-independent astrometry parameters, update only the + Earth rotation angle, supplied by the caller explicitly. + + Given: + theta double Earth rotation angle (radians, Note 2) + astrom eraASTROM* star-independent astrometry parameters: + pmt double not used + eb double[3] not used + eh double[3] not used + em double not used + v double[3] not used + bm1 double not used + bpn double[3][3] not used + along double longitude + s' (radians) + xpl double not used + ypl double not used + sphi double not used + cphi double not used + diurab double not used + eral double not used + refa double not used + refb double not used + + Returned: + astrom eraASTROM* star-independent astrometry parameters: + pmt double unchanged + eb double[3] unchanged + eh double[3] unchanged + em double unchanged + v double[3] unchanged + bm1 double unchanged + bpn double[3][3] unchanged + along double unchanged + xpl double unchanged + ypl double unchanged + sphi double unchanged + cphi double unchanged + diurab double unchanged + eral double "local" Earth rotation angle (radians) + refa double unchanged + refb double unchanged + + Notes: + + 1) This function exists to enable sidereal-tracking applications to + avoid wasteful recomputation of the bulk of the astrometry + parameters: only the Earth rotation is updated. + + 2) For targets expressed as equinox based positions, such as + classical geocentric apparent (RA,Dec), the supplied theta can be + Greenwich apparent sidereal time rather than Earth rotation + angle. 
+ + 3) The function eraAper13 can be used instead of the present + function, and starts from UT1 rather than ERA itself. + + 4) This is one of several functions that inserts into the astrom + structure star-independent parameters needed for the chain of + astrometric transformations ICRS <-> GCRS <-> CIRS <-> observed. + + The various functions support different classes of observer and + portions of the transformation chain: + + functions observer transformation + + eraApcg eraApcg13 geocentric ICRS <-> GCRS + eraApci eraApci13 terrestrial ICRS <-> CIRS + eraApco eraApco13 terrestrial ICRS <-> observed + eraApcs eraApcs13 space ICRS <-> GCRS + eraAper eraAper13 terrestrial update Earth rotation + eraApio eraApio13 terrestrial CIRS <-> observed + + Those with names ending in "13" use contemporary ERFA models to + compute the various ephemerides. The others accept ephemerides + supplied by the caller. + + The transformation from ICRS to GCRS covers space motion, + parallax, light deflection, and aberration. From GCRS to CIRS + comprises frame bias and precession-nutation. From CIRS to + observed takes account of Earth rotation, polar motion, diurnal + aberration and parallax (unless subsumed into the ICRS <-> GCRS + transformation), and atmospheric refraction. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + theta_in = numpy.array(theta, dtype=numpy.double, order="C", copy=False, subok=True) + astrom_in = numpy.array(astrom, dtype=dt_eraASTROM, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), theta_in, astrom_in) + astrom_out = numpy.empty(broadcast.shape + (), dtype=dt_eraASTROM) + numpy.copyto(astrom_out, astrom_in) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [theta_in, astrom_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._aper(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(astrom_out.shape) > 0 and astrom_out.shape[0] == 1 + astrom_out = astrom_out.reshape(astrom_out.shape[1:]) + + return astrom_out + + +def aper13(ut11, ut12, astrom): + """ + Wrapper for ERFA function ``eraAper13``. + + Parameters + ---------- + ut11 : double array + ut12 : double array + astrom : eraASTROM array + + Returns + ------- + astrom : eraASTROM array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a A p e r 1 3 + - - - - - - - - - - + + In the star-independent astrometry parameters, update only the + Earth rotation angle. The caller provides UT1, (n.b. not UTC). + + Given: + ut11 double UT1 as a 2-part... 
+ ut12 double ...Julian Date (Note 1) + astrom eraASTROM* star-independent astrometry parameters: + pmt double not used + eb double[3] not used + eh double[3] not used + em double not used + v double[3] not used + bm1 double not used + bpn double[3][3] not used + along double longitude + s' (radians) + xpl double not used + ypl double not used + sphi double not used + cphi double not used + diurab double not used + eral double not used + refa double not used + refb double not used + + Returned: + astrom eraASTROM* star-independent astrometry parameters: + pmt double unchanged + eb double[3] unchanged + eh double[3] unchanged + em double unchanged + v double[3] unchanged + bm1 double unchanged + bpn double[3][3] unchanged + along double unchanged + xpl double unchanged + ypl double unchanged + sphi double unchanged + cphi double unchanged + diurab double unchanged + eral double "local" Earth rotation angle (radians) + refa double unchanged + refb double unchanged + + Notes: + + 1) The UT1 date (n.b. not UTC) ut11+ut12 is a Julian Date, + apportioned in any convenient way between the arguments ut11 and + ut12. For example, JD(UT1)=2450123.7 could be expressed in any + of these ways, among others: + + ut11 ut12 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in cases + where the loss of several decimal digits of resolution is + acceptable. The J2000 and MJD methods are good compromises + between resolution and convenience. The date & time method is + best matched to the algorithm used: maximum precision is + delivered when the ut11 argument is for 0hrs UT1 on the day in + question and the ut12 argument lies in the range 0 to 1, or vice + versa. + + 2) If the caller wishes to provide the Earth rotation angle itself, + the function eraAper can be used instead. One use of this + technique is to substitute Greenwich apparent sidereal time and + thereby to support equinox based transformations directly. + + 3) This is one of several functions that inserts into the astrom + structure star-independent parameters needed for the chain of + astrometric transformations ICRS <-> GCRS <-> CIRS <-> observed. + + The various functions support different classes of observer and + portions of the transformation chain: + + functions observer transformation + + eraApcg eraApcg13 geocentric ICRS <-> GCRS + eraApci eraApci13 terrestrial ICRS <-> CIRS + eraApco eraApco13 terrestrial ICRS <-> observed + eraApcs eraApcs13 space ICRS <-> GCRS + eraAper eraAper13 terrestrial update Earth rotation + eraApio eraApio13 terrestrial CIRS <-> observed + + Those with names ending in "13" use contemporary ERFA models to + compute the various ephemerides. The others accept ephemerides + supplied by the caller. + + The transformation from ICRS to GCRS covers space motion, + parallax, light deflection, and aberration. From GCRS to CIRS + comprises frame bias and precession-nutation. From CIRS to + observed takes account of Earth rotation, polar motion, diurnal + aberration and parallax (unless subsumed into the ICRS <-> GCRS + transformation), and atmospheric refraction. + + Called: + eraAper astrometry parameters: update ERA + eraEra00 Earth rotation angle, IAU 2000 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
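+
+    Example (added for this Python wrapper; not part of the ERFA
+    documentation).  A sketch of sidereal tracking: compute the full
+    parameter set once, then refresh only the Earth rotation angle.
+    The same arbitrary date pair is reused as the UT1 date purely for
+    illustration:
+
+    >>> from astropy import _erfa as erfa
+    >>> astrom, eo = erfa.apci13(2456165.5, 0.401182685)
+    >>> astrom = erfa.aper13(2456165.5, 0.401182685, astrom)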
+ + """ + + #Turn all inputs into arrays + ut11_in = numpy.array(ut11, dtype=numpy.double, order="C", copy=False, subok=True) + ut12_in = numpy.array(ut12, dtype=numpy.double, order="C", copy=False, subok=True) + astrom_in = numpy.array(astrom, dtype=dt_eraASTROM, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), ut11_in, ut12_in, astrom_in) + astrom_out = numpy.empty(broadcast.shape + (), dtype=dt_eraASTROM) + numpy.copyto(astrom_out, astrom_in) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [ut11_in, ut12_in, astrom_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._aper13(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(astrom_out.shape) > 0 and astrom_out.shape[0] == 1 + astrom_out = astrom_out.reshape(astrom_out.shape[1:]) + + return astrom_out + + +def apio(sp, theta, elong, phi, hm, xp, yp, refa, refb): + """ + Wrapper for ERFA function ``eraApio``. + + Parameters + ---------- + sp : double array + theta : double array + elong : double array + phi : double array + hm : double array + xp : double array + yp : double array + refa : double array + refb : double array + + Returns + ------- + refa : double array + refb : double array + astrom : eraASTROM array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a A p i o + - - - - - - - - + + For a terrestrial observer, prepare star-independent astrometry + parameters for transformations between CIRS and observed + coordinates. The caller supplies the Earth orientation information + and the refraction constants as well as the site coordinates. + + Given: + sp double the TIO locator s' (radians, Note 1) + theta double Earth rotation angle (radians) + elong double longitude (radians, east +ve, Note 2) + phi double geodetic latitude (radians, Note 2) + hm double height above ellipsoid (m, geodetic Note 2) + xp,yp double polar motion coordinates (radians, Note 3) + refa double refraction constant A (radians, Note 4) + refb double refraction constant B (radians, Note 4) + + Returned: + astrom eraASTROM* star-independent astrometry parameters: + pmt double unchanged + eb double[3] unchanged + eh double[3] unchanged + em double unchanged + v double[3] unchanged + bm1 double unchanged + bpn double[3][3] unchanged + along double longitude + s' (radians) + xpl double polar motion xp wrt local meridian (radians) + ypl double polar motion yp wrt local meridian (radians) + sphi double sine of geodetic latitude + cphi double cosine of geodetic latitude + diurab double magnitude of diurnal aberration vector + eral double "local" Earth rotation angle (radians) + refa double refraction constant A (radians) + refb double refraction constant B (radians) + + Notes: + + 1) sp, the TIO locator s', is a tiny quantity needed only by the + most precise applications. It can either be set to zero or + predicted using the ERFA function eraSp00. + + 2) The geographical coordinates are with respect to the ERFA_WGS84 + reference ellipsoid. TAKE CARE WITH THE LONGITUDE SIGN: the + longitude required by the present function is east-positive + (i.e. 
right-handed), in accordance with geographical convention. + + 3) The polar motion xp,yp can be obtained from IERS bulletins. The + values are the coordinates (in radians) of the Celestial + Intermediate Pole with respect to the International Terrestrial + Reference System (see IERS Conventions 2003), measured along the + meridians 0 and 90 deg west respectively. For many applications, + xp and yp can be set to zero. + + Internally, the polar motion is stored in a form rotated onto the + local meridian. + + 4) The refraction constants refa and refb are for use in a + dZ = A*tan(Z)+B*tan^3(Z) model, where Z is the observed + (i.e. refracted) zenith distance and dZ is the amount of + refraction. + + 5) It is advisable to take great care with units, as even unlikely + values of the input parameters are accepted and processed in + accordance with the models used. + + 6) In cases where the caller does not wish to provide the Earth + rotation information and refraction constants, the function + eraApio13 can be used instead of the present function. This + starts from UTC and weather readings etc. and computes suitable + values using other ERFA functions. + + 7) This is one of several functions that inserts into the astrom + structure star-independent parameters needed for the chain of + astrometric transformations ICRS <-> GCRS <-> CIRS <-> observed. + + The various functions support different classes of observer and + portions of the transformation chain: + + functions observer transformation + + eraApcg eraApcg13 geocentric ICRS <-> GCRS + eraApci eraApci13 terrestrial ICRS <-> CIRS + eraApco eraApco13 terrestrial ICRS <-> observed + eraApcs eraApcs13 space ICRS <-> GCRS + eraAper eraAper13 terrestrial update Earth rotation + eraApio eraApio13 terrestrial CIRS <-> observed + + Those with names ending in "13" use contemporary ERFA models to + compute the various ephemerides. The others accept ephemerides + supplied by the caller. + + The transformation from ICRS to GCRS covers space motion, + parallax, light deflection, and aberration. From GCRS to CIRS + comprises frame bias and precession-nutation. From CIRS to + observed takes account of Earth rotation, polar motion, diurnal + aberration and parallax (unless subsumed into the ICRS <-> GCRS + transformation), and atmospheric refraction. + + 8) The context structure astrom produced by this function is used by + eraAtioq and eraAtoiq. + + Called: + eraPvtob position/velocity of terrestrial station + eraAper astrometry parameters: update ERA + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
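+
+    Example (added for this Python wrapper; not part of the ERFA
+    documentation).  As with eraApco, the wrapper passes refa and refb
+    through and returns them alongside astrom; the values below are
+    arbitrary but dimensionally sensible:
+
+    >>> from astropy import _erfa as erfa
+    >>> refa, refb, astrom = erfa.apio(
+    ...     -3.02e-11, 3.14540971,                   # sp, theta
+    ...     -0.527800806, -1.2345051, 2738.0,        # elong, phi, hm
+    ...     2.47e-7, 1.83e-6,                        # xp, yp
+    ...     2.014e-4, -2.36e-7)                      # refa, refb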
+ + """ + + #Turn all inputs into arrays + sp_in = numpy.array(sp, dtype=numpy.double, order="C", copy=False, subok=True) + theta_in = numpy.array(theta, dtype=numpy.double, order="C", copy=False, subok=True) + elong_in = numpy.array(elong, dtype=numpy.double, order="C", copy=False, subok=True) + phi_in = numpy.array(phi, dtype=numpy.double, order="C", copy=False, subok=True) + hm_in = numpy.array(hm, dtype=numpy.double, order="C", copy=False, subok=True) + xp_in = numpy.array(xp, dtype=numpy.double, order="C", copy=False, subok=True) + yp_in = numpy.array(yp, dtype=numpy.double, order="C", copy=False, subok=True) + refa_in = numpy.array(refa, dtype=numpy.double, order="C", copy=False, subok=True) + refb_in = numpy.array(refb, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), sp_in, theta_in, elong_in, phi_in, hm_in, xp_in, yp_in, refa_in, refb_in) + refa_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + refb_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + astrom_out = numpy.empty(broadcast.shape + (), dtype=dt_eraASTROM) + numpy.copyto(refa_out, refa_in) + numpy.copyto(refb_out, refb_in) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [sp_in, theta_in, elong_in, phi_in, hm_in, xp_in, yp_in, refa_out, refb_out, astrom_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*7 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._apio(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(refa_out.shape) > 0 and refa_out.shape[0] == 1 + refa_out = refa_out.reshape(refa_out.shape[1:]) + assert len(refb_out.shape) > 0 and refb_out.shape[0] == 1 + refb_out = refb_out.reshape(refb_out.shape[1:]) + assert len(astrom_out.shape) > 0 and astrom_out.shape[0] == 1 + astrom_out = astrom_out.reshape(astrom_out.shape[1:]) + + return refa_out, refb_out, astrom_out + + +def apio13(utc1, utc2, dut1, elong, phi, hm, xp, yp, phpa, tc, rh, wl): + """ + Wrapper for ERFA function ``eraApio13``. + + Parameters + ---------- + utc1 : double array + utc2 : double array + dut1 : double array + elong : double array + phi : double array + hm : double array + xp : double array + yp : double array + phpa : double array + tc : double array + rh : double array + wl : double array + + Returns + ------- + astrom : eraASTROM array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a A p i o 1 3 + - - - - - - - - - - + + For a terrestrial observer, prepare star-independent astrometry + parameters for transformations between CIRS and observed + coordinates. The caller supplies UTC, site coordinates, ambient air + conditions and observing wavelength. + + Given: + utc1 double UTC as a 2-part... 
+ utc2 double ...quasi Julian Date (Notes 1,2) + dut1 double UT1-UTC (seconds) + elong double longitude (radians, east +ve, Note 3) + phi double geodetic latitude (radians, Note 3) + hm double height above ellipsoid (m, geodetic Notes 4,6) + xp,yp double polar motion coordinates (radians, Note 5) + phpa double pressure at the observer (hPa = mB, Note 6) + tc double ambient temperature at the observer (deg C) + rh double relative humidity at the observer (range 0-1) + wl double wavelength (micrometers, Note 7) + + Returned: + astrom eraASTROM* star-independent astrometry parameters: + pmt double unchanged + eb double[3] unchanged + eh double[3] unchanged + em double unchanged + v double[3] unchanged + bm1 double unchanged + bpn double[3][3] unchanged + along double longitude + s' (radians) + xpl double polar motion xp wrt local meridian (radians) + ypl double polar motion yp wrt local meridian (radians) + sphi double sine of geodetic latitude + cphi double cosine of geodetic latitude + diurab double magnitude of diurnal aberration vector + eral double "local" Earth rotation angle (radians) + refa double refraction constant A (radians) + refb double refraction constant B (radians) + + Returned (function value): + int status: +1 = dubious year (Note 2) + 0 = OK + -1 = unacceptable date + + Notes: + + 1) utc1+utc2 is quasi Julian Date (see Note 2), apportioned in any + convenient way between the two arguments, for example where utc1 + is the Julian Day Number and utc2 is the fraction of a day. + + However, JD cannot unambiguously represent UTC during a leap + second unless special measures are taken. The convention in the + present function is that the JD day represents UTC days whether + the length is 86399, 86400 or 86401 SI seconds. + + Applications should use the function eraDtf2d to convert from + calendar date and time of day into 2-part quasi Julian Date, as + it implements the leap-second-ambiguity convention just + described. + + 2) The warning status "dubious year" flags UTCs that predate the + introduction of the time scale or that are too far in the future + to be trusted. See eraDat for further details. + + 3) UT1-UTC is tabulated in IERS bulletins. It increases by exactly + one second at the end of each positive UTC leap second, + introduced in order to keep UT1-UTC within +/- 0.9s. n.b. This + practice is under review, and in the future UT1-UTC may grow + essentially without limit. + + 4) The geographical coordinates are with respect to the ERFA_WGS84 + reference ellipsoid. TAKE CARE WITH THE LONGITUDE SIGN: the + longitude required by the present function is east-positive + (i.e. right-handed), in accordance with geographical convention. + + 5) The polar motion xp,yp can be obtained from IERS bulletins. The + values are the coordinates (in radians) of the Celestial + Intermediate Pole with respect to the International Terrestrial + Reference System (see IERS Conventions 2003), measured along the + meridians 0 and 90 deg west respectively. For many applications, + xp and yp can be set to zero. + + Internally, the polar motion is stored in a form rotated onto + the local meridian. + + 6) If hm, the height above the ellipsoid of the observing station + in meters, is not known but phpa, the pressure in hPa (=mB), is + available, an adequate estimate of hm can be obtained from the + expression + + hm = -29.3 * tsl * log ( phpa / 1013.25 ); + + where tsl is the approximate sea-level air temperature in K + (See Astrophysical Quantities, C.W.Allen, 3rd edition, section + 52). 
Similarly, if the pressure phpa is not known, it can be
+       estimated from the height of the observing station, hm, as
+       follows:
+
+          phpa = 1013.25 * exp ( -hm / ( 29.3 * tsl ) );
+
+       Note, however, that the refraction is nearly proportional to the
+       pressure and that an accurate phpa value is important for
+       precise work.
+
+    7) The argument wl specifies the observing wavelength in
+       micrometers. The transition from optical to radio is assumed to
+       occur at 100 micrometers (about 3000 GHz).
+
+    8) It is advisable to take great care with units, as even unlikely
+       values of the input parameters are accepted and processed in
+       accordance with the models used.
+
+    9) In cases where the caller wishes to supply his own Earth
+       rotation information and refraction constants, the function
+       eraApio can be used instead of the present function.
+
+    10) This is one of several functions that inserts into the astrom
+        structure star-independent parameters needed for the chain of
+        astrometric transformations ICRS <-> GCRS <-> CIRS <-> observed.
+
+        The various functions support different classes of observer and
+        portions of the transformation chain:
+
+          functions           observer        transformation
+
+        eraApcg eraApcg13    geocentric      ICRS <-> GCRS
+        eraApci eraApci13    terrestrial     ICRS <-> CIRS
+        eraApco eraApco13    terrestrial     ICRS <-> observed
+        eraApcs eraApcs13    space           ICRS <-> GCRS
+        eraAper eraAper13    terrestrial     update Earth rotation
+        eraApio eraApio13    terrestrial     CIRS <-> observed
+
+        Those with names ending in "13" use contemporary ERFA models to
+        compute the various ephemerides. The others accept ephemerides
+        supplied by the caller.
+
+        The transformation from ICRS to GCRS covers space motion,
+        parallax, light deflection, and aberration. From GCRS to CIRS
+        comprises frame bias and precession-nutation. From CIRS to
+        observed takes account of Earth rotation, polar motion, diurnal
+        aberration and parallax (unless subsumed into the ICRS <-> GCRS
+        transformation), and atmospheric refraction.
+
+    11) The context structure astrom produced by this function is used
+        by eraAtioq and eraAtoiq.
+
+    Called:
+       eraUtctai    UTC to TAI
+       eraTaitt     TAI to TT
+       eraUtcut1    UTC to UT1
+       eraSp00      the TIO locator s', IERS 2000
+       eraEra00     Earth rotation angle, IAU 2000
+       eraRefco     refraction constants for given ambient conditions
+       eraApio      astrometry parameters, CIRS-observed
+
+    Copyright (C) 2013-2017, NumFOCUS Foundation.
+    Derived, with permission, from the SOFA library. See notes at end of file.
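+
+    Examples
+    --------
+    A minimal usage sketch starting from UTC, site data and weather
+    readings (placeholder values; the import path is an assumption)::
+
+        from astropy import _erfa as erfa
+
+        utc1, utc2 = 2456384.5, 0.969254    # 2-part quasi Julian Date
+        dut1 = 0.1550675                    # UT1-UTC (seconds)
+        elong, phi, hm = -0.5278, -1.2346, 2738.0
+        xp, yp = 2.47e-7, 1.83e-6           # polar motion (radians)
+        phpa, tc, rh, wl = 731.0, 12.8, 0.59, 0.55
+        astrom = erfa.apio13(utc1, utc2, dut1, elong, phi, hm,
+                             xp, yp, phpa, tc, rh, wl)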
+ + """ + + #Turn all inputs into arrays + utc1_in = numpy.array(utc1, dtype=numpy.double, order="C", copy=False, subok=True) + utc2_in = numpy.array(utc2, dtype=numpy.double, order="C", copy=False, subok=True) + dut1_in = numpy.array(dut1, dtype=numpy.double, order="C", copy=False, subok=True) + elong_in = numpy.array(elong, dtype=numpy.double, order="C", copy=False, subok=True) + phi_in = numpy.array(phi, dtype=numpy.double, order="C", copy=False, subok=True) + hm_in = numpy.array(hm, dtype=numpy.double, order="C", copy=False, subok=True) + xp_in = numpy.array(xp, dtype=numpy.double, order="C", copy=False, subok=True) + yp_in = numpy.array(yp, dtype=numpy.double, order="C", copy=False, subok=True) + phpa_in = numpy.array(phpa, dtype=numpy.double, order="C", copy=False, subok=True) + tc_in = numpy.array(tc, dtype=numpy.double, order="C", copy=False, subok=True) + rh_in = numpy.array(rh, dtype=numpy.double, order="C", copy=False, subok=True) + wl_in = numpy.array(wl, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), utc1_in, utc2_in, dut1_in, elong_in, phi_in, hm_in, xp_in, yp_in, phpa_in, tc_in, rh_in, wl_in) + astrom_out = numpy.empty(broadcast.shape + (), dtype=dt_eraASTROM) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [utc1_in, utc2_in, dut1_in, elong_in, phi_in, hm_in, xp_in, yp_in, phpa_in, tc_in, rh_in, wl_in, astrom_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*12 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._apio13(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'apio13') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(astrom_out.shape) > 0 and astrom_out.shape[0] == 1 + astrom_out = astrom_out.reshape(astrom_out.shape[1:]) + + return astrom_out +STATUS_CODES['apio13'] = {1: 'dubious year (Note 2)', 0: 'OK', -1: 'unacceptable date'} + + + +def atci13(rc, dc, pr, pd, px, rv, date1, date2): + """ + Wrapper for ERFA function ``eraAtci13``. + + Parameters + ---------- + rc : double array + dc : double array + pr : double array + pd : double array + px : double array + rv : double array + date1 : double array + date2 : double array + + Returns + ------- + ri : double array + di : double array + eo : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a A t c i 1 3 + - - - - - - - - - - + + Transform ICRS star data, epoch J2000.0, to CIRS. + + Given: + rc double ICRS right ascension at J2000.0 (radians, Note 1) + dc double ICRS declination at J2000.0 (radians, Note 1) + pr double RA proper motion (radians/year; Note 2) + pd double Dec proper motion (radians/year) + px double parallax (arcsec) + rv double radial velocity (km/s, +ve if receding) + date1 double TDB as a 2-part... 
+ date2 double ...Julian Date (Note 3) + + Returned: + ri,di double* CIRS geocentric RA,Dec (radians) + eo double* equation of the origins (ERA-GST, Note 5) + + Notes: + + 1) Star data for an epoch other than J2000.0 (for example from the + Hipparcos catalog, which has an epoch of J1991.25) will require a + preliminary call to eraPmsafe before use. + + 2) The proper motion in RA is dRA/dt rather than cos(Dec)*dRA/dt. + + 3) The TDB date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TDB)=2450123.7 could be expressed in any of these ways, among + others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in cases + where the loss of several decimal digits of resolution is + acceptable. The J2000 method is best matched to the way the + argument is handled internally and will deliver the optimum + resolution. The MJD method and the date & time methods are both + good compromises between resolution and convenience. For most + applications of this function the choice will not be at all + critical. + + TT can be used instead of TDB without any significant impact on + accuracy. + + 4) The available accuracy is better than 1 milliarcsecond, limited + mainly by the precession-nutation model that is used, namely + IAU 2000A/2006. Very close to solar system bodies, additional + errors of up to several milliarcseconds can occur because of + unmodeled light deflection; however, the Sun's contribution is + taken into account, to first order. The accuracy limitations of + the ERFA function eraEpv00 (used to compute Earth position and + velocity) can contribute aberration errors of up to + 5 microarcseconds. Light deflection at the Sun's limb is + uncertain at the 0.4 mas level. + + 5) Should the transformation to (equinox based) apparent place be + required rather than (CIO based) intermediate place, subtract the + equation of the origins from the returned right ascension: + RA = RI - EO. (The eraAnp function can then be applied, as + required, to keep the result in the conventional 0-2pi range.) + + Called: + eraApci13 astrometry parameters, ICRS-CIRS, 2013 + eraAtciq quick ICRS to CIRS + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
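+
+    Examples
+    --------
+    A minimal usage sketch, including the equinox-based apparent place
+    described in Note 5 (the catalog values are placeholders and the
+    import path is an assumption)::
+
+        from astropy import _erfa as erfa
+
+        rc, dc = 2.71, 0.174        # ICRS RA,Dec (radians)
+        pr, pd = 1e-5, 5e-6         # proper motions (radians/year)
+        px, rv = 0.1, 55.0          # parallax (arcsec), RV (km/s)
+        ri, di, eo = erfa.atci13(rc, dc, pr, pd, px, rv,
+                                 2456165.5, 0.401182685)
+        ra_app = erfa.anp(ri - eo)  # Note 5: RA = RI - EO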
+ + """ + + #Turn all inputs into arrays + rc_in = numpy.array(rc, dtype=numpy.double, order="C", copy=False, subok=True) + dc_in = numpy.array(dc, dtype=numpy.double, order="C", copy=False, subok=True) + pr_in = numpy.array(pr, dtype=numpy.double, order="C", copy=False, subok=True) + pd_in = numpy.array(pd, dtype=numpy.double, order="C", copy=False, subok=True) + px_in = numpy.array(px, dtype=numpy.double, order="C", copy=False, subok=True) + rv_in = numpy.array(rv, dtype=numpy.double, order="C", copy=False, subok=True) + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), rc_in, dc_in, pr_in, pd_in, px_in, rv_in, date1_in, date2_in) + ri_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + di_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + eo_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [rc_in, dc_in, pr_in, pd_in, px_in, rv_in, date1_in, date2_in, ri_out, di_out, eo_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*8 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._atci13(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(ri_out.shape) > 0 and ri_out.shape[0] == 1 + ri_out = ri_out.reshape(ri_out.shape[1:]) + assert len(di_out.shape) > 0 and di_out.shape[0] == 1 + di_out = di_out.reshape(di_out.shape[1:]) + assert len(eo_out.shape) > 0 and eo_out.shape[0] == 1 + eo_out = eo_out.reshape(eo_out.shape[1:]) + + return ri_out, di_out, eo_out + + +def atciq(rc, dc, pr, pd, px, rv, astrom): + """ + Wrapper for ERFA function ``eraAtciq``. + + Parameters + ---------- + rc : double array + dc : double array + pr : double array + pd : double array + px : double array + rv : double array + astrom : eraASTROM array + + Returns + ------- + ri : double array + di : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a A t c i q + - - - - - - - - - + + Quick ICRS, epoch J2000.0, to CIRS transformation, given precomputed + star-independent astrometry parameters. + + Use of this function is appropriate when efficiency is important and + where many star positions are to be transformed for one date. The + star-independent parameters can be obtained by calling one of the + functions eraApci[13], eraApcg[13], eraApco[13] or eraApcs[13]. + + If the parallax and proper motions are zero the eraAtciqz function + can be used instead. 
+ + Given: + rc,dc double ICRS RA,Dec at J2000.0 (radians) + pr double RA proper motion (radians/year; Note 3) + pd double Dec proper motion (radians/year) + px double parallax (arcsec) + rv double radial velocity (km/s, +ve if receding) + astrom eraASTROM* star-independent astrometry parameters: + pmt double PM time interval (SSB, Julian years) + eb double[3] SSB to observer (vector, au) + eh double[3] Sun to observer (unit vector) + em double distance from Sun to observer (au) + v double[3] barycentric observer velocity (vector, c) + bm1 double sqrt(1-|v|^2): reciprocal of Lorenz factor + bpn double[3][3] bias-precession-nutation matrix + along double longitude + s' (radians) + xpl double polar motion xp wrt local meridian (radians) + ypl double polar motion yp wrt local meridian (radians) + sphi double sine of geodetic latitude + cphi double cosine of geodetic latitude + diurab double magnitude of diurnal aberration vector + eral double "local" Earth rotation angle (radians) + refa double refraction constant A (radians) + refb double refraction constant B (radians) + + Returned: + ri,di double CIRS RA,Dec (radians) + + Notes: + + 1) All the vectors are with respect to BCRS axes. + + 2) Star data for an epoch other than J2000.0 (for example from the + Hipparcos catalog, which has an epoch of J1991.25) will require a + preliminary call to eraPmsafe before use. + + 3) The proper motion in RA is dRA/dt rather than cos(Dec)*dRA/dt. + + Called: + eraPmpx proper motion and parallax + eraLdsun light deflection by the Sun + eraAb stellar aberration + eraRxp product of r-matrix and pv-vector + eraC2s p-vector to spherical + eraAnp normalize angle into range 0 to 2pi + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
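+
+    Examples
+    --------
+    A sketch of the efficiency pattern described above: compute the
+    star-independent parameters once, then transform many stars for the
+    same date (placeholder data; the import path and the use of apci13
+    are assumptions based on the other wrappers in this module)::
+
+        import numpy as np
+        from astropy import _erfa as erfa
+
+        astrom, eo = erfa.apci13(2456165.5, 0.401182685)
+        rc = np.linspace(0.0, 2.0, 100)     # many stars, one date
+        dc = np.linspace(-0.5, 0.5, 100)
+        zero = np.zeros_like(rc)            # no PM, parallax or RV
+        ri, di = erfa.atciq(rc, dc, zero, zero, zero, zero, astrom)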
+ + """ + + #Turn all inputs into arrays + rc_in = numpy.array(rc, dtype=numpy.double, order="C", copy=False, subok=True) + dc_in = numpy.array(dc, dtype=numpy.double, order="C", copy=False, subok=True) + pr_in = numpy.array(pr, dtype=numpy.double, order="C", copy=False, subok=True) + pd_in = numpy.array(pd, dtype=numpy.double, order="C", copy=False, subok=True) + px_in = numpy.array(px, dtype=numpy.double, order="C", copy=False, subok=True) + rv_in = numpy.array(rv, dtype=numpy.double, order="C", copy=False, subok=True) + astrom_in = numpy.array(astrom, dtype=dt_eraASTROM, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), rc_in, dc_in, pr_in, pd_in, px_in, rv_in, astrom_in) + ri_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + di_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [rc_in, dc_in, pr_in, pd_in, px_in, rv_in, astrom_in, ri_out, di_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*7 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._atciq(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(ri_out.shape) > 0 and ri_out.shape[0] == 1 + ri_out = ri_out.reshape(ri_out.shape[1:]) + assert len(di_out.shape) > 0 and di_out.shape[0] == 1 + di_out = di_out.reshape(di_out.shape[1:]) + + return ri_out, di_out + + +def atciqn(rc, dc, pr, pd, px, rv, astrom, n, b): + """ + Wrapper for ERFA function ``eraAtciqn``. + + Parameters + ---------- + rc : double array + dc : double array + pr : double array + pd : double array + px : double array + rv : double array + astrom : eraASTROM array + n : int array + b : eraLDBODY array + + Returns + ------- + ri : double array + di : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a A t c i q n + - - - - - - - - - - + + Quick ICRS, epoch J2000.0, to CIRS transformation, given precomputed + star-independent astrometry parameters plus a list of light- + deflecting bodies. + + Use of this function is appropriate when efficiency is important and + where many star positions are to be transformed for one date. The + star-independent parameters can be obtained by calling one of the + functions eraApci[13], eraApcg[13], eraApco[13] or eraApcs[13]. + + + If the only light-deflecting body to be taken into account is the + Sun, the eraAtciq function can be used instead. If in addition the + parallax and proper motions are zero, the eraAtciqz function can be + used. 
+ + Given: + rc,dc double ICRS RA,Dec at J2000.0 (radians) + pr double RA proper motion (radians/year; Note 3) + pd double Dec proper motion (radians/year) + px double parallax (arcsec) + rv double radial velocity (km/s, +ve if receding) + astrom eraASTROM* star-independent astrometry parameters: + pmt double PM time interval (SSB, Julian years) + eb double[3] SSB to observer (vector, au) + eh double[3] Sun to observer (unit vector) + em double distance from Sun to observer (au) + v double[3] barycentric observer velocity (vector, c) + bm1 double sqrt(1-|v|^2): reciprocal of Lorenz factor + bpn double[3][3] bias-precession-nutation matrix + along double longitude + s' (radians) + xpl double polar motion xp wrt local meridian (radians) + ypl double polar motion yp wrt local meridian (radians) + sphi double sine of geodetic latitude + cphi double cosine of geodetic latitude + diurab double magnitude of diurnal aberration vector + eral double "local" Earth rotation angle (radians) + refa double refraction constant A (radians) + refb double refraction constant B (radians) + n int number of bodies (Note 3) + b eraLDBODY[n] data for each of the n bodies (Notes 3,4): + bm double mass of the body (solar masses, Note 5) + dl double deflection limiter (Note 6) + pv [2][3] barycentric PV of the body (au, au/day) + + Returned: + ri,di double CIRS RA,Dec (radians) + + Notes: + + 1) Star data for an epoch other than J2000.0 (for example from the + Hipparcos catalog, which has an epoch of J1991.25) will require a + preliminary call to eraPmsafe before use. + + 2) The proper motion in RA is dRA/dt rather than cos(Dec)*dRA/dt. + + 3) The struct b contains n entries, one for each body to be + considered. If n = 0, no gravitational light deflection will be + applied, not even for the Sun. + + 4) The struct b should include an entry for the Sun as well as for + any planet or other body to be taken into account. The entries + should be in the order in which the light passes the body. + + 5) In the entry in the b struct for body i, the mass parameter + b[i].bm can, as required, be adjusted in order to allow for such + effects as quadrupole field. + + 6) The deflection limiter parameter b[i].dl is phi^2/2, where phi is + the angular separation (in radians) between star and body at + which limiting is applied. As phi shrinks below the chosen + threshold, the deflection is artificially reduced, reaching zero + for phi = 0. Example values suitable for a terrestrial + observer, together with masses, are as follows: + + body i b[i].bm b[i].dl + + Sun 1.0 6e-6 + Jupiter 0.00095435 3e-9 + Saturn 0.00028574 3e-10 + + 7) For efficiency, validation of the contents of the b array is + omitted. The supplied masses must be greater than zero, the + position and velocity vectors must be right, and the deflection + limiter greater than zero. + + Called: + eraPmpx proper motion and parallax + eraLdn light deflection by n bodies + eraAb stellar aberration + eraRxp product of r-matrix and pv-vector + eraC2s p-vector to spherical + eraAnp normalize angle into range 0 to 2pi + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
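+
+    Examples
+    --------
+    A sketch of assembling the body list from the Sun/Jupiter/Saturn
+    values tabulated in Note 6 (the dt_eraLDBODY field names and the
+    zero-initialised PV vectors are assumptions; real positions and
+    velocities must come from an ephemeris before calling atciqn)::
+
+        import numpy as np
+        from astropy import _erfa as erfa
+
+        b = np.zeros(3, dtype=erfa.dt_eraLDBODY)
+        b['bm'] = [1.0, 0.00095435, 0.00028574]   # masses (solar masses)
+        b['dl'] = [6e-6, 3e-9, 3e-10]             # deflection limiters
+        # b['pv'][i] must be filled with body i's barycentric PV
+        # (au, au/day) before b is passed, with n=3, to atciqn.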
+ + """ + + #Turn all inputs into arrays + rc_in = numpy.array(rc, dtype=numpy.double, order="C", copy=False, subok=True) + dc_in = numpy.array(dc, dtype=numpy.double, order="C", copy=False, subok=True) + pr_in = numpy.array(pr, dtype=numpy.double, order="C", copy=False, subok=True) + pd_in = numpy.array(pd, dtype=numpy.double, order="C", copy=False, subok=True) + px_in = numpy.array(px, dtype=numpy.double, order="C", copy=False, subok=True) + rv_in = numpy.array(rv, dtype=numpy.double, order="C", copy=False, subok=True) + astrom_in = numpy.array(astrom, dtype=dt_eraASTROM, order="C", copy=False, subok=True) + n_in = numpy.array(n, dtype=numpy.intc, order="C", copy=False, subok=True) + b_in = numpy.array(b, dtype=dt_eraLDBODY, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), rc_in, dc_in, pr_in, pd_in, px_in, rv_in, astrom_in, n_in, b_in) + ri_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + di_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [rc_in, dc_in, pr_in, pd_in, px_in, rv_in, astrom_in, n_in, b_in, ri_out, di_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*9 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._atciqn(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(ri_out.shape) > 0 and ri_out.shape[0] == 1 + ri_out = ri_out.reshape(ri_out.shape[1:]) + assert len(di_out.shape) > 0 and di_out.shape[0] == 1 + di_out = di_out.reshape(di_out.shape[1:]) + + return ri_out, di_out + + +def atciqz(rc, dc, astrom): + """ + Wrapper for ERFA function ``eraAtciqz``. + + Parameters + ---------- + rc : double array + dc : double array + astrom : eraASTROM array + + Returns + ------- + ri : double array + di : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a A t c i q z + - - - - - - - - - - + + Quick ICRS to CIRS transformation, given precomputed star- + independent astrometry parameters, and assuming zero parallax and + proper motion. + + Use of this function is appropriate when efficiency is important and + where many star positions are to be transformed for one date. The + star-independent parameters can be obtained by calling one of the + functions eraApci[13], eraApcg[13], eraApco[13] or eraApcs[13]. + + The corresponding function for the case of non-zero parallax and + proper motion is eraAtciq. 
+
+    Given:
+       rc,dc  double     ICRS astrometric RA,Dec (radians)
+       astrom eraASTROM* star-independent astrometry parameters:
+        pmt    double       PM time interval (SSB, Julian years)
+        eb     double[3]    SSB to observer (vector, au)
+        eh     double[3]    Sun to observer (unit vector)
+        em     double       distance from Sun to observer (au)
+        v      double[3]    barycentric observer velocity (vector, c)
+        bm1    double       sqrt(1-|v|^2): reciprocal of Lorenz factor
+        bpn    double[3][3] bias-precession-nutation matrix
+        along  double       longitude + s' (radians)
+        xpl    double       polar motion xp wrt local meridian (radians)
+        ypl    double       polar motion yp wrt local meridian (radians)
+        sphi   double       sine of geodetic latitude
+        cphi   double       cosine of geodetic latitude
+        diurab double       magnitude of diurnal aberration vector
+        eral   double       "local" Earth rotation angle (radians)
+        refa   double       refraction constant A (radians)
+        refb   double       refraction constant B (radians)
+
+    Returned:
+       ri,di  double     CIRS RA,Dec (radians)
+
+    Note:
+
+       All the vectors are with respect to BCRS axes.
+
+    References:
+
+       Urban, S. & Seidelmann, P. K. (eds), Explanatory Supplement to
+       the Astronomical Almanac, 3rd ed., University Science Books
+       (2013).
+
+       Klioner, Sergei A., "A practical relativistic model for micro-
+       arcsecond astrometry in space", Astr. J. 125, 1580-1597 (2003).
+
+    Called:
+       eraS2c       spherical coordinates to unit vector
+       eraLdsun     light deflection due to Sun
+       eraAb        stellar aberration
+       eraRxp       product of r-matrix and p-vector
+       eraC2s       p-vector to spherical
+       eraAnp       normalize angle into range 0 to 2pi
+
+    Copyright (C) 2013-2017, NumFOCUS Foundation.
+    Derived, with permission, from the SOFA library. See notes at end of file.
+
+    """
+
+    #Turn all inputs into arrays
+    rc_in = numpy.array(rc, dtype=numpy.double, order="C", copy=False, subok=True)
+    dc_in = numpy.array(dc, dtype=numpy.double, order="C", copy=False, subok=True)
+    astrom_in = numpy.array(astrom, dtype=dt_eraASTROM, order="C", copy=False, subok=True)
+    make_outputs_scalar = False
+
+    #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed
+    broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), rc_in, dc_in, astrom_in)
+    ri_out = numpy.empty(broadcast.shape + (), dtype=numpy.double)
+    di_out = numpy.empty(broadcast.shape + (), dtype=numpy.double)
+
+    #Create the iterator, broadcasting on all but the consumed dimensions
+    arrs = [rc_in, dc_in, astrom_in, ri_out, di_out]
+    op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs]
+    op_flags = [['readonly']]*3 + [['readwrite']]*2
+    it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags)
+
+    #Iterate
+    stat_ok = _core._atciqz(it)
+    #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d
+    if make_outputs_scalar:
+        assert len(ri_out.shape) > 0 and ri_out.shape[0] == 1
+        ri_out = ri_out.reshape(ri_out.shape[1:])
+        assert len(di_out.shape) > 0 and di_out.shape[0] == 1
+        di_out = di_out.reshape(di_out.shape[1:])
+
+    return ri_out, di_out
+
+
+def atco13(rc, dc, pr, pd, px, rv, utc1, utc2, dut1, elong, phi, hm, xp, yp, phpa, tc, rh, wl):
+    """
+    Wrapper for ERFA function ``eraAtco13``.
+ + Parameters + ---------- + rc : double array + dc : double array + pr : double array + pd : double array + px : double array + rv : double array + utc1 : double array + utc2 : double array + dut1 : double array + elong : double array + phi : double array + hm : double array + xp : double array + yp : double array + phpa : double array + tc : double array + rh : double array + wl : double array + + Returns + ------- + aob : double array + zob : double array + hob : double array + dob : double array + rob : double array + eo : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a A t c o 1 3 + - - - - - - - - - - + + ICRS RA,Dec to observed place. The caller supplies UTC, site + coordinates, ambient air conditions and observing wavelength. + + ERFA models are used for the Earth ephemeris, bias-precession- + nutation, Earth orientation and refraction. + + Given: + rc,dc double ICRS right ascension at J2000.0 (radians, Note 1) + pr double RA proper motion (radians/year; Note 2) + pd double Dec proper motion (radians/year) + px double parallax (arcsec) + rv double radial velocity (km/s, +ve if receding) + utc1 double UTC as a 2-part... + utc2 double ...quasi Julian Date (Notes 3-4) + dut1 double UT1-UTC (seconds, Note 5) + elong double longitude (radians, east +ve, Note 6) + phi double latitude (geodetic, radians, Note 6) + hm double height above ellipsoid (m, geodetic, Notes 6,8) + xp,yp double polar motion coordinates (radians, Note 7) + phpa double pressure at the observer (hPa = mB, Note 8) + tc double ambient temperature at the observer (deg C) + rh double relative humidity at the observer (range 0-1) + wl double wavelength (micrometers, Note 9) + + Returned: + aob double* observed azimuth (radians: N=0,E=90) + zob double* observed zenith distance (radians) + hob double* observed hour angle (radians) + dob double* observed declination (radians) + rob double* observed right ascension (CIO-based, radians) + eo double* equation of the origins (ERA-GST) + + Returned (function value): + int status: +1 = dubious year (Note 4) + 0 = OK + -1 = unacceptable date + + Notes: + + 1) Star data for an epoch other than J2000.0 (for example from the + Hipparcos catalog, which has an epoch of J1991.25) will require + a preliminary call to eraPmsafe before use. + + 2) The proper motion in RA is dRA/dt rather than cos(Dec)*dRA/dt. + + 3) utc1+utc2 is quasi Julian Date (see Note 2), apportioned in any + convenient way between the two arguments, for example where utc1 + is the Julian Day Number and utc2 is the fraction of a day. + + However, JD cannot unambiguously represent UTC during a leap + second unless special measures are taken. The convention in the + present function is that the JD day represents UTC days whether + the length is 86399, 86400 or 86401 SI seconds. + + Applications should use the function eraDtf2d to convert from + calendar date and time of day into 2-part quasi Julian Date, as + it implements the leap-second-ambiguity convention just + described. + + 4) The warning status "dubious year" flags UTCs that predate the + introduction of the time scale or that are too far in the + future to be trusted. See eraDat for further details. + + 5) UT1-UTC is tabulated in IERS bulletins. It increases by exactly + one second at the end of each positive UTC leap second, + introduced in order to keep UT1-UTC within +/- 0.9s. n.b. This + practice is under review, and in the future UT1-UTC may grow + essentially without limit. 
+ + 6) The geographical coordinates are with respect to the ERFA_WGS84 + reference ellipsoid. TAKE CARE WITH THE LONGITUDE SIGN: the + longitude required by the present function is east-positive + (i.e. right-handed), in accordance with geographical convention. + + 7) The polar motion xp,yp can be obtained from IERS bulletins. The + values are the coordinates (in radians) of the Celestial + Intermediate Pole with respect to the International Terrestrial + Reference System (see IERS Conventions 2003), measured along the + meridians 0 and 90 deg west respectively. For many + applications, xp and yp can be set to zero. + + 8) If hm, the height above the ellipsoid of the observing station + in meters, is not known but phpa, the pressure in hPa (=mB), + is available, an adequate estimate of hm can be obtained from + the expression + + hm = -29.3 * tsl * log ( phpa / 1013.25 ); + + where tsl is the approximate sea-level air temperature in K + (See Astrophysical Quantities, C.W.Allen, 3rd edition, section + 52). Similarly, if the pressure phpa is not known, it can be + estimated from the height of the observing station, hm, as + follows: + + phpa = 1013.25 * exp ( -hm / ( 29.3 * tsl ) ); + + Note, however, that the refraction is nearly proportional to + the pressure and that an accurate phpa value is important for + precise work. + + 9) The argument wl specifies the observing wavelength in + micrometers. The transition from optical to radio is assumed to + occur at 100 micrometers (about 3000 GHz). + + 10) The accuracy of the result is limited by the corrections for + refraction, which use a simple A*tan(z) + B*tan^3(z) model. + Providing the meteorological parameters are known accurately and + there are no gross local effects, the predicted observed + coordinates should be within 0.05 arcsec (optical) or 1 arcsec + (radio) for a zenith distance of less than 70 degrees, better + than 30 arcsec (optical or radio) at 85 degrees and better + than 20 arcmin (optical) or 30 arcmin (radio) at the horizon. + + Without refraction, the complementary functions eraAtco13 and + eraAtoc13 are self-consistent to better than 1 microarcsecond + all over the celestial sphere. With refraction included, + consistency falls off at high zenith distances, but is still + better than 0.05 arcsec at 85 degrees. + + 11) "Observed" Az,ZD means the position that would be seen by a + perfect geodetically aligned theodolite. (Zenith distance is + used rather than altitude in order to reflect the fact that no + allowance is made for depression of the horizon.) This is + related to the observed HA,Dec via the standard rotation, using + the geodetic latitude (corrected for polar motion), while the + observed HA and RA are related simply through the Earth rotation + angle and the site longitude. "Observed" RA,Dec or HA,Dec thus + means the position that would be seen by a perfect equatorial + with its polar axis aligned to the Earth's axis of rotation. + + 12) It is advisable to take great care with units, as even unlikely + values of the input parameters are accepted and processed in + accordance with the models used. + + Called: + eraApco13 astrometry parameters, ICRS-observed, 2013 + eraAtciq quick ICRS to CIRS + eraAtioq quick CIRS to observed + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
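+
+    Examples
+    --------
+    A minimal end-to-end sketch, ICRS catalog entry to observed place in
+    one call (placeholder site and weather data; the import path is an
+    assumption)::
+
+        from astropy import _erfa as erfa
+
+        aob, zob, hob, dob, rob, eo = erfa.atco13(
+            2.71, 0.174,                 # ICRS RA,Dec (radians)
+            1e-5, 5e-6, 0.1, 55.0,       # pr, pd, px, rv
+            2456384.5, 0.969254,         # UTC 2-part quasi JD
+            0.1550675,                   # UT1-UTC (s)
+            -0.5278, -1.2346, 2738.0,    # elong, phi, hm
+            2.47e-7, 1.83e-6,            # xp, yp (radians)
+            731.0, 12.8, 0.59, 0.55)     # phpa, tc, rh, wl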
+ + """ + + #Turn all inputs into arrays + rc_in = numpy.array(rc, dtype=numpy.double, order="C", copy=False, subok=True) + dc_in = numpy.array(dc, dtype=numpy.double, order="C", copy=False, subok=True) + pr_in = numpy.array(pr, dtype=numpy.double, order="C", copy=False, subok=True) + pd_in = numpy.array(pd, dtype=numpy.double, order="C", copy=False, subok=True) + px_in = numpy.array(px, dtype=numpy.double, order="C", copy=False, subok=True) + rv_in = numpy.array(rv, dtype=numpy.double, order="C", copy=False, subok=True) + utc1_in = numpy.array(utc1, dtype=numpy.double, order="C", copy=False, subok=True) + utc2_in = numpy.array(utc2, dtype=numpy.double, order="C", copy=False, subok=True) + dut1_in = numpy.array(dut1, dtype=numpy.double, order="C", copy=False, subok=True) + elong_in = numpy.array(elong, dtype=numpy.double, order="C", copy=False, subok=True) + phi_in = numpy.array(phi, dtype=numpy.double, order="C", copy=False, subok=True) + hm_in = numpy.array(hm, dtype=numpy.double, order="C", copy=False, subok=True) + xp_in = numpy.array(xp, dtype=numpy.double, order="C", copy=False, subok=True) + yp_in = numpy.array(yp, dtype=numpy.double, order="C", copy=False, subok=True) + phpa_in = numpy.array(phpa, dtype=numpy.double, order="C", copy=False, subok=True) + tc_in = numpy.array(tc, dtype=numpy.double, order="C", copy=False, subok=True) + rh_in = numpy.array(rh, dtype=numpy.double, order="C", copy=False, subok=True) + wl_in = numpy.array(wl, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), rc_in, dc_in, pr_in, pd_in, px_in, rv_in, utc1_in, utc2_in, dut1_in, elong_in, phi_in, hm_in, xp_in, yp_in, phpa_in, tc_in, rh_in, wl_in) + aob_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + zob_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + hob_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + dob_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + rob_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + eo_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [rc_in, dc_in, pr_in, pd_in, px_in, rv_in, utc1_in, utc2_in, dut1_in, elong_in, phi_in, hm_in, xp_in, yp_in, phpa_in, tc_in, rh_in, wl_in, aob_out, zob_out, hob_out, dob_out, rob_out, eo_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*18 + [['readwrite']]*7 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._atco13(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'atco13') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(aob_out.shape) > 0 and aob_out.shape[0] == 1 + aob_out = aob_out.reshape(aob_out.shape[1:]) + assert len(zob_out.shape) > 0 and zob_out.shape[0] == 1 + zob_out = zob_out.reshape(zob_out.shape[1:]) + assert len(hob_out.shape) > 0 and hob_out.shape[0] == 1 + hob_out = hob_out.reshape(hob_out.shape[1:]) + assert len(dob_out.shape) > 0 and dob_out.shape[0] == 1 + dob_out = dob_out.reshape(dob_out.shape[1:]) + assert len(rob_out.shape) > 0 and rob_out.shape[0] == 1 + rob_out = 
rob_out.reshape(rob_out.shape[1:]) + assert len(eo_out.shape) > 0 and eo_out.shape[0] == 1 + eo_out = eo_out.reshape(eo_out.shape[1:]) + + return aob_out, zob_out, hob_out, dob_out, rob_out, eo_out +STATUS_CODES['atco13'] = {1: 'dubious year (Note 4)', 0: 'OK', -1: 'unacceptable date'} + + + +def atic13(ri, di, date1, date2): + """ + Wrapper for ERFA function ``eraAtic13``. + + Parameters + ---------- + ri : double array + di : double array + date1 : double array + date2 : double array + + Returns + ------- + rc : double array + dc : double array + eo : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a A t i c 1 3 + - - - - - - - - - - + + Transform star RA,Dec from geocentric CIRS to ICRS astrometric. + + Given: + ri,di double CIRS geocentric RA,Dec (radians) + date1 double TDB as a 2-part... + date2 double ...Julian Date (Note 1) + + Returned: + rc,dc double ICRS astrometric RA,Dec (radians) + eo double equation of the origins (ERA-GST, Note 4) + + Notes: + + 1) The TDB date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TDB)=2450123.7 could be expressed in any of these ways, among + others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in cases + where the loss of several decimal digits of resolution is + acceptable. The J2000 method is best matched to the way the + argument is handled internally and will deliver the optimum + resolution. The MJD method and the date & time methods are both + good compromises between resolution and convenience. For most + applications of this function the choice will not be at all + critical. + + TT can be used instead of TDB without any significant impact on + accuracy. + + 2) Iterative techniques are used for the aberration and light + deflection corrections so that the functions eraAtic13 (or + eraAticq) and eraAtci13 (or eraAtciq) are accurate inverses; + even at the edge of the Sun's disk the discrepancy is only about + 1 nanoarcsecond. + + 3) The available accuracy is better than 1 milliarcsecond, limited + mainly by the precession-nutation model that is used, namely + IAU 2000A/2006. Very close to solar system bodies, additional + errors of up to several milliarcseconds can occur because of + unmodeled light deflection; however, the Sun's contribution is + taken into account, to first order. The accuracy limitations of + the ERFA function eraEpv00 (used to compute Earth position and + velocity) can contribute aberration errors of up to + 5 microarcseconds. Light deflection at the Sun's limb is + uncertain at the 0.4 mas level. + + 4) Should the transformation to (equinox based) J2000.0 mean place + be required rather than (CIO based) ICRS coordinates, subtract the + equation of the origins from the returned right ascension: + RA = RI - EO. (The eraAnp function can then be applied, as + required, to keep the result in the conventional 0-2pi range.) + + Called: + eraApci13 astrometry parameters, ICRS-CIRS, 2013 + eraAticq quick CIRS to ICRS astrometric + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
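+
+    Examples
+    --------
+    A minimal round-trip sketch exercising the inverse relationship of
+    Note 2 (placeholder coordinates; the import path is an assumption)::
+
+        from astropy import _erfa as erfa
+
+        d1, d2 = 2456165.5, 0.401182685
+        ri, di, eo = erfa.atci13(2.71, 0.174, 0.0, 0.0, 0.0, 0.0, d1, d2)
+        rc, dc, eo = erfa.atic13(ri, di, d1, d2)
+        # rc, dc recover the original ICRS position to ~1 nanoarcsecond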
+ + """ + + #Turn all inputs into arrays + ri_in = numpy.array(ri, dtype=numpy.double, order="C", copy=False, subok=True) + di_in = numpy.array(di, dtype=numpy.double, order="C", copy=False, subok=True) + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), ri_in, di_in, date1_in, date2_in) + rc_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + dc_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + eo_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [ri_in, di_in, date1_in, date2_in, rc_out, dc_out, eo_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._atic13(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rc_out.shape) > 0 and rc_out.shape[0] == 1 + rc_out = rc_out.reshape(rc_out.shape[1:]) + assert len(dc_out.shape) > 0 and dc_out.shape[0] == 1 + dc_out = dc_out.reshape(dc_out.shape[1:]) + assert len(eo_out.shape) > 0 and eo_out.shape[0] == 1 + eo_out = eo_out.reshape(eo_out.shape[1:]) + + return rc_out, dc_out, eo_out + + +def aticq(ri, di, astrom): + """ + Wrapper for ERFA function ``eraAticq``. + + Parameters + ---------- + ri : double array + di : double array + astrom : eraASTROM array + + Returns + ------- + rc : double array + dc : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a A t i c q + - - - - - - - - - + + Quick CIRS RA,Dec to ICRS astrometric place, given the star- + independent astrometry parameters. + + Use of this function is appropriate when efficiency is important and + where many star positions are all to be transformed for one date. + The star-independent astrometry parameters can be obtained by + calling one of the functions eraApci[13], eraApcg[13], eraApco[13] + or eraApcs[13]. + + Given: + ri,di double CIRS RA,Dec (radians) + astrom eraASTROM* star-independent astrometry parameters: + pmt double PM time interval (SSB, Julian years) + eb double[3] SSB to observer (vector, au) + eh double[3] Sun to observer (unit vector) + em double distance from Sun to observer (au) + v double[3] barycentric observer velocity (vector, c) + bm1 double sqrt(1-|v|^2): reciprocal of Lorenz factor + bpn double[3][3] bias-precession-nutation matrix + along double longitude + s' (radians) + xpl double polar motion xp wrt local meridian (radians) + ypl double polar motion yp wrt local meridian (radians) + sphi double sine of geodetic latitude + cphi double cosine of geodetic latitude + diurab double magnitude of diurnal aberration vector + eral double "local" Earth rotation angle (radians) + refa double refraction constant A (radians) + refb double refraction constant B (radians) + + Returned: + rc,dc double ICRS astrometric RA,Dec (radians) + + Notes: + + 1) Only the Sun is taken into account in the light deflection + correction. 
+
+    2) Iterative techniques are used for the aberration and light
+       deflection corrections so that the functions eraAtic13 (or
+       eraAticq) and eraAtci13 (or eraAtciq) are accurate inverses;
+       even at the edge of the Sun's disk the discrepancy is only about
+       1 nanoarcsecond.
+
+    Called:
+       eraS2c       spherical coordinates to unit vector
+       eraTrxp      product of transpose of r-matrix and p-vector
+       eraZp        zero p-vector
+       eraAb        stellar aberration
+       eraLdsun     light deflection by the Sun
+       eraC2s       p-vector to spherical
+       eraAnp       normalize angle into range 0 to 2pi
+
+    Copyright (C) 2013-2017, NumFOCUS Foundation.
+    Derived, with permission, from the SOFA library. See notes at end of file.
+
+    """
+
+    #Turn all inputs into arrays
+    ri_in = numpy.array(ri, dtype=numpy.double, order="C", copy=False, subok=True)
+    di_in = numpy.array(di, dtype=numpy.double, order="C", copy=False, subok=True)
+    astrom_in = numpy.array(astrom, dtype=dt_eraASTROM, order="C", copy=False, subok=True)
+    make_outputs_scalar = False
+
+    #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed
+    broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), ri_in, di_in, astrom_in)
+    rc_out = numpy.empty(broadcast.shape + (), dtype=numpy.double)
+    dc_out = numpy.empty(broadcast.shape + (), dtype=numpy.double)
+
+    #Create the iterator, broadcasting on all but the consumed dimensions
+    arrs = [ri_in, di_in, astrom_in, rc_out, dc_out]
+    op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs]
+    op_flags = [['readonly']]*3 + [['readwrite']]*2
+    it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags)
+
+    #Iterate
+    stat_ok = _core._aticq(it)
+    #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d
+    if make_outputs_scalar:
+        assert len(rc_out.shape) > 0 and rc_out.shape[0] == 1
+        rc_out = rc_out.reshape(rc_out.shape[1:])
+        assert len(dc_out.shape) > 0 and dc_out.shape[0] == 1
+        dc_out = dc_out.reshape(dc_out.shape[1:])
+
+    return rc_out, dc_out
+
+
+def aticqn(ri, di, astrom, n, b):
+    """
+    Wrapper for ERFA function ``eraAticqn``.
+
+    Parameters
+    ----------
+    ri : double array
+    di : double array
+    astrom : eraASTROM array
+    n : int array
+    b : eraLDBODY array
+
+    Returns
+    -------
+    rc : double array
+    dc : double array
+
+    Notes
+    -----
+    The ERFA documentation is below.
+
+    - - - - - - - - -
+     e r a A t i c q n
+    - - - - - - - - -
+
+    Quick CIRS to ICRS astrometric place transformation, given the star-
+    independent astrometry parameters plus a list of light-deflecting
+    bodies.
+
+    Use of this function is appropriate when efficiency is important and
+    where many star positions are all to be transformed for one date.
+    The star-independent astrometry parameters can be obtained by
+    calling one of the functions eraApci[13], eraApcg[13], eraApco[13]
+    or eraApcs[13].
+
+    If the only light-deflecting body to be taken into account is the
+    Sun, the eraAticq function can be used instead.
+
+    Given:
+       ri,di  double      CIRS RA,Dec (radians)
+       astrom eraASTROM*  star-independent astrometry parameters:
+        pmt    double       PM time interval (SSB, Julian years)
+        eb     double[3]    SSB to observer (vector, au)
+        eh     double[3]    Sun to observer (unit vector)
+        em     double       distance from Sun to observer (au)
+        v      double[3]    barycentric observer velocity (vector, c)
+        bm1    double       sqrt(1-|v|^2): reciprocal of Lorenz factor
+        bpn    double[3][3] bias-precession-nutation matrix
+        along  double       longitude + s' (radians)
+        xpl    double       polar motion xp wrt local meridian (radians)
+        ypl    double       polar motion yp wrt local meridian (radians)
+        sphi   double       sine of geodetic latitude
+        cphi   double       cosine of geodetic latitude
+        diurab double       magnitude of diurnal aberration vector
+        eral   double       "local" Earth rotation angle (radians)
+        refa   double       refraction constant A (radians)
+        refb   double       refraction constant B (radians)
+       n      int          number of bodies (Note 3)
+       b      eraLDBODY[n] data for each of the n bodies (Notes 3,4):
+        bm     double       mass of the body (solar masses, Note 5)
+        dl     double       deflection limiter (Note 6)
+        pv     [2][3]       barycentric PV of the body (au, au/day)
+
+    Returned:
+       rc,dc  double      ICRS astrometric RA,Dec (radians)
+
+    Notes:
+
+    1) Iterative techniques are used for the aberration and light
+       deflection corrections so that the functions eraAticqn and
+       eraAtciqn are accurate inverses; even at the edge of the Sun's
+       disk the discrepancy is only about 1 nanoarcsecond.
+
+    2) If the only light-deflecting body to be taken into account is the
+       Sun, the eraAticq function can be used instead.
+
+    3) The struct b contains n entries, one for each body to be
+       considered. If n = 0, no gravitational light deflection will be
+       applied, not even for the Sun.
+
+    4) The struct b should include an entry for the Sun as well as for
+       any planet or other body to be taken into account. The entries
+       should be in the order in which the light passes the body.
+
+    5) In the entry in the b struct for body i, the mass parameter
+       b[i].bm can, as required, be adjusted in order to allow for such
+       effects as quadrupole field.
+
+    6) The deflection limiter parameter b[i].dl is phi^2/2, where phi is
+       the angular separation (in radians) between star and body at
+       which limiting is applied. As phi shrinks below the chosen
+       threshold, the deflection is artificially reduced, reaching zero
+       for phi = 0. Example values suitable for a terrestrial
+       observer, together with masses, are as follows:
+
+          body i     b[i].bm        b[i].dl
+
+          Sun        1.0            6e-6
+          Jupiter    0.00095435     3e-9
+          Saturn     0.00028574     3e-10
+
+    7) For efficiency, validation of the contents of the b array is
+       omitted. The supplied masses must be greater than zero, the
+       position and velocity vectors must be right, and the deflection
+       limiter greater than zero.
+
+    Called:
+       eraS2c       spherical coordinates to unit vector
+       eraTrxp      product of transpose of r-matrix and p-vector
+       eraZp        zero p-vector
+       eraAb        stellar aberration
+       eraLdn       light deflection by n bodies
+       eraC2s       p-vector to spherical
+       eraAnp       normalize angle into range 0 to 2pi
+
+    Copyright (C) 2013-2017, NumFOCUS Foundation.
+    Derived, with permission, from the SOFA library. See notes at end of file.
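+
+    Examples
+    --------
+    Note 6's limiter follows from a chosen minimum star-body separation
+    phi; a small worked sketch (the 9 arcsec threshold is an
+    illustrative choice, not from the ERFA documentation)::
+
+        import numpy as np
+
+        phi = np.radians(9.0 / 3600.0)   # apply limiting within ~9 arcsec
+        dl = phi**2 / 2.0                # deflection limiter, ~9.5e-10
+
+    Conversely, the Sun's tabulated dl = 6e-6 corresponds to
+    phi = sqrt(2 * 6e-6), about 0.2 degrees.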
+ + """ + + #Turn all inputs into arrays + ri_in = numpy.array(ri, dtype=numpy.double, order="C", copy=False, subok=True) + di_in = numpy.array(di, dtype=numpy.double, order="C", copy=False, subok=True) + astrom_in = numpy.array(astrom, dtype=dt_eraASTROM, order="C", copy=False, subok=True) + n_in = numpy.array(n, dtype=numpy.intc, order="C", copy=False, subok=True) + b_in = numpy.array(b, dtype=dt_eraLDBODY, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), ri_in, di_in, astrom_in, n_in, b_in) + rc_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + dc_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [ri_in, di_in, astrom_in, n_in, b_in, rc_out, dc_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*5 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._aticqn(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rc_out.shape) > 0 and rc_out.shape[0] == 1 + rc_out = rc_out.reshape(rc_out.shape[1:]) + assert len(dc_out.shape) > 0 and dc_out.shape[0] == 1 + dc_out = dc_out.reshape(dc_out.shape[1:]) + + return rc_out, dc_out + + +def atio13(ri, di, utc1, utc2, dut1, elong, phi, hm, xp, yp, phpa, tc, rh, wl): + """ + Wrapper for ERFA function ``eraAtio13``. + + Parameters + ---------- + ri : double array + di : double array + utc1 : double array + utc2 : double array + dut1 : double array + elong : double array + phi : double array + hm : double array + xp : double array + yp : double array + phpa : double array + tc : double array + rh : double array + wl : double array + + Returns + ------- + aob : double array + zob : double array + hob : double array + dob : double array + rob : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a A t i o 1 3 + - - - - - - - - - - + + CIRS RA,Dec to observed place. The caller supplies UTC, site + coordinates, ambient air conditions and observing wavelength. + + Given: + ri double CIRS right ascension (CIO-based, radians) + di double CIRS declination (radians) + utc1 double UTC as a 2-part... 
+ utc2 double ...quasi Julian Date (Notes 1,2) + dut1 double UT1-UTC (seconds, Note 3) + elong double longitude (radians, east +ve, Note 4) + phi double geodetic latitude (radians, Note 4) + hm double height above ellipsoid (m, geodetic Notes 4,6) + xp,yp double polar motion coordinates (radians, Note 5) + phpa double pressure at the observer (hPa = mB, Note 6) + tc double ambient temperature at the observer (deg C) + rh double relative humidity at the observer (range 0-1) + wl double wavelength (micrometers, Note 7) + + Returned: + aob double* observed azimuth (radians: N=0,E=90) + zob double* observed zenith distance (radians) + hob double* observed hour angle (radians) + dob double* observed declination (radians) + rob double* observed right ascension (CIO-based, radians) + + Returned (function value): + int status: +1 = dubious year (Note 2) + 0 = OK + -1 = unacceptable date + + Notes: + + 1) utc1+utc2 is quasi Julian Date (see Note 2), apportioned in any + convenient way between the two arguments, for example where utc1 + is the Julian Day Number and utc2 is the fraction of a day. + + However, JD cannot unambiguously represent UTC during a leap + second unless special measures are taken. The convention in the + present function is that the JD day represents UTC days whether + the length is 86399, 86400 or 86401 SI seconds. + + Applications should use the function eraDtf2d to convert from + calendar date and time of day into 2-part quasi Julian Date, as + it implements the leap-second-ambiguity convention just + described. + + 2) The warning status "dubious year" flags UTCs that predate the + introduction of the time scale or that are too far in the + future to be trusted. See eraDat for further details. + + 3) UT1-UTC is tabulated in IERS bulletins. It increases by exactly + one second at the end of each positive UTC leap second, + introduced in order to keep UT1-UTC within +/- 0.9s. n.b. This + practice is under review, and in the future UT1-UTC may grow + essentially without limit. + + 4) The geographical coordinates are with respect to the ERFA_WGS84 + reference ellipsoid. TAKE CARE WITH THE LONGITUDE SIGN: the + longitude required by the present function is east-positive + (i.e. right-handed), in accordance with geographical convention. + + 5) The polar motion xp,yp can be obtained from IERS bulletins. The + values are the coordinates (in radians) of the Celestial + Intermediate Pole with respect to the International Terrestrial + Reference System (see IERS Conventions 2003), measured along the + meridians 0 and 90 deg west respectively. For many + applications, xp and yp can be set to zero. + + 6) If hm, the height above the ellipsoid of the observing station + in meters, is not known but phpa, the pressure in hPa (=mB), is + available, an adequate estimate of hm can be obtained from the + expression + + hm = -29.3 * tsl * log ( phpa / 1013.25 ); + + where tsl is the approximate sea-level air temperature in K + (See Astrophysical Quantities, C.W.Allen, 3rd edition, section + 52). Similarly, if the pressure phpa is not known, it can be + estimated from the height of the observing station, hm, as + follows: + + phpa = 1013.25 * exp ( -hm / ( 29.3 * tsl ) ); + + Note, however, that the refraction is nearly proportional to + the pressure and that an accurate phpa value is important for + precise work. + + 7) The argument wl specifies the observing wavelength in + micrometers. The transition from optical to radio is assumed to + occur at 100 micrometers (about 3000 GHz). 
+ + 8) "Observed" Az,ZD means the position that would be seen by a + perfect geodetically aligned theodolite. (Zenith distance is + used rather than altitude in order to reflect the fact that no + allowance is made for depression of the horizon.) This is + related to the observed HA,Dec via the standard rotation, using + the geodetic latitude (corrected for polar motion), while the + observed HA and RA are related simply through the Earth rotation + angle and the site longitude. "Observed" RA,Dec or HA,Dec thus + means the position that would be seen by a perfect equatorial + with its polar axis aligned to the Earth's axis of rotation. + + 9) The accuracy of the result is limited by the corrections for + refraction, which use a simple A*tan(z) + B*tan^3(z) model. + Providing the meteorological parameters are known accurately and + there are no gross local effects, the predicted astrometric + coordinates should be within 0.05 arcsec (optical) or 1 arcsec + (radio) for a zenith distance of less than 70 degrees, better + than 30 arcsec (optical or radio) at 85 degrees and better + than 20 arcmin (optical) or 30 arcmin (radio) at the horizon. + + 10) The complementary functions eraAtio13 and eraAtoi13 are self- + consistent to better than 1 microarcsecond all over the + celestial sphere. + + 11) It is advisable to take great care with units, as even unlikely + values of the input parameters are accepted and processed in + accordance with the models used. + + Called: + eraApio13 astrometry parameters, CIRS-observed, 2013 + eraAtioq quick CIRS to observed + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + ri_in = numpy.array(ri, dtype=numpy.double, order="C", copy=False, subok=True) + di_in = numpy.array(di, dtype=numpy.double, order="C", copy=False, subok=True) + utc1_in = numpy.array(utc1, dtype=numpy.double, order="C", copy=False, subok=True) + utc2_in = numpy.array(utc2, dtype=numpy.double, order="C", copy=False, subok=True) + dut1_in = numpy.array(dut1, dtype=numpy.double, order="C", copy=False, subok=True) + elong_in = numpy.array(elong, dtype=numpy.double, order="C", copy=False, subok=True) + phi_in = numpy.array(phi, dtype=numpy.double, order="C", copy=False, subok=True) + hm_in = numpy.array(hm, dtype=numpy.double, order="C", copy=False, subok=True) + xp_in = numpy.array(xp, dtype=numpy.double, order="C", copy=False, subok=True) + yp_in = numpy.array(yp, dtype=numpy.double, order="C", copy=False, subok=True) + phpa_in = numpy.array(phpa, dtype=numpy.double, order="C", copy=False, subok=True) + tc_in = numpy.array(tc, dtype=numpy.double, order="C", copy=False, subok=True) + rh_in = numpy.array(rh, dtype=numpy.double, order="C", copy=False, subok=True) + wl_in = numpy.array(wl, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), ri_in, di_in, utc1_in, utc2_in, dut1_in, elong_in, phi_in, hm_in, xp_in, yp_in, phpa_in, tc_in, rh_in, wl_in) + aob_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + zob_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + hob_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + dob_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + rob_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + 
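#c_retval_out below gathers the per-element C status code; when any element is non-zero, check_errwarn maps it through STATUS_CODES['atio13'] and reports it +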
c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [ri_in, di_in, utc1_in, utc2_in, dut1_in, elong_in, phi_in, hm_in, xp_in, yp_in, phpa_in, tc_in, rh_in, wl_in, aob_out, zob_out, hob_out, dob_out, rob_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*14 + [['readwrite']]*6 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._atio13(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'atio13') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(aob_out.shape) > 0 and aob_out.shape[0] == 1 + aob_out = aob_out.reshape(aob_out.shape[1:]) + assert len(zob_out.shape) > 0 and zob_out.shape[0] == 1 + zob_out = zob_out.reshape(zob_out.shape[1:]) + assert len(hob_out.shape) > 0 and hob_out.shape[0] == 1 + hob_out = hob_out.reshape(hob_out.shape[1:]) + assert len(dob_out.shape) > 0 and dob_out.shape[0] == 1 + dob_out = dob_out.reshape(dob_out.shape[1:]) + assert len(rob_out.shape) > 0 and rob_out.shape[0] == 1 + rob_out = rob_out.reshape(rob_out.shape[1:]) + + return aob_out, zob_out, hob_out, dob_out, rob_out +STATUS_CODES['atio13'] = {1: 'dubious year (Note 2)', 0: 'OK', -1: 'unacceptable date'} + + + +def atioq(ri, di, astrom): + """ + Wrapper for ERFA function ``eraAtioq``. + + Parameters + ---------- + ri : double array + di : double array + astrom : eraASTROM array + + Returns + ------- + aob : double array + zob : double array + hob : double array + dob : double array + rob : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a A t i o q + - - - - - - - - - + + Quick CIRS to observed place transformation. + + Use of this function is appropriate when efficiency is important and + where many star positions are all to be transformed for one date. + The star-independent astrometry parameters can be obtained by + calling eraApio[13] or eraApco[13]. + + Given: + ri double CIRS right ascension + di double CIRS declination + astrom eraASTROM* star-independent astrometry parameters: + pmt double PM time interval (SSB, Julian years) + eb double[3] SSB to observer (vector, au) + eh double[3] Sun to observer (unit vector) + em double distance from Sun to observer (au) + v double[3] barycentric observer velocity (vector, c) + bm1 double sqrt(1-|v|^2): reciprocal of Lorentz factor + bpn double[3][3] bias-precession-nutation matrix + along double longitude + s' (radians) + xpl double polar motion xp wrt local meridian (radians) + ypl double polar motion yp wrt local meridian (radians) + sphi double sine of geodetic latitude + cphi double cosine of geodetic latitude + diurab double magnitude of diurnal aberration vector + eral double "local" Earth rotation angle (radians) + refa double refraction constant A (radians) + refb double refraction constant B (radians) + + Returned: + aob double* observed azimuth (radians: N=0,E=90) + zob double* observed zenith distance (radians) + hob double* observed hour angle (radians) + dob double* observed declination (radians) + rob double* observed right ascension (CIO-based, radians) + + Notes: + + 1) This function returns zenith distance rather than altitude in + order to reflect the fact that no allowance is made for + depression of the horizon.
+ + 2) The accuracy of the result is limited by the corrections for + refraction, which use a simple A*tan(z) + B*tan^3(z) model. + Providing the meteorological parameters are known accurately and + there are no gross local effects, the predicted observed + coordinates should be within 0.05 arcsec (optical) or 1 arcsec + (radio) for a zenith distance of less than 70 degrees, better + than 30 arcsec (optical or radio) at 85 degrees and better + than 20 arcmin (optical) or 30 arcmin (radio) at the horizon. + + Without refraction, the complementary functions eraAtioq and + eraAtoiq are self-consistent to better than 1 microarcsecond all + over the celestial sphere. With refraction included, consistency + falls off at high zenith distances, but is still better than + 0.05 arcsec at 85 degrees. + + 3) It is advisable to take great care with units, as even unlikely + values of the input parameters are accepted and processed in + accordance with the models used. + + 4) The CIRS RA,Dec is obtained from a star catalog mean place by + allowing for space motion, parallax, the Sun's gravitational lens + effect, annual aberration and precession-nutation. For star + positions in the ICRS, these effects can be applied by means of + the eraAtci13 (etc.) functions. Starting from classical "mean + place" systems, additional transformations will be needed first. + + 5) "Observed" Az,El means the position that would be seen by a + perfect geodetically aligned theodolite. This is obtained from + the CIRS RA,Dec by allowing for Earth orientation and diurnal + aberration, rotating from equator to horizon coordinates, and + then adjusting for refraction. The HA,Dec is obtained by + rotating back into equatorial coordinates, and is the position + that would be seen by a perfect equatorial with its polar axis + aligned to the Earth's axis of rotation. Finally, the RA is + obtained by subtracting the HA from the local ERA. + + 6) The star-independent CIRS-to-observed-place parameters in ASTROM + may be computed with eraApio[13] or eraApco[13]. If nothing has + changed significantly except the time, eraAper[13] may be used to + perform the requisite adjustment to the astrom structure. + + Called: + eraS2c spherical coordinates to unit vector + eraC2s p-vector to spherical + eraAnp normalize angle into range 0 to 2pi + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
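The docstring above describes the intended usage pattern: compute the star-independent parameters once, then reuse them for every target observed at that date. A minimal sketch of that pattern, assuming this module is importable as astropy._erfa and that the companion wrapper apio13 (generated elsewhere in this file from eraApio13) is present; all numeric values are purely illustrative:

    import numpy as np
    from astropy import _erfa as erfa

    # One call for the site, epoch and weather -> star-independent parameters.
    astrom = erfa.apio13(2456384.5, 0.969254051, 0.1550675,   # utc1, utc2, dut1 (s)
                         -0.5278008, -1.2345856, 2738.0,      # elong, phi (rad), hm (m)
                         2.472e-7, 1.826e-6,                  # xp, yp (rad)
                         731.0, 12.8, 0.59, 0.55)             # phpa, tc, rh, wl

    # Many CIRS positions against the single astrom record; the wrappers
    # broadcast, so array ri/di with a scalar astrom is fine.
    ri = np.linspace(2.70, 2.72, 5)   # CIRS RA (radians)
    di = np.full(5, 0.1729371)        # CIRS Dec (radians)
    aob, zob, hob, dob, rob = erfa.atioq(ri, di, astrom)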
+ + """ + + #Turn all inputs into arrays + ri_in = numpy.array(ri, dtype=numpy.double, order="C", copy=False, subok=True) + di_in = numpy.array(di, dtype=numpy.double, order="C", copy=False, subok=True) + astrom_in = numpy.array(astrom, dtype=dt_eraASTROM, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), ri_in, di_in, astrom_in) + aob_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + zob_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + hob_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + dob_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + rob_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [ri_in, di_in, astrom_in, aob_out, zob_out, hob_out, dob_out, rob_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*5 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._atioq(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(aob_out.shape) > 0 and aob_out.shape[0] == 1 + aob_out = aob_out.reshape(aob_out.shape[1:]) + assert len(zob_out.shape) > 0 and zob_out.shape[0] == 1 + zob_out = zob_out.reshape(zob_out.shape[1:]) + assert len(hob_out.shape) > 0 and hob_out.shape[0] == 1 + hob_out = hob_out.reshape(hob_out.shape[1:]) + assert len(dob_out.shape) > 0 and dob_out.shape[0] == 1 + dob_out = dob_out.reshape(dob_out.shape[1:]) + assert len(rob_out.shape) > 0 and rob_out.shape[0] == 1 + rob_out = rob_out.reshape(rob_out.shape[1:]) + + return aob_out, zob_out, hob_out, dob_out, rob_out + + +def atoc13(type, ob1, ob2, utc1, utc2, dut1, elong, phi, hm, xp, yp, phpa, tc, rh, wl): + """ + Wrapper for ERFA function ``eraAtoc13``. + + Parameters + ---------- + type : const char array + ob1 : double array + ob2 : double array + utc1 : double array + utc2 : double array + dut1 : double array + elong : double array + phi : double array + hm : double array + xp : double array + yp : double array + phpa : double array + tc : double array + rh : double array + wl : double array + + Returns + ------- + rc : double array + dc : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a A t o c 1 3 + - - - - - - - - - - + + Observed place at a groundbased site to to ICRS astrometric RA,Dec. + The caller supplies UTC, site coordinates, ambient air conditions + and observing wavelength. + + Given: + type char[] type of coordinates - "R", "H" or "A" (Notes 1,2) + ob1 double observed Az, HA or RA (radians; Az is N=0,E=90) + ob2 double observed ZD or Dec (radians) + utc1 double UTC as a 2-part... 
+ utc2 double ...quasi Julian Date (Notes 3,4) + dut1 double UT1-UTC (seconds, Note 5) + elong double longitude (radians, east +ve, Note 6) + phi double geodetic latitude (radians, Note 6) + hm double height above ellipsoid (m, geodetic Notes 6,8) + xp,yp double polar motion coordinates (radians, Note 7) + phpa double pressure at the observer (hPa = mB, Note 8) + tc double ambient temperature at the observer (deg C) + rh double relative humidity at the observer (range 0-1) + wl double wavelength (micrometers, Note 9) + + Returned: + rc,dc double ICRS astrometric RA,Dec (radians) + + Returned (function value): + int status: +1 = dubious year (Note 4) + 0 = OK + -1 = unacceptable date + + Notes: + + 1) "Observed" Az,ZD means the position that would be seen by a + perfect geodetically aligned theodolite. (Zenith distance is + used rather than altitude in order to reflect the fact that no + allowance is made for depression of the horizon.) This is + related to the observed HA,Dec via the standard rotation, using + the geodetic latitude (corrected for polar motion), while the + observed HA and RA are related simply through the Earth rotation + angle and the site longitude. "Observed" RA,Dec or HA,Dec thus + means the position that would be seen by a perfect equatorial + with its polar axis aligned to the Earth's axis of rotation. + + 2) Only the first character of the type argument is significant. + "R" or "r" indicates that ob1 and ob2 are the observed right + ascension and declination; "H" or "h" indicates that they are + hour angle (west +ve) and declination; anything else ("A" or + "a" is recommended) indicates that ob1 and ob2 are azimuth + (north zero, east 90 deg) and zenith distance. + + 3) utc1+utc2 is quasi Julian Date (see Note 2), apportioned in any + convenient way between the two arguments, for example where utc1 + is the Julian Day Number and utc2 is the fraction of a day. + + However, JD cannot unambiguously represent UTC during a leap + second unless special measures are taken. The convention in the + present function is that the JD day represents UTC days whether + the length is 86399, 86400 or 86401 SI seconds. + + Applications should use the function eraDtf2d to convert from + calendar date and time of day into 2-part quasi Julian Date, as + it implements the leap-second-ambiguity convention just + described. + + 4) The warning status "dubious year" flags UTCs that predate the + introduction of the time scale or that are too far in the + future to be trusted. See eraDat for further details. + + 5) UT1-UTC is tabulated in IERS bulletins. It increases by exactly + one second at the end of each positive UTC leap second, + introduced in order to keep UT1-UTC within +/- 0.9s. n.b. This + practice is under review, and in the future UT1-UTC may grow + essentially without limit. + + 6) The geographical coordinates are with respect to the ERFA_WGS84 + reference ellipsoid. TAKE CARE WITH THE LONGITUDE SIGN: the + longitude required by the present function is east-positive + (i.e. right-handed), in accordance with geographical convention. + + 7) The polar motion xp,yp can be obtained from IERS bulletins. The + values are the coordinates (in radians) of the Celestial + Intermediate Pole with respect to the International Terrestrial + Reference System (see IERS Conventions 2003), measured along the + meridians 0 and 90 deg west respectively. For many + applications, xp and yp can be set to zero. 
+ + 8) If hm, the height above the ellipsoid of the observing station + in meters, is not known but phpa, the pressure in hPa (=mB), is + available, an adequate estimate of hm can be obtained from the + expression + + hm = -29.3 * tsl * log ( phpa / 1013.25 ); + + where tsl is the approximate sea-level air temperature in K + (See Astrophysical Quantities, C.W.Allen, 3rd edition, section + 52). Similarly, if the pressure phpa is not known, it can be + estimated from the height of the observing station, hm, as + follows: + + phpa = 1013.25 * exp ( -hm / ( 29.3 * tsl ) ); + + Note, however, that the refraction is nearly proportional to + the pressure and that an accurate phpa value is important for + precise work. + + 9) The argument wl specifies the observing wavelength in + micrometers. The transition from optical to radio is assumed to + occur at 100 micrometers (about 3000 GHz). + + 10) The accuracy of the result is limited by the corrections for + refraction, which use a simple A*tan(z) + B*tan^3(z) model. + Providing the meteorological parameters are known accurately and + there are no gross local effects, the predicted astrometric + coordinates should be within 0.05 arcsec (optical) or 1 arcsec + (radio) for a zenith distance of less than 70 degrees, better + than 30 arcsec (optical or radio) at 85 degrees and better + than 20 arcmin (optical) or 30 arcmin (radio) at the horizon. + + Without refraction, the complementary functions eraAtco13 and + eraAtoc13 are self-consistent to better than 1 microarcsecond + all over the celestial sphere. With refraction included, + consistency falls off at high zenith distances, but is still + better than 0.05 arcsec at 85 degrees. + + 11) It is advisable to take great care with units, as even unlikely + values of the input parameters are accepted and processed in + accordance with the models used. + + Called: + eraApco13 astrometry parameters, ICRS-observed + eraAtoiq quick observed to CIRS + eraAticq quick CIRS to ICRS + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
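The two estimates in note 8 are inverses of one another by construction, which makes a quick numerical sanity check easy; a short sketch with illustrative values:

    from math import exp, log

    tsl = 283.0    # approximate sea-level air temperature (K)
    phpa = 952.0   # pressure at the observer (hPa)

    hm = -29.3 * tsl * log(phpa / 1013.25)   # height estimate, about 517 m here
    assert abs(1013.25 * exp(-hm / (29.3 * tsl)) - phpa) < 1e-9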
+ + """ + + #Turn all inputs into arrays + type_in = numpy.array(type, dtype=numpy.dtype('S16'), order="C", copy=False, subok=True) + ob1_in = numpy.array(ob1, dtype=numpy.double, order="C", copy=False, subok=True) + ob2_in = numpy.array(ob2, dtype=numpy.double, order="C", copy=False, subok=True) + utc1_in = numpy.array(utc1, dtype=numpy.double, order="C", copy=False, subok=True) + utc2_in = numpy.array(utc2, dtype=numpy.double, order="C", copy=False, subok=True) + dut1_in = numpy.array(dut1, dtype=numpy.double, order="C", copy=False, subok=True) + elong_in = numpy.array(elong, dtype=numpy.double, order="C", copy=False, subok=True) + phi_in = numpy.array(phi, dtype=numpy.double, order="C", copy=False, subok=True) + hm_in = numpy.array(hm, dtype=numpy.double, order="C", copy=False, subok=True) + xp_in = numpy.array(xp, dtype=numpy.double, order="C", copy=False, subok=True) + yp_in = numpy.array(yp, dtype=numpy.double, order="C", copy=False, subok=True) + phpa_in = numpy.array(phpa, dtype=numpy.double, order="C", copy=False, subok=True) + tc_in = numpy.array(tc, dtype=numpy.double, order="C", copy=False, subok=True) + rh_in = numpy.array(rh, dtype=numpy.double, order="C", copy=False, subok=True) + wl_in = numpy.array(wl, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), type_in, ob1_in, ob2_in, utc1_in, utc2_in, dut1_in, elong_in, phi_in, hm_in, xp_in, yp_in, phpa_in, tc_in, rh_in, wl_in) + rc_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + dc_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [type_in, ob1_in, ob2_in, utc1_in, utc2_in, dut1_in, elong_in, phi_in, hm_in, xp_in, yp_in, phpa_in, tc_in, rh_in, wl_in, rc_out, dc_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*15 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._atoc13(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'atoc13') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rc_out.shape) > 0 and rc_out.shape[0] == 1 + rc_out = rc_out.reshape(rc_out.shape[1:]) + assert len(dc_out.shape) > 0 and dc_out.shape[0] == 1 + dc_out = dc_out.reshape(dc_out.shape[1:]) + + return rc_out, dc_out +STATUS_CODES['atoc13'] = {1: 'dubious year (Note 4)', 0: 'OK', -1: 'unacceptable date'} + + + +def atoi13(type, ob1, ob2, utc1, utc2, dut1, elong, phi, hm, xp, yp, phpa, tc, rh, wl): + """ + Wrapper for ERFA function ``eraAtoi13``. + + Parameters + ---------- + type : const char array + ob1 : double array + ob2 : double array + utc1 : double array + utc2 : double array + dut1 : double array + elong : double array + phi : double array + hm : double array + xp : double array + yp : double array + phpa : double array + tc : double array + rh : double array + wl : double array + + Returns + ------- + ri : double array + di : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a A t o i 1 3 + - - - - - - - - - - + + Observed place to CIRS. 
The caller supplies UTC, site coordinates, + ambient air conditions and observing wavelength. + + Given: + type char[] type of coordinates - "R", "H" or "A" (Notes 1,2) + ob1 double observed Az, HA or RA (radians; Az is N=0,E=90) + ob2 double observed ZD or Dec (radians) + utc1 double UTC as a 2-part... + utc2 double ...quasi Julian Date (Notes 3,4) + dut1 double UT1-UTC (seconds, Note 5) + elong double longitude (radians, east +ve, Note 6) + phi double geodetic latitude (radians, Note 6) + hm double height above the ellipsoid (meters, Notes 6,8) + xp,yp double polar motion coordinates (radians, Note 7) + phpa double pressure at the observer (hPa = mB, Note 8) + tc double ambient temperature at the observer (deg C) + rh double relative humidity at the observer (range 0-1) + wl double wavelength (micrometers, Note 9) + + Returned: + ri double* CIRS right ascension (CIO-based, radians) + di double* CIRS declination (radians) + + Returned (function value): + int status: +1 = dubious year (Note 2) + 0 = OK + -1 = unacceptable date + + Notes: + + 1) "Observed" Az,ZD means the position that would be seen by a + perfect geodetically aligned theodolite. (Zenith distance is + used rather than altitude in order to reflect the fact that no + allowance is made for depression of the horizon.) This is + related to the observed HA,Dec via the standard rotation, using + the geodetic latitude (corrected for polar motion), while the + observed HA and RA are related simply through the Earth rotation + angle and the site longitude. "Observed" RA,Dec or HA,Dec thus + means the position that would be seen by a perfect equatorial + with its polar axis aligned to the Earth's axis of rotation. + + 2) Only the first character of the type argument is significant. + "R" or "r" indicates that ob1 and ob2 are the observed right + ascension and declination; "H" or "h" indicates that they are + hour angle (west +ve) and declination; anything else ("A" or + "a" is recommended) indicates that ob1 and ob2 are azimuth + (north zero, east 90 deg) and zenith distance. + + 3) utc1+utc2 is quasi Julian Date (see Note 2), apportioned in any + convenient way between the two arguments, for example where utc1 + is the Julian Day Number and utc2 is the fraction of a day. + + However, JD cannot unambiguously represent UTC during a leap + second unless special measures are taken. The convention in the + present function is that the JD day represents UTC days whether + the length is 86399, 86400 or 86401 SI seconds. + + Applications should use the function eraDtf2d to convert from + calendar date and time of day into 2-part quasi Julian Date, as + it implements the leap-second-ambiguity convention just + described. + + 4) The warning status "dubious year" flags UTCs that predate the + introduction of the time scale or that are too far in the + future to be trusted. See eraDat for further details. + + 5) UT1-UTC is tabulated in IERS bulletins. It increases by exactly + one second at the end of each positive UTC leap second, + introduced in order to keep UT1-UTC within +/- 0.9s. n.b. This + practice is under review, and in the future UT1-UTC may grow + essentially without limit. + + 6) The geographical coordinates are with respect to the ERFA_WGS84 + reference ellipsoid. TAKE CARE WITH THE LONGITUDE SIGN: the + longitude required by the present function is east-positive + (i.e. right-handed), in accordance with geographical convention. + + 7) The polar motion xp,yp can be obtained from IERS bulletins. 
The + values are the coordinates (in radians) of the Celestial + Intermediate Pole with respect to the International Terrestrial + Reference System (see IERS Conventions 2003), measured along the + meridians 0 and 90 deg west respectively. For many + applications, xp and yp can be set to zero. + + 8) If hm, the height above the ellipsoid of the observing station + in meters, is not known but phpa, the pressure in hPa (=mB), is + available, an adequate estimate of hm can be obtained from the + expression + + hm = -29.3 * tsl * log ( phpa / 1013.25 ); + + where tsl is the approximate sea-level air temperature in K + (See Astrophysical Quantities, C.W.Allen, 3rd edition, section + 52). Similarly, if the pressure phpa is not known, it can be + estimated from the height of the observing station, hm, as + follows: + + phpa = 1013.25 * exp ( -hm / ( 29.3 * tsl ) ); + + Note, however, that the refraction is nearly proportional to + the pressure and that an accurate phpa value is important for + precise work. + + 9) The argument wl specifies the observing wavelength in + micrometers. The transition from optical to radio is assumed to + occur at 100 micrometers (about 3000 GHz). + + 10) The accuracy of the result is limited by the corrections for + refraction, which use a simple A*tan(z) + B*tan^3(z) model. + Providing the meteorological parameters are known accurately and + there are no gross local effects, the predicted astrometric + coordinates should be within 0.05 arcsec (optical) or 1 arcsec + (radio) for a zenith distance of less than 70 degrees, better + than 30 arcsec (optical or radio) at 85 degrees and better + than 20 arcmin (optical) or 30 arcmin (radio) at the horizon. + + Without refraction, the complementary functions eraAtio13 and + eraAtoi13 are self-consistent to better than 1 microarcsecond + all over the celestial sphere. With refraction included, + consistency falls off at high zenith distances, but is still + better than 0.05 arcsec at 85 degrees. + + 11) It is advisable to take great care with units, as even unlikely + values of the input parameters are accepted and processed in + accordance with the models used. + + Called: + eraApio13 astrometry parameters, CIRS-observed, 2013 + eraAtoiq quick observed to CIRS + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file.
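Like the other *13 wrappers in this file, atoi13 broadcasts its inputs and routes the C status through check_errwarn and STATUS_CODES. A minimal sketch, assuming the module is importable as astropy._erfa; the site, weather and coordinate values are illustrative only:

    import numpy as np
    from astropy import _erfa as erfa

    # Observed azimuth/zenith distance ("A") back to CIRS RA,Dec for two
    # targets at one site and epoch; the scalar arguments broadcast.
    ri, di = erfa.atoi13("A",
                         np.array([1.0, 1.1]),                # ob1: azimuth (rad)
                         np.array([0.7, 0.8]),                # ob2: zenith distance (rad)
                         2456384.5, 0.969254051, 0.1550675,   # utc1, utc2, dut1
                         -0.5278008, -1.2345856, 2738.0,      # elong, phi, hm
                         2.472e-7, 1.826e-6,                  # xp, yp
                         731.0, 12.8, 0.59, 0.55)             # phpa, tc, rh, wl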
+ + """ + + #Turn all inputs into arrays + type_in = numpy.array(type, dtype=numpy.dtype('S16'), order="C", copy=False, subok=True) + ob1_in = numpy.array(ob1, dtype=numpy.double, order="C", copy=False, subok=True) + ob2_in = numpy.array(ob2, dtype=numpy.double, order="C", copy=False, subok=True) + utc1_in = numpy.array(utc1, dtype=numpy.double, order="C", copy=False, subok=True) + utc2_in = numpy.array(utc2, dtype=numpy.double, order="C", copy=False, subok=True) + dut1_in = numpy.array(dut1, dtype=numpy.double, order="C", copy=False, subok=True) + elong_in = numpy.array(elong, dtype=numpy.double, order="C", copy=False, subok=True) + phi_in = numpy.array(phi, dtype=numpy.double, order="C", copy=False, subok=True) + hm_in = numpy.array(hm, dtype=numpy.double, order="C", copy=False, subok=True) + xp_in = numpy.array(xp, dtype=numpy.double, order="C", copy=False, subok=True) + yp_in = numpy.array(yp, dtype=numpy.double, order="C", copy=False, subok=True) + phpa_in = numpy.array(phpa, dtype=numpy.double, order="C", copy=False, subok=True) + tc_in = numpy.array(tc, dtype=numpy.double, order="C", copy=False, subok=True) + rh_in = numpy.array(rh, dtype=numpy.double, order="C", copy=False, subok=True) + wl_in = numpy.array(wl, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), type_in, ob1_in, ob2_in, utc1_in, utc2_in, dut1_in, elong_in, phi_in, hm_in, xp_in, yp_in, phpa_in, tc_in, rh_in, wl_in) + ri_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + di_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [type_in, ob1_in, ob2_in, utc1_in, utc2_in, dut1_in, elong_in, phi_in, hm_in, xp_in, yp_in, phpa_in, tc_in, rh_in, wl_in, ri_out, di_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*15 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._atoi13(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'atoi13') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(ri_out.shape) > 0 and ri_out.shape[0] == 1 + ri_out = ri_out.reshape(ri_out.shape[1:]) + assert len(di_out.shape) > 0 and di_out.shape[0] == 1 + di_out = di_out.reshape(di_out.shape[1:]) + + return ri_out, di_out +STATUS_CODES['atoi13'] = {1: 'dubious year (Note 2)', 0: 'OK', -1: 'unacceptable date'} + + + +def atoiq(type, ob1, ob2, astrom): + """ + Wrapper for ERFA function ``eraAtoiq``. + + Parameters + ---------- + type : const char array + ob1 : double array + ob2 : double array + astrom : eraASTROM array + + Returns + ------- + ri : double array + di : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a A t o i q + - - - - - - - - - + + Quick observed place to CIRS, given the star-independent astrometry + parameters. + + Use of this function is appropriate when efficiency is important and + where many star positions are all to be transformed for one date. + The star-independent astrometry parameters can be obtained by + calling eraApio[13] or eraApco[13]. 
+ + Given: + type char[] type of coordinates: "R", "H" or "A" (Note 1) + ob1 double observed Az, HA or RA (radians; Az is N=0,E=90) + ob2 double observed ZD or Dec (radians) + astrom eraASTROM* star-independent astrometry parameters: + pmt double PM time interval (SSB, Julian years) + eb double[3] SSB to observer (vector, au) + eh double[3] Sun to observer (unit vector) + em double distance from Sun to observer (au) + v double[3] barycentric observer velocity (vector, c) + bm1 double sqrt(1-|v|^2): reciprocal of Lorentz factor + bpn double[3][3] bias-precession-nutation matrix + along double longitude + s' (radians) + xpl double polar motion xp wrt local meridian (radians) + ypl double polar motion yp wrt local meridian (radians) + sphi double sine of geodetic latitude + cphi double cosine of geodetic latitude + diurab double magnitude of diurnal aberration vector + eral double "local" Earth rotation angle (radians) + refa double refraction constant A (radians) + refb double refraction constant B (radians) + + Returned: + ri double* CIRS right ascension (CIO-based, radians) + di double* CIRS declination (radians) + + Notes: + + 1) "Observed" Az,El means the position that would be seen by a + perfect geodetically aligned theodolite. This is related to + the observed HA,Dec via the standard rotation, using the geodetic + latitude (corrected for polar motion), while the observed HA and + RA are related simply through the Earth rotation angle and the + site longitude. "Observed" RA,Dec or HA,Dec thus means the + position that would be seen by a perfect equatorial with its + polar axis aligned to the Earth's axis of rotation. By removing + from the observed place the effects of atmospheric refraction and + diurnal aberration, the CIRS RA,Dec is obtained. + + 2) Only the first character of the type argument is significant. + "R" or "r" indicates that ob1 and ob2 are the observed right + ascension and declination; "H" or "h" indicates that they are + hour angle (west +ve) and declination; anything else ("A" or + "a" is recommended) indicates that ob1 and ob2 are azimuth (north + zero, east 90 deg) and zenith distance. (Zenith distance is used + rather than altitude in order to reflect the fact that no + allowance is made for depression of the horizon.) + + 3) The accuracy of the result is limited by the corrections for + refraction, which use a simple A*tan(z) + B*tan^3(z) model. + Providing the meteorological parameters are known accurately and + there are no gross local effects, the predicted observed + coordinates should be within 0.05 arcsec (optical) or 1 arcsec + (radio) for a zenith distance of less than 70 degrees, better + than 30 arcsec (optical or radio) at 85 degrees and better than + 20 arcmin (optical) or 30 arcmin (radio) at the horizon. + + Without refraction, the complementary functions eraAtioq and + eraAtoiq are self-consistent to better than 1 microarcsecond all + over the celestial sphere. With refraction included, consistency + falls off at high zenith distances, but is still better than + 0.05 arcsec at 85 degrees. + + 4) It is advisable to take great care with units, as even unlikely + values of the input parameters are accepted and processed in + accordance with the models used. + + Called: + eraS2c spherical coordinates to unit vector + eraC2s p-vector to spherical + eraAnp normalize angle into range 0 to 2pi + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file.
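Every wrapper in this file builds its iterator the same way: op_axes left-pads each operand's axis list with -1, which tells nditer to insert a broadcast axis there so operands of different rank line up against the common broadcast shape. A self-contained sketch of just that trick, with plain addition standing in for the C kernel:

    import numpy as np

    a = np.arange(6.0).reshape(2, 3)   # rank-2 operand
    b = np.arange(3.0)                 # rank-1 operand
    bcast = np.broadcast(a, b)
    out = np.empty(bcast.shape)

    arrs = [a, b, out]
    op_axes = [[-1]*(bcast.nd - arr.ndim) + list(range(arr.ndim)) for arr in arrs]
    op_flags = [['readonly']]*2 + [['readwrite']]
    it = np.nditer(arrs, op_axes=op_axes, op_flags=op_flags)
    for x, y, z in it:
        z[...] = x + y                 # the ERFA C function runs here instead
    assert (out == a + b).all()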
+ + """ + + #Turn all inputs into arrays + type_in = numpy.array(type, dtype=numpy.dtype('S16'), order="C", copy=False, subok=True) + ob1_in = numpy.array(ob1, dtype=numpy.double, order="C", copy=False, subok=True) + ob2_in = numpy.array(ob2, dtype=numpy.double, order="C", copy=False, subok=True) + astrom_in = numpy.array(astrom, dtype=dt_eraASTROM, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), type_in, ob1_in, ob2_in, astrom_in) + ri_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + di_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [type_in, ob1_in, ob2_in, astrom_in, ri_out, di_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._atoiq(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(ri_out.shape) > 0 and ri_out.shape[0] == 1 + ri_out = ri_out.reshape(ri_out.shape[1:]) + assert len(di_out.shape) > 0 and di_out.shape[0] == 1 + di_out = di_out.reshape(di_out.shape[1:]) + + return ri_out, di_out + + +def ld(bm, p, q, e, em, dlim): + """ + Wrapper for ERFA function ``eraLd``. + + Parameters + ---------- + bm : double array + p : double array + q : double array + e : double array + em : double array + dlim : double array + + Returns + ------- + p1 : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - + e r a L d + - - - - - - + + Apply light deflection by a solar-system body, as part of + transforming coordinate direction into natural direction. + + Given: + bm double mass of the gravitating body (solar masses) + p double[3] direction from observer to source (unit vector) + q double[3] direction from body to source (unit vector) + e double[3] direction from body to observer (unit vector) + em double distance from body to observer (au) + dlim double deflection limiter (Note 4) + + Returned: + p1 double[3] observer to deflected source (unit vector) + + Notes: + + 1) The algorithm is based on Expr. (70) in Klioner (2003) and + Expr. (7.63) in the Explanatory Supplement (Urban & Seidelmann + 2013), with some rearrangement to minimize the effects of machine + precision. + + 2) The mass parameter bm can, as required, be adjusted in order to + allow for such effects as quadrupole field. + + 3) The barycentric position of the deflecting body should ideally + correspond to the time of closest approach of the light ray to + the body. + + 4) The deflection limiter parameter dlim is phi^2/2, where phi is + the angular separation (in radians) between source and body at + which limiting is applied. As phi shrinks below the chosen + threshold, the deflection is artificially reduced, reaching zero + for phi = 0. + + 5) The returned vector p1 is not normalized, but the consequential + departure from unit magnitude is always negligible. + + 6) The arguments p and p1 can be the same array. + + 7) To accumulate total light deflection taking into account the + contributions from several bodies, call the present function for + each body in succession, in decreasing order of distance from the + observer. 
+ + 8) For efficiency, validation is omitted. The supplied vectors must + be of unit magnitude, and the deflection limiter non-zero and + positive. + + References: + + Urban, S. & Seidelmann, P. K. (eds), Explanatory Supplement to + the Astronomical Almanac, 3rd ed., University Science Books + (2013). + + Klioner, Sergei A., "A practical relativistic model for micro- + arcsecond astrometry in space", Astr. J. 125, 1580-1597 (2003). + + Called: + eraPdp scalar product of two p-vectors + eraPxp vector product of two p-vectors + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + bm_in = numpy.array(bm, dtype=numpy.double, order="C", copy=False, subok=True) + p_in = numpy.array(p, dtype=numpy.double, order="C", copy=False, subok=True) + q_in = numpy.array(q, dtype=numpy.double, order="C", copy=False, subok=True) + e_in = numpy.array(e, dtype=numpy.double, order="C", copy=False, subok=True) + em_in = numpy.array(em, dtype=numpy.double, order="C", copy=False, subok=True) + dlim_in = numpy.array(dlim, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(p_in, (3,), "p") + check_trailing_shape(q_in, (3,), "q") + check_trailing_shape(e_in, (3,), "e") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), bm_in, p_in[...,0], q_in[...,0], e_in[...,0], em_in, dlim_in) + p1_out = numpy.empty(broadcast.shape + (3,), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [bm_in, p_in[...,0], q_in[...,0], e_in[...,0], em_in, dlim_in, p1_out[...,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*6 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._ld(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(p1_out.shape) > 0 and p1_out.shape[0] == 1 + p1_out = p1_out.reshape(p1_out.shape[1:]) + + return p1_out + + +def ldn(n, b, ob, sc): + """ + Wrapper for ERFA function ``eraLdn``. + + Parameters + ---------- + n : int array + b : eraLDBODY array + ob : double array + sc : double array + + Returns + ------- + sn : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - + e r a L d n + - - - - - - - + + For a star, apply light deflection by multiple solar-system bodies, + as part of transforming coordinate direction into natural direction. + + Given: + n int number of bodies (note 1) + b eraLDBODY[n] data for each of the n bodies (Notes 1,2): + bm double mass of the body (solar masses, Note 3) + dl double deflection limiter (Note 4) + pv [2][3] barycentric PV of the body (au, au/day) + ob double[3] barycentric position of the observer (au) + sc double[3] observer to star coord direction (unit vector) + + Returned: + sn double[3] observer to deflected star (unit vector) + + Notes: + + 1) The array b contains n entries, one for each body to be + considered. If n = 0, no gravitational light deflection will be + applied, not even for the Sun. + + 2) The array b should include an entry for the Sun as well as for + any planet or other body to be taken into account. The entries + should be in the order in which the light passes the body.
+ + 3) In the entry in the b array for body i, the mass parameter + b[i].bm can, as required, be adjusted in order to allow for such + effects as quadrupole field. + + 4) The deflection limiter parameter b[i].dl is phi^2/2, where phi is + the angular separation (in radians) between star and body at + which limiting is applied. As phi shrinks below the chosen + threshold, the deflection is artificially reduced, reaching zero + for phi = 0. Example values suitable for a terrestrial + observer, together with masses, are as follows: + + body i b[i].bm b[i].dl + + Sun 1.0 6e-6 + Jupiter 0.00095435 3e-9 + Saturn 0.00028574 3e-10 + + 5) For cases where the starlight passes the body before reaching the + observer, the body is placed back along its barycentric track by + the light time from that point to the observer. For cases where + the body is "behind" the observer no such shift is applied. If + a different treatment is preferred, the user has the option of + instead using the eraLd function. Similarly, eraLd can be used + for cases where the source is nearby, not a star. + + 6) The returned vector sn is not normalized, but the consequential + departure from unit magnitude is always negligible. + + 7) The arguments sc and sn can be the same array. + + 8) For efficiency, validation is omitted. The supplied masses must + be greater than zero, the position and velocity vectors must be + right, and the deflection limiter greater than zero. + + Reference: + + Urban, S. & Seidelmann, P. K. (eds), Explanatory Supplement to + the Astronomical Almanac, 3rd ed., University Science Books + (2013), Section 7.2.4. + + Called: + eraCp copy p-vector + eraPdp scalar product of two p-vectors + eraPmp p-vector minus p-vector + eraPpsp p-vector plus scaled p-vector + eraPn decompose p-vector into modulus and direction + eraLd light deflection by a solar-system body + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + n_in = numpy.array(n, dtype=numpy.intc, order="C", copy=False, subok=True) + b_in = numpy.array(b, dtype=dt_eraLDBODY, order="C", copy=False, subok=True) + ob_in = numpy.array(ob, dtype=numpy.double, order="C", copy=False, subok=True) + sc_in = numpy.array(sc, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(ob_in, (3,), "ob") + check_trailing_shape(sc_in, (3,), "sc") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), n_in, b_in, ob_in[...,0], sc_in[...,0]) + sn_out = numpy.empty(broadcast.shape + (3,), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [n_in, b_in, ob_in[...,0], sc_in[...,0], sn_out[...,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._ldn(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(sn_out.shape) > 0 and sn_out.shape[0] == 1 + sn_out = sn_out.reshape(sn_out.shape[1:]) + + return sn_out + + +def ldsun(p, e, em): + """ + Wrapper for ERFA function ``eraLdsun``. 
+ + Parameters + ---------- + p : double array + e : double array + em : double array + + Returns + ------- + p1 : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a L d s u n + - - - - - - - - - + + Deflection of starlight by the Sun. + + Given: + p double[3] direction from observer to star (unit vector) + e double[3] direction from Sun to observer (unit vector) + em double distance from Sun to observer (au) + + Returned: + p1 double[3] observer to deflected star (unit vector) + + Notes: + + 1) The source is presumed to be sufficiently distant that its + directions seen from the Sun and the observer are essentially + the same. + + 2) The deflection is restrained when the angle between the star and + the center of the Sun is less than a threshold value, falling to + zero deflection for zero separation. The chosen threshold value + is within the solar limb for all solar-system applications, and + is about 5 arcminutes for the case of a terrestrial observer. + + 3) The arguments p and p1 can be the same array. + + Called: + eraLd light deflection by a solar-system body + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + p_in = numpy.array(p, dtype=numpy.double, order="C", copy=False, subok=True) + e_in = numpy.array(e, dtype=numpy.double, order="C", copy=False, subok=True) + em_in = numpy.array(em, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(p_in, (3,), "p") + check_trailing_shape(e_in, (3,), "e") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), p_in[...,0], e_in[...,0], em_in) + p1_out = numpy.empty(broadcast.shape + (3,), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [p_in[...,0], e_in[...,0], em_in, p1_out[...,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._ldsun(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(p1_out.shape) > 0 and p1_out.shape[0] == 1 + p1_out = p1_out.reshape(p1_out.shape[1:]) + + return p1_out + + +def pmpx(rc, dc, pr, pd, px, rv, pmt, pob): + """ + Wrapper for ERFA function ``eraPmpx``. + + Parameters + ---------- + rc : double array + dc : double array + pr : double array + pd : double array + px : double array + rv : double array + pmt : double array + pob : double array + + Returns + ------- + pco : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a P m p x + - - - - - - - - + + Proper motion and parallax. + + Given: + rc,dc double ICRS RA,Dec at catalog epoch (radians) + pr double RA proper motion (radians/year; Note 1) + pd double Dec proper motion (radians/year) + px double parallax (arcsec) + rv double radial velocity (km/s, +ve if receding) + pmt double proper motion time interval (SSB, Julian years) + pob double[3] SSB to observer vector (au) + + Returned: + pco double[3] coordinate direction (BCRS unit vector) + + Notes: + + 1) The proper motion in RA is dRA/dt rather than cos(Dec)*dRA/dt. 
+ + 2) The proper motion time interval is for when the starlight + reaches the solar system barycenter. + + 3) To avoid the need for iteration, the Roemer effect (i.e. the + small annual modulation of the proper motion coming from the + changing light time) is applied approximately, using the + direction of the star at the catalog epoch. + + References: + + 1984 Astronomical Almanac, pp B39-B41. + + Urban, S. & Seidelmann, P. K. (eds), Explanatory Supplement to + the Astronomical Almanac, 3rd ed., University Science Books + (2013), Section 7.2. + + Called: + eraPdp scalar product of two p-vectors + eraPn decompose p-vector into modulus and direction + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + rc_in = numpy.array(rc, dtype=numpy.double, order="C", copy=False, subok=True) + dc_in = numpy.array(dc, dtype=numpy.double, order="C", copy=False, subok=True) + pr_in = numpy.array(pr, dtype=numpy.double, order="C", copy=False, subok=True) + pd_in = numpy.array(pd, dtype=numpy.double, order="C", copy=False, subok=True) + px_in = numpy.array(px, dtype=numpy.double, order="C", copy=False, subok=True) + rv_in = numpy.array(rv, dtype=numpy.double, order="C", copy=False, subok=True) + pmt_in = numpy.array(pmt, dtype=numpy.double, order="C", copy=False, subok=True) + pob_in = numpy.array(pob, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(pob_in, (3,), "pob") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), rc_in, dc_in, pr_in, pd_in, px_in, rv_in, pmt_in, pob_in[...,0]) + pco_out = numpy.empty(broadcast.shape + (3,), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [rc_in, dc_in, pr_in, pd_in, px_in, rv_in, pmt_in, pob_in[...,0], pco_out[...,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*8 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._pmpx(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(pco_out.shape) > 0 and pco_out.shape[0] == 1 + pco_out = pco_out.reshape(pco_out.shape[1:]) + + return pco_out + + +def pmsafe(ra1, dec1, pmr1, pmd1, px1, rv1, ep1a, ep1b, ep2a, ep2b): + """ + Wrapper for ERFA function ``eraPmsafe``. + + Parameters + ---------- + ra1 : double array + dec1 : double array + pmr1 : double array + pmd1 : double array + px1 : double array + rv1 : double array + ep1a : double array + ep1b : double array + ep2a : double array + ep2b : double array + + Returns + ------- + ra2 : double array + dec2 : double array + pmr2 : double array + pmd2 : double array + px2 : double array + rv2 : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a P m s a f e + - - - - - - - - - - + + Star proper motion: update star catalog data for space motion, with + special handling of the zero parallax case.
+ + Given: + ra1 double right ascension (radians), before + dec1 double declination (radians), before + pmr1 double RA proper motion (radians/year), before + pmd1 double Dec proper motion (radians/year), before + px1 double parallax (arcseconds), before + rv1 double radial velocity (km/s, +ve = receding), before + ep1a double "before" epoch, part A (Note 1) + ep1b double "before" epoch, part B (Note 1) + ep2a double "after" epoch, part A (Note 1) + ep2b double "after" epoch, part B (Note 1) + + Returned: + ra2 double right ascension (radians), after + dec2 double declination (radians), after + pmr2 double RA proper motion (radians/year), after + pmd2 double Dec proper motion (radians/year), after + px2 double parallax (arcseconds), after + rv2 double radial velocity (km/s, +ve = receding), after + + Returned (function value): + int status: + -1 = system error (should not occur) + 0 = no warnings or errors + 1 = distance overridden (Note 6) + 2 = excessive velocity (Note 7) + 4 = solution didn't converge (Note 8) + else = binary logical OR of the above warnings + + Notes: + + 1) The starting and ending TDB epochs ep1a+ep1b and ep2a+ep2b are + Julian Dates, apportioned in any convenient way between the two + parts (A and B). For example, JD(TDB)=2450123.7 could be + expressed in any of these ways, among others: + + epNa epNb + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in cases + where the loss of several decimal digits of resolution is + acceptable. The J2000 method is best matched to the way the + argument is handled internally and will deliver the optimum + resolution. The MJD method and the date & time methods are both + good compromises between resolution and convenience. + + 2) In accordance with normal star-catalog conventions, the object's + right ascension and declination are freed from the effects of + secular aberration. The frame, which is aligned to the catalog + equator and equinox, is Lorentzian and centered on the SSB. + + The proper motions are the rate of change of the right ascension + and declination at the catalog epoch and are in radians per TDB + Julian year. + + The parallax and radial velocity are in the same frame. + + 3) Care is needed with units. The star coordinates are in radians + and the proper motions in radians per Julian year, but the + parallax is in arcseconds. + + 4) The RA proper motion is in terms of coordinate angle, not true + angle. If the catalog uses arcseconds for both RA and Dec proper + motions, the RA proper motion will need to be divided by cos(Dec) + before use. + + 5) Straight-line motion at constant speed, in the inertial frame, is + assumed. + + 6) An extremely small (or zero or negative) parallax is overridden + to ensure that the object is at a finite but very large distance, + but not so large that the proper motion is equivalent to a large + but safe speed (about 0.1c using the chosen constant). A warning + status of 1 is added to the status if this action has been taken. + + 7) If the space velocity is a significant fraction of c (see the + constant VMAX in the function eraStarpv), it is arbitrarily set + to zero. When this action occurs, 2 is added to the status. + + 8) The relativistic adjustment carried out in the eraStarpv function + involves an iterative calculation. If the process fails to + converge within a set number of iterations, 4 is added to the + status. 
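The four splittings in note 1 all represent the same instant, which is easy to confirm numerically; a quick check using the values from the note:

    splits = {
        'JD': (2450123.7, 0.0),
        'J2000': (2451545.0, -1421.3),
        'MJD': (2400000.5, 50123.2),
        'date & time': (2450123.5, 0.2),
    }
    for method, (ep_a, ep_b) in splits.items():
        assert abs((ep_a + ep_b) - 2450123.7) < 1e-9, method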
+ + Called: + eraSeps angle between two points + eraStarpm update star catalog data for space motion + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + ra1_in = numpy.array(ra1, dtype=numpy.double, order="C", copy=False, subok=True) + dec1_in = numpy.array(dec1, dtype=numpy.double, order="C", copy=False, subok=True) + pmr1_in = numpy.array(pmr1, dtype=numpy.double, order="C", copy=False, subok=True) + pmd1_in = numpy.array(pmd1, dtype=numpy.double, order="C", copy=False, subok=True) + px1_in = numpy.array(px1, dtype=numpy.double, order="C", copy=False, subok=True) + rv1_in = numpy.array(rv1, dtype=numpy.double, order="C", copy=False, subok=True) + ep1a_in = numpy.array(ep1a, dtype=numpy.double, order="C", copy=False, subok=True) + ep1b_in = numpy.array(ep1b, dtype=numpy.double, order="C", copy=False, subok=True) + ep2a_in = numpy.array(ep2a, dtype=numpy.double, order="C", copy=False, subok=True) + ep2b_in = numpy.array(ep2b, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), ra1_in, dec1_in, pmr1_in, pmd1_in, px1_in, rv1_in, ep1a_in, ep1b_in, ep2a_in, ep2b_in) + ra2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + dec2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + pmr2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + pmd2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + px2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + rv2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [ra1_in, dec1_in, pmr1_in, pmd1_in, px1_in, rv1_in, ep1a_in, ep1b_in, ep2a_in, ep2b_in, ra2_out, dec2_out, pmr2_out, pmd2_out, px2_out, rv2_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*10 + [['readwrite']]*7 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._pmsafe(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'pmsafe') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(ra2_out.shape) > 0 and ra2_out.shape[0] == 1 + ra2_out = ra2_out.reshape(ra2_out.shape[1:]) + assert len(dec2_out.shape) > 0 and dec2_out.shape[0] == 1 + dec2_out = dec2_out.reshape(dec2_out.shape[1:]) + assert len(pmr2_out.shape) > 0 and pmr2_out.shape[0] == 1 + pmr2_out = pmr2_out.reshape(pmr2_out.shape[1:]) + assert len(pmd2_out.shape) > 0 and pmd2_out.shape[0] == 1 + pmd2_out = pmd2_out.reshape(pmd2_out.shape[1:]) + assert len(px2_out.shape) > 0 and px2_out.shape[0] == 1 + px2_out = px2_out.reshape(px2_out.shape[1:]) + assert len(rv2_out.shape) > 0 and rv2_out.shape[0] == 1 + rv2_out = rv2_out.reshape(rv2_out.shape[1:]) + + return ra2_out, dec2_out, pmr2_out, pmd2_out, px2_out, rv2_out +STATUS_CODES['pmsafe'] = {-1: 'system error (should not occur)', 0: 'no warnings or errors', 1: 'distance overridden (Note 6)', 2: 'excessive velocity (Note 7)', 4: "solution didn't converge (Note 8)", 'else': 'binary logical OR of the above warnings'} + + + +def pvtob(elong, phi, hm, xp, yp, sp, theta): + 
""" + Wrapper for ERFA function ``eraPvtob``. + + Parameters + ---------- + elong : double array + phi : double array + hm : double array + xp : double array + yp : double array + sp : double array + theta : double array + + Returns + ------- + pv : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a P v t o b + - - - - - - - - - + + Position and velocity of a terrestrial observing station. + + Given: + elong double longitude (radians, east +ve, Note 1) + phi double latitude (geodetic, radians, Note 1) + hm double height above ref. ellipsoid (geodetic, m) + xp,yp double coordinates of the pole (radians, Note 2) + sp double the TIO locator s' (radians, Note 2) + theta double Earth rotation angle (radians, Note 3) + + Returned: + pv double[2][3] position/velocity vector (m, m/s, CIRS) + + Notes: + + 1) The terrestrial coordinates are with respect to the ERFA_WGS84 + reference ellipsoid. + + 2) xp and yp are the coordinates (in radians) of the Celestial + Intermediate Pole with respect to the International Terrestrial + Reference System (see IERS Conventions), measured along the + meridians 0 and 90 deg west respectively. sp is the TIO locator + s', in radians, which positions the Terrestrial Intermediate + Origin on the equator. For many applications, xp, yp and + (especially) sp can be set to zero. + + 3) If theta is Greenwich apparent sidereal time instead of Earth + rotation angle, the result is with respect to the true equator + and equinox of date, i.e. with the x-axis at the equinox rather + than the celestial intermediate origin. + + 4) The velocity units are meters per UT1 second, not per SI second. + This is unlikely to have any practical consequences in the modern + era. + + 5) No validation is performed on the arguments. Error cases that + could lead to arithmetic exceptions are trapped by the eraGd2gc + function, and the result set to zeros. + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Urban, S. & Seidelmann, P. K. (eds), Explanatory Supplement to + the Astronomical Almanac, 3rd ed., University Science Books + (2013), Section 7.4.3.3. + + Called: + eraGd2gc geodetic to geocentric transformation + eraPom00 polar motion matrix + eraTrxp product of transpose of r-matrix and p-vector + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + elong_in = numpy.array(elong, dtype=numpy.double, order="C", copy=False, subok=True) + phi_in = numpy.array(phi, dtype=numpy.double, order="C", copy=False, subok=True) + hm_in = numpy.array(hm, dtype=numpy.double, order="C", copy=False, subok=True) + xp_in = numpy.array(xp, dtype=numpy.double, order="C", copy=False, subok=True) + yp_in = numpy.array(yp, dtype=numpy.double, order="C", copy=False, subok=True) + sp_in = numpy.array(sp, dtype=numpy.double, order="C", copy=False, subok=True) + theta_in = numpy.array(theta, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), elong_in, phi_in, hm_in, xp_in, yp_in, sp_in, theta_in) + pv_out = numpy.empty(broadcast.shape + (2, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [elong_in, phi_in, hm_in, xp_in, yp_in, sp_in, theta_in, pv_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*7 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._pvtob(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(pv_out.shape) > 0 and pv_out.shape[0] == 1 + pv_out = pv_out.reshape(pv_out.shape[1:]) + + return pv_out + + +def refco(phpa, tc, rh, wl): + """ + Wrapper for ERFA function ``eraRefco``. + + Parameters + ---------- + phpa : double array + tc : double array + rh : double array + wl : double array + + Returns + ------- + refa : double array + refb : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a R e f c o + - - - - - - - - - + + Determine the constants A and B in the atmospheric refraction model + dZ = A tan Z + B tan^3 Z. + + Z is the "observed" zenith distance (i.e. affected by refraction) + and dZ is what to add to Z to give the "topocentric" (i.e. in vacuo) + zenith distance. + + Given: + phpa double pressure at the observer (hPa = millibar) + tc double ambient temperature at the observer (deg C) + rh double relative humidity at the observer (range 0-1) + wl double wavelength (micrometers) + + Returned: + refa double* tan Z coefficient (radians) + refb double* tan^3 Z coefficient (radians) + + Notes: + + 1) The model balances speed and accuracy to give good results in + applications where performance at low altitudes is not paramount. + Performance is maintained across a range of conditions, and + applies to both optical/IR and radio. + + 2) The model omits the effects of (i) height above sea level (apart + from the reduced pressure itself), (ii) latitude (i.e. the + flattening of the Earth), (iii) variations in tropospheric lapse + rate and (iv) dispersive effects in the radio. + + The model was tested using the following range of conditions: + + lapse rates 0.0055, 0.0065, 0.0075 deg/meter + latitudes 0, 25, 50, 75 degrees + heights 0, 2500, 5000 meters ASL + pressures mean for height -10% to +5% in steps of 5% + temperatures -10 deg to +20 deg with respect to 280 deg at SL + relative humidity 0, 0.5, 1 + wavelengths 0.4, 0.6, ... 
2 micron, + radio
+         zenith distances   15, 45, 75 degrees
+
+       The accuracy with respect to raytracing through a model
+       atmosphere was as follows:
+
+                              worst         RMS
+
+          optical/IR           62 mas       8 mas
+          radio               319 mas      49 mas
+
+       For this particular set of conditions:
+
+          lapse rate         0.0065 K/meter
+          latitude           50 degrees
+          sea level
+          pressure           1005 mb
+          temperature        280.15 K
+          humidity           80%
+          wavelength         5740 Angstroms
+
+       the results were as follows:
+
+          ZD       raytrace     eraRefco   Saastamoinen
+
+          10         10.27        10.27       10.27
+          20         21.19        21.20       21.19
+          30         33.61        33.61       33.60
+          40         48.82        48.83       48.81
+          45         58.16        58.18       58.16
+          50         69.28        69.30       69.27
+          55         82.97        82.99       82.95
+          60        100.51       100.54      100.50
+          65        124.23       124.26      124.20
+          70        158.63       158.68      158.61
+          72        177.32       177.37      177.31
+          74        200.35       200.38      200.32
+          76        229.45       229.43      229.42
+          78        267.44       267.29      267.41
+          80        319.13       318.55      319.10
+
+         deg        arcsec       arcsec      arcsec
+
+       The values for Saastamoinen's formula (which includes terms
+       up to tan^5) are taken from Hohenkerk and Sinclair (1985).
+
+    3) A wl value in the range 0-100 selects the optical/IR case and is
+       wavelength in micrometers. Any value outside this range selects
+       the radio case.
+
+    4) Outlandish input parameters are silently limited to
+       mathematically safe values. Zero pressure is permissible, and
+       causes zeroes to be returned.
+
+    5) The algorithm draws on several sources, as follows:
+
+       a) The formula for the saturation vapour pressure of water as
+          a function of temperature is taken from Equations (A4.5-A4.7)
+          of Gill (1982).
+
+       b) The formula for the water vapour pressure, given the
+          saturation pressure and the relative humidity, is from
+          Crane (1976), Equation (2.5.5).
+
+       c) The refractivity of air is a function of temperature,
+          total pressure, water-vapour pressure and, in the case
+          of optical/IR, wavelength. The formulae for the two cases are
+          developed from Hohenkerk & Sinclair (1985) and Rueger (2002).
+
+       d) The formula for beta, the ratio of the scale height of the
+          atmosphere to the geocentric distance of the observer, is
+          an adaptation of Equation (9) from Stone (1996). The
+          adaptations, arrived at empirically, consist of (i) a small
+          adjustment to the coefficient and (ii) a humidity term for the
+          radio case only.
+
+       e) The formulae for the refraction constants as a function of
+          n-1 and beta are from Green (1987), Equation (4.31).
+
+    References:
+
+       Crane, R.K., Meeks, M.L. (ed), "Refraction Effects in the Neutral
+       Atmosphere", Methods of Experimental Physics: Astrophysics 12B,
+       Academic Press, 1976.
+
+       Gill, Adrian E., "Atmosphere-Ocean Dynamics", Academic Press,
+       1982.
+
+       Green, R.M., "Spherical Astronomy", Cambridge University Press,
+       1987.
+
+       Hohenkerk, C.Y., & Sinclair, A.T., NAO Technical Note No. 63,
+       1985.
+
+       Rueger, J.M., "Refractive Index Formulae for Electronic Distance
+       Measurement with Radio and Millimetre Waves", in Unisurv Report
+       S-68, School of Surveying and Spatial Information Systems,
+       University of New South Wales, Sydney, Australia, 2002.
+
+       Stone, Ronald C., P.A.S.P. 108, 1051-1058, 1996.
+
+    Copyright (C) 2013-2017, NumFOCUS Foundation.
+    Derived, with permission, from the SOFA library. See notes at end of file.
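+
+    Examples
+    --------
+    An illustrative call, using conditions close to the tabulated test
+    case above (1005 hPa, 280.15 K i.e. roughly 7 deg C, 80% relative
+    humidity, 0.574 micrometer wavelength); the commented magnitude is
+    approximate:
+
+    >>> refa, refb = refco(1005.0, 7.0, 0.8, 0.574)    # refa of order 1e-4 rad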
+ + """ + + #Turn all inputs into arrays + phpa_in = numpy.array(phpa, dtype=numpy.double, order="C", copy=False, subok=True) + tc_in = numpy.array(tc, dtype=numpy.double, order="C", copy=False, subok=True) + rh_in = numpy.array(rh, dtype=numpy.double, order="C", copy=False, subok=True) + wl_in = numpy.array(wl, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), phpa_in, tc_in, rh_in, wl_in) + refa_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + refb_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [phpa_in, tc_in, rh_in, wl_in, refa_out, refb_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._refco(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(refa_out.shape) > 0 and refa_out.shape[0] == 1 + refa_out = refa_out.reshape(refa_out.shape[1:]) + assert len(refb_out.shape) > 0 and refb_out.shape[0] == 1 + refb_out = refb_out.reshape(refb_out.shape[1:]) + + return refa_out, refb_out + + +def epv00(date1, date2): + """ + Wrapper for ERFA function ``eraEpv00``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + pvh : double array + pvb : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a E p v 0 0 + - - - - - - - - - + + Earth position and velocity, heliocentric and barycentric, with + respect to the Barycentric Celestial Reference System. + + Given: + date1,date2 double TDB date (Note 1) + + Returned: + pvh double[2][3] heliocentric Earth position/velocity + pvb double[2][3] barycentric Earth position/velocity + + Returned (function value): + int status: 0 = OK + +1 = warning: date outside + the range 1900-2100 AD + + Notes: + + 1) The TDB date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TDB)=2450123.7 could be expressed in any of these ways, among + others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in cases + where the loss of several decimal digits of resolution is + acceptable. The J2000 method is best matched to the way the + argument is handled internally and will deliver the optimum + resolution. The MJD method and the date & time methods are both + good compromises between resolution and convenience. However, + the accuracy of the result is more likely to be limited by the + algorithm itself than the way the date has been expressed. + + n.b. TT can be used instead of TDB in most applications. 
+
+    2) On return, the arrays pvh and pvb contain the following:
+
+              pvh[0][0]  x       }
+              pvh[0][1]  y       } heliocentric position, au
+              pvh[0][2]  z       }
+
+              pvh[1][0]  xdot    }
+              pvh[1][1]  ydot    } heliocentric velocity, au/d
+              pvh[1][2]  zdot    }
+
+              pvb[0][0]  x       }
+              pvb[0][1]  y       } barycentric position, au
+              pvb[0][2]  z       }
+
+              pvb[1][0]  xdot    }
+              pvb[1][1]  ydot    } barycentric velocity, au/d
+              pvb[1][2]  zdot    }
+
+       The vectors are with respect to the Barycentric Celestial
+       Reference System. The time unit is one day in TDB.
+
+    3) The function is a SIMPLIFIED SOLUTION from the planetary theory
+       VSOP2000 (X. Moisson, P. Bretagnon, 2001, Celes. Mechanics &
+       Dyn. Astron., 80, 3/4, 205-213) and is an adaptation of original
+       Fortran code supplied by P. Bretagnon (private comm., 2000).
+
+    4) Comparisons over the time span 1900-2100 with this simplified
+       solution and the JPL DE405 ephemeris give the following results:
+
+                                  RMS    max
+           Heliocentric:
+              position error     3.7   11.2   km
+              velocity error     1.4    5.0   mm/s
+
+           Barycentric:
+              position error     4.6   13.4   km
+              velocity error     1.4    4.9   mm/s
+
+       Comparisons with the JPL DE406 ephemeris show that by 1800 and
+       2200 the position errors are approximately double their 1900-2100
+       size. By 1500 and 2500 the deterioration is a factor of 10 and
+       by 1000 and 3000 a factor of 60. The velocity accuracy falls off
+       at about half that rate.
+
+    5) It is permissible to use the same array for pvh and pvb, which
+       will receive the barycentric values.
+
+    Copyright (C) 2013-2017, NumFOCUS Foundation.
+    Derived, with permission, from the SOFA library. See notes at end of file.
+
+    """
+
+    #Turn all inputs into arrays
+    date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True)
+    date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True)
+    make_outputs_scalar = False
+
+    #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed
+    broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in)
+    pvh_out = numpy.empty(broadcast.shape + (2, 3), dtype=numpy.double)
+    pvb_out = numpy.empty(broadcast.shape + (2, 3), dtype=numpy.double)
+    c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc)
+
+    #Create the iterator, broadcasting on all but the consumed dimensions
+    arrs = [date1_in, date2_in, pvh_out[...,0,0], pvb_out[...,0,0], c_retval_out]
+    op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs]
+    op_flags = [['readonly']]*2 + [['readwrite']]*3
+    it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags)
+
+    #Iterate
+    stat_ok = _core._epv00(it)
+
+    if not stat_ok:
+        check_errwarn(c_retval_out, 'epv00')
+    #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d
+    if make_outputs_scalar:
+        assert len(pvh_out.shape) > 0 and pvh_out.shape[0] == 1
+        pvh_out = pvh_out.reshape(pvh_out.shape[1:])
+        assert len(pvb_out.shape) > 0 and pvb_out.shape[0] == 1
+        pvb_out = pvb_out.reshape(pvb_out.shape[1:])
+
+    return pvh_out, pvb_out
+STATUS_CODES['epv00'] = {0: 'OK', 1: 'warning: date outside the range 1900-2100 AD'}
+
+
+
+def plan94(date1, date2, np):
+    """
+    Wrapper for ERFA function ``eraPlan94``.
+
+    Parameters
+    ----------
+    date1 : double array
+    date2 : double array
+    np : int array
+
+    Returns
+    -------
+    pv : double array
+
+    Notes
+    -----
+    The ERFA documentation is below.
+ + - - - - - - - - - - + e r a P l a n 9 4 + - - - - - - - - - - + + Approximate heliocentric position and velocity of a nominated major + planet: Mercury, Venus, EMB, Mars, Jupiter, Saturn, Uranus or + Neptune (but not the Earth itself). + + Given: + date1 double TDB date part A (Note 1) + date2 double TDB date part B (Note 1) + np int planet (1=Mercury, 2=Venus, 3=EMB, 4=Mars, + 5=Jupiter, 6=Saturn, 7=Uranus, 8=Neptune) + + Returned (argument): + pv double[2][3] planet p,v (heliocentric, J2000.0, au,au/d) + + Returned (function value): + int status: -1 = illegal NP (outside 1-8) + 0 = OK + +1 = warning: year outside 1000-3000 + +2 = warning: failed to converge + + Notes: + + 1) The date date1+date2 is in the TDB time scale (in practice TT can + be used) and is a Julian Date, apportioned in any convenient way + between the two arguments. For example, JD(TDB)=2450123.7 could + be expressed in any of these ways, among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in cases + where the loss of several decimal digits of resolution is + acceptable. The J2000 method is best matched to the way the + argument is handled internally and will deliver the optimum + resolution. The MJD method and the date & time methods are both + good compromises between resolution and convenience. The limited + accuracy of the present algorithm is such that any of the methods + is satisfactory. + + 2) If an np value outside the range 1-8 is supplied, an error status + (function value -1) is returned and the pv vector set to zeroes. + + 3) For np=3 the result is for the Earth-Moon Barycenter. To obtain + the heliocentric position and velocity of the Earth, use instead + the ERFA function eraEpv00. + + 4) On successful return, the array pv contains the following: + + pv[0][0] x } + pv[0][1] y } heliocentric position, au + pv[0][2] z } + + pv[1][0] xdot } + pv[1][1] ydot } heliocentric velocity, au/d + pv[1][2] zdot } + + The reference frame is equatorial and is with respect to the + mean equator and equinox of epoch J2000.0. + + 5) The algorithm is due to J.L. Simon, P. Bretagnon, J. Chapront, + M. Chapront-Touze, G. Francou and J. Laskar (Bureau des + Longitudes, Paris, France). From comparisons with JPL + ephemeris DE102, they quote the following maximum errors + over the interval 1800-2050: + + L (arcsec) B (arcsec) R (km) + + Mercury 4 1 300 + Venus 5 1 800 + EMB 6 1 1000 + Mars 17 1 7700 + Jupiter 71 5 76000 + Saturn 81 13 267000 + Uranus 86 7 712000 + Neptune 11 1 253000 + + Over the interval 1000-3000, they report that the accuracy is no + worse than 1.5 times that over 1800-2050. Outside 1000-3000 the + accuracy declines. + + Comparisons of the present function with the JPL DE200 ephemeris + give the following RMS errors over the interval 1960-2025: + + position (km) velocity (m/s) + + Mercury 334 0.437 + Venus 1060 0.855 + EMB 2010 0.815 + Mars 7690 1.98 + Jupiter 71700 7.70 + Saturn 199000 19.4 + Uranus 564000 16.4 + Neptune 158000 14.4 + + Comparisons against DE200 over the interval 1800-2100 gave the + following maximum absolute differences. (The results using + DE406 were essentially the same.) 
+ + L (arcsec) B (arcsec) R (km) Rdot (m/s) + + Mercury 7 1 500 0.7 + Venus 7 1 1100 0.9 + EMB 9 1 1300 1.0 + Mars 26 1 9000 2.5 + Jupiter 78 6 82000 8.2 + Saturn 87 14 263000 24.6 + Uranus 86 7 661000 27.4 + Neptune 11 2 248000 21.4 + + 6) The present ERFA re-implementation of the original Simon et al. + Fortran code differs from the original in the following respects: + + * C instead of Fortran. + + * The date is supplied in two parts. + + * The result is returned only in equatorial Cartesian form; + the ecliptic longitude, latitude and radius vector are not + returned. + + * The result is in the J2000.0 equatorial frame, not ecliptic. + + * More is done in-line: there are fewer calls to subroutines. + + * Different error/warning status values are used. + + * A different Kepler's-equation-solver is used (avoiding + use of double precision complex). + + * Polynomials in t are nested to minimize rounding errors. + + * Explicit double constants are used to avoid mixed-mode + expressions. + + None of the above changes affects the result significantly. + + 7) The returned status indicates the most serious condition + encountered during execution of the function. Illegal np is + considered the most serious, overriding failure to converge, + which in turn takes precedence over the remote date warning. + + Called: + eraAnp normalize angle into range 0 to 2pi + + Reference: Simon, J.L, Bretagnon, P., Chapront, J., + Chapront-Touze, M., Francou, G., and Laskar, J., + Astron. Astrophys. 282, 663 (1994). + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + np_in = numpy.array(np, dtype=numpy.intc, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in, np_in) + pv_out = numpy.empty(broadcast.shape + (2, 3), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, np_in, pv_out[...,0,0], c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._plan94(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'plan94') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(pv_out.shape) > 0 and pv_out.shape[0] == 1 + pv_out = pv_out.reshape(pv_out.shape[1:]) + + return pv_out +STATUS_CODES['plan94'] = {-1: 'illegal NP (outside 1-8)', 0: 'OK', 1: 'warning: year outside 1000-3000', 2: 'warning: failed to converge'} + + + +def fad03(t): + """ + Wrapper for ERFA function ``eraFad03``. + + Parameters + ---------- + t : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a F a d 0 3 + - - - - - - - - - + + Fundamental argument, IERS Conventions (2003): + mean elongation of the Moon from the Sun. 
+ + Given: + t double TDB, Julian centuries since J2000.0 (Note 1) + + Returned (function value): + double D, radians (Note 2) + + Notes: + + 1) Though t is strictly TDB, it is usually more convenient to use + TT, which makes no significant difference. + + 2) The expression used is as adopted in IERS Conventions (2003) and + is from Simon et al. (1994). + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Simon, J.-L., Bretagnon, P., Chapront, J., Chapront-Touze, M., + Francou, G., Laskar, J. 1994, Astron.Astrophys. 282, 663-683 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + t_in = numpy.array(t, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), t_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [t_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._fad03(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def fae03(t): + """ + Wrapper for ERFA function ``eraFae03``. + + Parameters + ---------- + t : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a F a e 0 3 + - - - - - - - - - + + Fundamental argument, IERS Conventions (2003): + mean longitude of Earth. + + Given: + t double TDB, Julian centuries since J2000.0 (Note 1) + + Returned (function value): + double mean longitude of Earth, radians (Note 2) + + Notes: + + 1) Though t is strictly TDB, it is usually more convenient to use + TT, which makes no significant difference. + + 2) The expression used is as adopted in IERS Conventions (2003) and + comes from Souchay et al. (1999) after Simon et al. (1994). + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Simon, J.-L., Bretagnon, P., Chapront, J., Chapront-Touze, M., + Francou, G., Laskar, J. 1994, Astron.Astrophys. 282, 663-683 + + Souchay, J., Loysel, B., Kinoshita, H., Folgueira, M. 1999, + Astron.Astrophys.Supp.Ser. 135, 111 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
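+
+    Examples
+    --------
+    Illustrative only; at t = 0 (J2000.0) Earth's mean longitude is
+    roughly 100.5 degrees, so the returned value is about 1.75 rad:
+
+    >>> le = fae03(0.0)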
+ + """ + + #Turn all inputs into arrays + t_in = numpy.array(t, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), t_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [t_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._fae03(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def faf03(t): + """ + Wrapper for ERFA function ``eraFaf03``. + + Parameters + ---------- + t : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a F a f 0 3 + - - - - - - - - - + + Fundamental argument, IERS Conventions (2003): + mean longitude of the Moon minus mean longitude of the ascending + node. + + Given: + t double TDB, Julian centuries since J2000.0 (Note 1) + + Returned (function value): + double F, radians (Note 2) + + Notes: + + 1) Though t is strictly TDB, it is usually more convenient to use + TT, which makes no significant difference. + + 2) The expression used is as adopted in IERS Conventions (2003) and + is from Simon et al. (1994). + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Simon, J.-L., Bretagnon, P., Chapront, J., Chapront-Touze, M., + Francou, G., Laskar, J. 1994, Astron.Astrophys. 282, 663-683 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + t_in = numpy.array(t, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), t_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [t_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._faf03(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def faju03(t): + """ + Wrapper for ERFA function ``eraFaju03``. + + Parameters + ---------- + t : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a F a j u 0 3 + - - - - - - - - - - + + Fundamental argument, IERS Conventions (2003): + mean longitude of Jupiter. 
+ + Given: + t double TDB, Julian centuries since J2000.0 (Note 1) + + Returned (function value): + double mean longitude of Jupiter, radians (Note 2) + + Notes: + + 1) Though t is strictly TDB, it is usually more convenient to use + TT, which makes no significant difference. + + 2) The expression used is as adopted in IERS Conventions (2003) and + comes from Souchay et al. (1999) after Simon et al. (1994). + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Simon, J.-L., Bretagnon, P., Chapront, J., Chapront-Touze, M., + Francou, G., Laskar, J. 1994, Astron.Astrophys. 282, 663-683 + + Souchay, J., Loysel, B., Kinoshita, H., Folgueira, M. 1999, + Astron.Astrophys.Supp.Ser. 135, 111 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + t_in = numpy.array(t, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), t_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [t_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._faju03(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def fal03(t): + """ + Wrapper for ERFA function ``eraFal03``. + + Parameters + ---------- + t : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a F a l 0 3 + - - - - - - - - - + + Fundamental argument, IERS Conventions (2003): + mean anomaly of the Moon. + + Given: + t double TDB, Julian centuries since J2000.0 (Note 1) + + Returned (function value): + double l, radians (Note 2) + + Notes: + + 1) Though t is strictly TDB, it is usually more convenient to use + TT, which makes no significant difference. + + 2) The expression used is as adopted in IERS Conventions (2003) and + is from Simon et al. (1994). + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Simon, J.-L., Bretagnon, P., Chapront, J., Chapront-Touze, M., + Francou, G., Laskar, J. 1994, Astron.Astrophys. 282, 663-683 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
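+
+    Examples
+    --------
+    Illustrative only; at t = 0 (J2000.0) the Moon's mean anomaly is
+    roughly 135 degrees, so the returned value is about 2.36 rad:
+
+    >>> el = fal03(0.0)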
+ + """ + + #Turn all inputs into arrays + t_in = numpy.array(t, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), t_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [t_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._fal03(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def falp03(t): + """ + Wrapper for ERFA function ``eraFalp03``. + + Parameters + ---------- + t : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a F a l p 0 3 + - - - - - - - - - - + + Fundamental argument, IERS Conventions (2003): + mean anomaly of the Sun. + + Given: + t double TDB, Julian centuries since J2000.0 (Note 1) + + Returned (function value): + double l', radians (Note 2) + + Notes: + + 1) Though t is strictly TDB, it is usually more convenient to use + TT, which makes no significant difference. + + 2) The expression used is as adopted in IERS Conventions (2003) and + is from Simon et al. (1994). + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Simon, J.-L., Bretagnon, P., Chapront, J., Chapront-Touze, M., + Francou, G., Laskar, J. 1994, Astron.Astrophys. 282, 663-683 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + t_in = numpy.array(t, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), t_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [t_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._falp03(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def fama03(t): + """ + Wrapper for ERFA function ``eraFama03``. + + Parameters + ---------- + t : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a F a m a 0 3 + - - - - - - - - - - + + Fundamental argument, IERS Conventions (2003): + mean longitude of Mars. 
+ + Given: + t double TDB, Julian centuries since J2000.0 (Note 1) + + Returned (function value): + double mean longitude of Mars, radians (Note 2) + + Notes: + + 1) Though t is strictly TDB, it is usually more convenient to use + TT, which makes no significant difference. + + 2) The expression used is as adopted in IERS Conventions (2003) and + comes from Souchay et al. (1999) after Simon et al. (1994). + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Simon, J.-L., Bretagnon, P., Chapront, J., Chapront-Touze, M., + Francou, G., Laskar, J. 1994, Astron.Astrophys. 282, 663-683 + + Souchay, J., Loysel, B., Kinoshita, H., Folgueira, M. 1999, + Astron.Astrophys.Supp.Ser. 135, 111 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + t_in = numpy.array(t, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), t_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [t_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._fama03(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def fame03(t): + """ + Wrapper for ERFA function ``eraFame03``. + + Parameters + ---------- + t : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a F a m e 0 3 + - - - - - - - - - - + + Fundamental argument, IERS Conventions (2003): + mean longitude of Mercury. + + Given: + t double TDB, Julian centuries since J2000.0 (Note 1) + + Returned (function value): + double mean longitude of Mercury, radians (Note 2) + + Notes: + + 1) Though t is strictly TDB, it is usually more convenient to use + TT, which makes no significant difference. + + 2) The expression used is as adopted in IERS Conventions (2003) and + comes from Souchay et al. (1999) after Simon et al. (1994). + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Simon, J.-L., Bretagnon, P., Chapront, J., Chapront-Touze, M., + Francou, G., Laskar, J. 1994, Astron.Astrophys. 282, 663-683 + + Souchay, J., Loysel, B., Kinoshita, H., Folgueira, M. 1999, + Astron.Astrophys.Supp.Ser. 135, 111 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
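+
+    Examples
+    --------
+    Illustrative only; at t = 0 (J2000.0) the mean longitude of Mercury
+    is roughly 252 degrees, so the returned value is about 4.40 rad:
+
+    >>> lme = fame03(0.0)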
+ + """ + + #Turn all inputs into arrays + t_in = numpy.array(t, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), t_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [t_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._fame03(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def fane03(t): + """ + Wrapper for ERFA function ``eraFane03``. + + Parameters + ---------- + t : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a F a n e 0 3 + - - - - - - - - - - + + Fundamental argument, IERS Conventions (2003): + mean longitude of Neptune. + + Given: + t double TDB, Julian centuries since J2000.0 (Note 1) + + Returned (function value): + double mean longitude of Neptune, radians (Note 2) + + Notes: + + 1) Though t is strictly TDB, it is usually more convenient to use + TT, which makes no significant difference. + + 2) The expression used is as adopted in IERS Conventions (2003) and + is adapted from Simon et al. (1994). + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Simon, J.-L., Bretagnon, P., Chapront, J., Chapront-Touze, M., + Francou, G., Laskar, J. 1994, Astron.Astrophys. 282, 663-683 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + t_in = numpy.array(t, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), t_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [t_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._fane03(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def faom03(t): + """ + Wrapper for ERFA function ``eraFaom03``. + + Parameters + ---------- + t : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a F a o m 0 3 + - - - - - - - - - - + + Fundamental argument, IERS Conventions (2003): + mean longitude of the Moon's ascending node. 
+ + Given: + t double TDB, Julian centuries since J2000.0 (Note 1) + + Returned (function value): + double Omega, radians (Note 2) + + Notes: + + 1) Though t is strictly TDB, it is usually more convenient to use + TT, which makes no significant difference. + + 2) The expression used is as adopted in IERS Conventions (2003) and + is from Simon et al. (1994). + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Simon, J.-L., Bretagnon, P., Chapront, J., Chapront-Touze, M., + Francou, G., Laskar, J. 1994, Astron.Astrophys. 282, 663-683 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + t_in = numpy.array(t, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), t_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [t_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._faom03(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def fapa03(t): + """ + Wrapper for ERFA function ``eraFapa03``. + + Parameters + ---------- + t : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a F a p a 0 3 + - - - - - - - - - - + + Fundamental argument, IERS Conventions (2003): + general accumulated precession in longitude. + + Given: + t double TDB, Julian centuries since J2000.0 (Note 1) + + Returned (function value): + double general precession in longitude, radians (Note 2) + + Notes: + + 1) Though t is strictly TDB, it is usually more convenient to use + TT, which makes no significant difference. + + 2) The expression used is as adopted in IERS Conventions (2003). It + is taken from Kinoshita & Souchay (1990) and comes originally + from Lieske et al. (1977). + + References: + + Kinoshita, H. and Souchay J. 1990, Celest.Mech. and Dyn.Astron. + 48, 187 + + Lieske, J.H., Lederle, T., Fricke, W. & Morando, B. 1977, + Astron.Astrophys. 58, 1-16 + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
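+
+    Examples
+    --------
+    Illustrative only; the accumulated precession vanishes at t = 0 and
+    grows by roughly 0.0244 rad (about 5029 arcsec) per Julian century:
+
+    >>> pa = fapa03(1.0)    # one century after J2000.0, ~0.0244 rad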
+ + """ + + #Turn all inputs into arrays + t_in = numpy.array(t, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), t_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [t_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._fapa03(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def fasa03(t): + """ + Wrapper for ERFA function ``eraFasa03``. + + Parameters + ---------- + t : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a F a s a 0 3 + - - - - - - - - - - + + Fundamental argument, IERS Conventions (2003): + mean longitude of Saturn. + + Given: + t double TDB, Julian centuries since J2000.0 (Note 1) + + Returned (function value): + double mean longitude of Saturn, radians (Note 2) + + Notes: + + 1) Though t is strictly TDB, it is usually more convenient to use + TT, which makes no significant difference. + + 2) The expression used is as adopted in IERS Conventions (2003) and + comes from Souchay et al. (1999) after Simon et al. (1994). + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Simon, J.-L., Bretagnon, P., Chapront, J., Chapront-Touze, M., + Francou, G., Laskar, J. 1994, Astron.Astrophys. 282, 663-683 + + Souchay, J., Loysel, B., Kinoshita, H., Folgueira, M. 1999, + Astron.Astrophys.Supp.Ser. 135, 111 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + t_in = numpy.array(t, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), t_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [t_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._fasa03(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def faur03(t): + """ + Wrapper for ERFA function ``eraFaur03``. + + Parameters + ---------- + t : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. 
+ + - - - - - - - - - - + e r a F a u r 0 3 + - - - - - - - - - - + + Fundamental argument, IERS Conventions (2003): + mean longitude of Uranus. + + Given: + t double TDB, Julian centuries since J2000.0 (Note 1) + + Returned (function value): + double mean longitude of Uranus, radians (Note 2) + + Notes: + + 1) Though t is strictly TDB, it is usually more convenient to use + TT, which makes no significant difference. + + 2) The expression used is as adopted in IERS Conventions (2003) and + is adapted from Simon et al. (1994). + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Simon, J.-L., Bretagnon, P., Chapront, J., Chapront-Touze, M., + Francou, G., Laskar, J. 1994, Astron.Astrophys. 282, 663-683 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + t_in = numpy.array(t, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), t_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [t_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._faur03(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def fave03(t): + """ + Wrapper for ERFA function ``eraFave03``. + + Parameters + ---------- + t : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a F a v e 0 3 + - - - - - - - - - - + + Fundamental argument, IERS Conventions (2003): + mean longitude of Venus. + + Given: + t double TDB, Julian centuries since J2000.0 (Note 1) + + Returned (function value): + double mean longitude of Venus, radians (Note 2) + + Notes: + + 1) Though t is strictly TDB, it is usually more convenient to use + TT, which makes no significant difference. + + 2) The expression used is as adopted in IERS Conventions (2003) and + comes from Souchay et al. (1999) after Simon et al. (1994). + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Simon, J.-L., Bretagnon, P., Chapront, J., Chapront-Touze, M., + Francou, G., Laskar, J. 1994, Astron.Astrophys. 282, 663-683 + + Souchay, J., Loysel, B., Kinoshita, H., Folgueira, M. 1999, + Astron.Astrophys.Supp.Ser. 135, 111 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
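+
+    Examples
+    --------
+    Illustrative only; at t = 0 (J2000.0) the mean longitude of Venus
+    is roughly 182 degrees, so the returned value is about 3.18 rad:
+
+    >>> lve = fave03(0.0)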
+ + """ + + #Turn all inputs into arrays + t_in = numpy.array(t, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), t_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [t_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._fave03(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def bi00(): + """ + Wrapper for ERFA function ``eraBi00``. + + Parameters + ---------- + + Returns + ------- + dpsibi : double array + depsbi : double array + dra : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a B i 0 0 + - - - - - - - - + + Frame bias components of IAU 2000 precession-nutation models (part + of MHB2000 with additions). + + Returned: + dpsibi,depsbi double longitude and obliquity corrections + dra double the ICRS RA of the J2000.0 mean equinox + + Notes: + + 1) The frame bias corrections in longitude and obliquity (radians) + are required in order to correct for the offset between the GCRS + pole and the mean J2000.0 pole. They define, with respect to the + GCRS frame, a J2000.0 mean pole that is consistent with the rest + of the IAU 2000A precession-nutation model. + + 2) In addition to the displacement of the pole, the complete + description of the frame bias requires also an offset in right + ascension. This is not part of the IAU 2000A model, and is from + Chapront et al. (2002). It is returned in radians. + + 3) This is a supplemented implementation of one aspect of the IAU + 2000A nutation model, formally adopted by the IAU General + Assembly in 2000, namely MHB2000 (Mathews et al. 2002). + + References: + + Chapront, J., Chapront-Touze, M. & Francou, G., Astron. + Astrophys., 387, 700, 2002. + + Mathews, P.M., Herring, T.A., Buffet, B.A., "Modeling of nutation + and precession New nutation series for nonrigid Earth and + insights into the Earth's interior", J.Geophys.Res., 107, B4, + 2002. The MHB2000 code itself was obtained on 9th September 2002 + from ftp://maia.usno.navy.mil/conv2000/chapter5/IAU2000A. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), ) + dpsibi_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + depsbi_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + dra_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [dpsibi_out, depsbi_out, dra_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*0 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._bi00(it) + + return dpsibi_out, depsbi_out, dra_out + + +def bp00(date1, date2): + """ + Wrapper for ERFA function ``eraBp00``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + rb : double array + rp : double array + rbp : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a B p 0 0 + - - - - - - - - + + Frame bias and precession, IAU 2000. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + rb double[3][3] frame bias matrix (Note 2) + rp double[3][3] precession matrix (Note 3) + rbp double[3][3] bias-precession matrix (Note 4) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The matrix rb transforms vectors from GCRS to mean J2000.0 by + applying frame bias. + + 3) The matrix rp transforms vectors from J2000.0 mean equator and + equinox to mean equator and equinox of date by applying + precession. + + 4) The matrix rbp transforms vectors from GCRS to mean equator and + equinox of date by applying frame bias then precession. It is + the product rp x rb. + + 5) It is permissible to re-use the same array in the returned + arguments. The arrays are filled in the order given. + + Called: + eraBi00 frame bias components, IAU 2000 + eraPr00 IAU 2000 precession adjustments + eraIr initialize r-matrix to identity + eraRx rotate around X-axis + eraRy rotate around Y-axis + eraRz rotate around Z-axis + eraCr copy r-matrix + eraRxr product of two r-matrices + + Reference: + "Expressions for the Celestial Intermediate Pole and Celestial + Ephemeris Origin consistent with the IAU 2000A precession- + nutation model", Astron.Astrophys. 400, 1145-1154 (2003) + + n.b. The celestial ephemeris origin (CEO) was renamed "celestial + intermediate origin" (CIO) by IAU 2006 Resolution 2. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + rb_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rp_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rbp_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, rb_out[...,0,0], rp_out[...,0,0], rbp_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._bp00(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rb_out.shape) > 0 and rb_out.shape[0] == 1 + rb_out = rb_out.reshape(rb_out.shape[1:]) + assert len(rp_out.shape) > 0 and rp_out.shape[0] == 1 + rp_out = rp_out.reshape(rp_out.shape[1:]) + assert len(rbp_out.shape) > 0 and rbp_out.shape[0] == 1 + rbp_out = rbp_out.reshape(rbp_out.shape[1:]) + + return rb_out, rp_out, rbp_out + + +def bp06(date1, date2): + """ + Wrapper for ERFA function ``eraBp06``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + rb : double array + rp : double array + rbp : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a B p 0 6 + - - - - - - - - + + Frame bias and precession, IAU 2006. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + rb double[3][3] frame bias matrix (Note 2) + rp double[3][3] precession matrix (Note 3) + rbp double[3][3] bias-precession matrix (Note 4) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The matrix rb transforms vectors from GCRS to mean J2000.0 by + applying frame bias. + + 3) The matrix rp transforms vectors from mean J2000.0 to mean of + date by applying precession. + + 4) The matrix rbp transforms vectors from GCRS to mean of date by + applying frame bias then precession. It is the product rp x rb. + + 5) It is permissible to re-use the same array in the returned + arguments. The arrays are filled in the order given. + + Called: + eraPfw06 bias-precession F-W angles, IAU 2006 + eraFw2m F-W angles to r-matrix + eraPmat06 PB matrix, IAU 2006 + eraTr transpose r-matrix + eraRxr product of two r-matrices + eraCr copy r-matrix + + References: + + Capitaine, N. 
& Wallace, P.T., 2006, Astron.Astrophys. 450, 855 + + Wallace, P.T. & Capitaine, N., 2006, Astron.Astrophys. 459, 981 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + rb_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rp_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rbp_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, rb_out[...,0,0], rp_out[...,0,0], rbp_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._bp06(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rb_out.shape) > 0 and rb_out.shape[0] == 1 + rb_out = rb_out.reshape(rb_out.shape[1:]) + assert len(rp_out.shape) > 0 and rp_out.shape[0] == 1 + rp_out = rp_out.reshape(rp_out.shape[1:]) + assert len(rbp_out.shape) > 0 and rbp_out.shape[0] == 1 + rbp_out = rbp_out.reshape(rbp_out.shape[1:]) + + return rb_out, rp_out, rbp_out + + +def bpn2xy(rbpn): + """ + Wrapper for ERFA function ``eraBpn2xy``. + + Parameters + ---------- + rbpn : double array + + Returns + ------- + x : double array + y : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a B p n 2 x y + - - - - - - - - - - + + Extract from the bias-precession-nutation matrix the X,Y coordinates + of the Celestial Intermediate Pole. + + Given: + rbpn double[3][3] celestial-to-true matrix (Note 1) + + Returned: + x,y double Celestial Intermediate Pole (Note 2) + + Notes: + + 1) The matrix rbpn transforms vectors from GCRS to true equator (and + CIO or equinox) of date, and therefore the Celestial Intermediate + Pole unit vector is the bottom row of the matrix. + + 2) The arguments x,y are components of the Celestial Intermediate + Pole unit vector in the Geocentric Celestial Reference System. + + Reference: + + "Expressions for the Celestial Intermediate Pole and Celestial + Ephemeris Origin consistent with the IAU 2000A precession- + nutation model", Astron.Astrophys. 400, 1145-1154 + (2003) + + n.b. The celestial ephemeris origin (CEO) was renamed "celestial + intermediate origin" (CIO) by IAU 2006 Resolution 2. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
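+
+    A minimal usage sketch (the identity matrix merely stands in for a
+    real NPB matrix, so x = y = 0 here):
+
+    >>> import numpy
+    >>> from astropy import _erfa as erfa
+    >>> x, y = erfa.bpn2xy(numpy.eye(3))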
+ + """ + + #Turn all inputs into arrays + rbpn_in = numpy.array(rbpn, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(rbpn_in, (3, 3), "rbpn") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), rbpn_in[...,0,0]) + x_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + y_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [rbpn_in[...,0,0], x_out, y_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._bpn2xy(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(x_out.shape) > 0 and x_out.shape[0] == 1 + x_out = x_out.reshape(x_out.shape[1:]) + assert len(y_out.shape) > 0 and y_out.shape[0] == 1 + y_out = y_out.reshape(y_out.shape[1:]) + + return x_out, y_out + + +def c2i00a(date1, date2): + """ + Wrapper for ERFA function ``eraC2i00a``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + rc2i : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a C 2 i 0 0 a + - - - - - - - - - - + + Form the celestial-to-intermediate matrix for a given date using the + IAU 2000A precession-nutation model. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + rc2i double[3][3] celestial-to-intermediate matrix (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The matrix rc2i is the first stage in the transformation from + celestial to terrestrial coordinates: + + [TRS] = RPOM * R_3(ERA) * rc2i * [CRS] + + = rc2t * [CRS] + + where [CRS] is a vector in the Geocentric Celestial Reference + System and [TRS] is a vector in the International Terrestrial + Reference System (see IERS Conventions 2003), ERA is the Earth + Rotation Angle and RPOM is the polar motion matrix. + + 3) A faster, but slightly less accurate result (about 1 mas), can be + obtained by using instead the eraC2i00b function. + + Called: + eraPnm00a classical NPB matrix, IAU 2000A + eraC2ibpn celestial-to-intermediate matrix, given NPB matrix + + References: + + "Expressions for the Celestial Intermediate Pole and Celestial + Ephemeris Origin consistent with the IAU 2000A precession- + nutation model", Astron.Astrophys. 400, 1145-1154 + (2003) + + n.b. The celestial ephemeris origin (CEO) was renamed "celestial + intermediate origin" (CIO) by IAU 2006 Resolution 2. + + McCarthy, D. D., Petit, G. 
(eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + rc2i_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, rc2i_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._c2i00a(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rc2i_out.shape) > 0 and rc2i_out.shape[0] == 1 + rc2i_out = rc2i_out.reshape(rc2i_out.shape[1:]) + + return rc2i_out + + +def c2i00b(date1, date2): + """ + Wrapper for ERFA function ``eraC2i00b``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + rc2i : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a C 2 i 0 0 b + - - - - - - - - - - + + Form the celestial-to-intermediate matrix for a given date using the + IAU 2000B precession-nutation model. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + rc2i double[3][3] celestial-to-intermediate matrix (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The matrix rc2i is the first stage in the transformation from + celestial to terrestrial coordinates: + + [TRS] = RPOM * R_3(ERA) * rc2i * [CRS] + + = rc2t * [CRS] + + where [CRS] is a vector in the Geocentric Celestial Reference + System and [TRS] is a vector in the International Terrestrial + Reference System (see IERS Conventions 2003), ERA is the Earth + Rotation Angle and RPOM is the polar motion matrix. + + 3) The present function is faster, but slightly less accurate (about + 1 mas), than the eraC2i00a function. + + Called: + eraPnm00b classical NPB matrix, IAU 2000B + eraC2ibpn celestial-to-intermediate matrix, given NPB matrix + + References: + + "Expressions for the Celestial Intermediate Pole and Celestial + Ephemeris Origin consistent with the IAU 2000A precession- + nutation model", Astron.Astrophys. 400, 1145-1154 + (2003) + + n.b. 
The celestial ephemeris origin (CEO) was renamed "celestial + intermediate origin" (CIO) by IAU 2006 Resolution 2. + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + rc2i_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, rc2i_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._c2i00b(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rc2i_out.shape) > 0 and rc2i_out.shape[0] == 1 + rc2i_out = rc2i_out.reshape(rc2i_out.shape[1:]) + + return rc2i_out + + +def c2i06a(date1, date2): + """ + Wrapper for ERFA function ``eraC2i06a``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + rc2i : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a C 2 i 0 6 a + - - - - - - - - - - + + Form the celestial-to-intermediate matrix for a given date using the + IAU 2006 precession and IAU 2000A nutation models. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + rc2i double[3][3] celestial-to-intermediate matrix (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The matrix rc2i is the first stage in the transformation from + celestial to terrestrial coordinates: + + [TRS] = RPOM * R_3(ERA) * rc2i * [CRS] + + = RC2T * [CRS] + + where [CRS] is a vector in the Geocentric Celestial Reference + System and [TRS] is a vector in the International Terrestrial + Reference System (see IERS Conventions 2003), ERA is the Earth + Rotation Angle and RPOM is the polar motion matrix. + + Called: + eraPnm06a classical NPB matrix, IAU 2006/2000A + eraBpn2xy extract CIP X,Y coordinates from NPB matrix + eraS06 the CIO locator s, given X,Y, IAU 2006 + eraC2ixys celestial-to-intermediate matrix, given X,Y and s + + References: + + McCarthy, D. D., Petit, G. (eds.), 2004, IERS Conventions (2003), + IERS Technical Note No. 
32, BKG
+
+    Copyright (C) 2013-2017, NumFOCUS Foundation.
+    Derived, with permission, from the SOFA library.  See notes at end of file.
+
+    """
+
+    #Turn all inputs into arrays
+    date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True)
+    date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True)
+    make_outputs_scalar = False
+
+    #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed
+    broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in)
+    rc2i_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double)
+
+    #Create the iterator, broadcasting on all but the consumed dimensions
+    arrs = [date1_in, date2_in, rc2i_out[...,0,0]]
+    op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs]
+    op_flags = [['readonly']]*2 + [['readwrite']]*1
+    it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags)
+
+    #Iterate
+    stat_ok = _core._c2i06a(it)
+    #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d
+    if make_outputs_scalar:
+        assert len(rc2i_out.shape) > 0 and rc2i_out.shape[0] == 1
+        rc2i_out = rc2i_out.reshape(rc2i_out.shape[1:])
+
+    return rc2i_out
+
+
+def c2ibpn(date1, date2, rbpn):
+    """
+    Wrapper for ERFA function ``eraC2ibpn``.
+
+    Parameters
+    ----------
+    date1 : double array
+    date2 : double array
+    rbpn : double array
+
+    Returns
+    -------
+    rc2i : double array
+
+    Notes
+    -----
+    The ERFA documentation is below.
+
+    - - - - - - - - - -
+     e r a C 2 i b p n
+    - - - - - - - - - -
+
+    Form the celestial-to-intermediate matrix for a given date given
+    the bias-precession-nutation matrix.  IAU 2000.
+
+    Given:
+       date1,date2 double       TT as a 2-part Julian Date (Note 1)
+       rbpn        double[3][3] celestial-to-true matrix (Note 2)
+
+    Returned:
+       rc2i        double[3][3] celestial-to-intermediate matrix (Note 3)
+
+    Notes:
+
+    1) The TT date date1+date2 is a Julian Date, apportioned in any
+       convenient way between the two arguments.  For example,
+       JD(TT)=2450123.7 could be expressed in any of these ways,
+       among others:
+
+              date1          date2
+
+           2450123.7           0.0       (JD method)
+           2451545.0       -1421.3       (J2000 method)
+           2400000.5       50123.2       (MJD method)
+           2450123.5           0.2       (date & time method)
+
+       The JD method is the most natural and convenient to use in
+       cases where the loss of several decimal digits of resolution
+       is acceptable.  The J2000 method is best matched to the way
+       the argument is handled internally and will deliver the
+       optimum resolution.  The MJD method and the date & time methods
+       are both good compromises between resolution and convenience.
+
+    2) The matrix rbpn transforms vectors from GCRS to true equator (and
+       CIO or equinox) of date.  Only the CIP (bottom row) is used.
+
+    3) The matrix rc2i is the first stage in the transformation from
+       celestial to terrestrial coordinates:
+
+          [TRS] = RPOM * R_3(ERA) * rc2i * [CRS]
+
+                = RC2T * [CRS]
+
+       where [CRS] is a vector in the Geocentric Celestial Reference
+       System and [TRS] is a vector in the International Terrestrial
+       Reference System (see IERS Conventions 2003), ERA is the Earth
+       Rotation Angle and RPOM is the polar motion matrix.
+
+    4) Although its name does not include "00", this function is in fact
+       specific to the IAU 2000 models.
+ + Called: + eraBpn2xy extract CIP X,Y coordinates from NPB matrix + eraC2ixy celestial-to-intermediate matrix, given X,Y + + References: + "Expressions for the Celestial Intermediate Pole and Celestial + Ephemeris Origin consistent with the IAU 2000A precession- + nutation model", Astron.Astrophys. 400, 1145-1154 (2003) + + n.b. The celestial ephemeris origin (CEO) was renamed "celestial + intermediate origin" (CIO) by IAU 2006 Resolution 2. + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + rbpn_in = numpy.array(rbpn, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(rbpn_in, (3, 3), "rbpn") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in, rbpn_in[...,0,0]) + rc2i_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, rbpn_in[...,0,0], rc2i_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._c2ibpn(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rc2i_out.shape) > 0 and rc2i_out.shape[0] == 1 + rc2i_out = rc2i_out.reshape(rc2i_out.shape[1:]) + + return rc2i_out + + +def c2ixy(date1, date2, x, y): + """ + Wrapper for ERFA function ``eraC2ixy``. + + Parameters + ---------- + date1 : double array + date2 : double array + x : double array + y : double array + + Returns + ------- + rc2i : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a C 2 i x y + - - - - - - - - - + + Form the celestial to intermediate-frame-of-date matrix for a given + date when the CIP X,Y coordinates are known. IAU 2000. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + x,y double Celestial Intermediate Pole (Note 2) + + Returned: + rc2i double[3][3] celestial-to-intermediate matrix (Note 3) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The Celestial Intermediate Pole coordinates are the x,y components + of the unit vector in the Geocentric Celestial Reference System. 
+
+    3) The matrix rc2i is the first stage in the transformation from
+       celestial to terrestrial coordinates:
+
+          [TRS] = RPOM * R_3(ERA) * rc2i * [CRS]
+
+                = RC2T * [CRS]
+
+       where [CRS] is a vector in the Geocentric Celestial Reference
+       System and [TRS] is a vector in the International Terrestrial
+       Reference System (see IERS Conventions 2003), ERA is the Earth
+       Rotation Angle and RPOM is the polar motion matrix.
+
+    4) Although its name does not include "00", this function is in fact
+       specific to the IAU 2000 models.
+
+    Called:
+       eraC2ixys    celestial-to-intermediate matrix, given X,Y and s
+       eraS00       the CIO locator s, given X,Y, IAU 2000A
+
+    Reference:
+
+       McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003),
+       IERS Technical Note No. 32, BKG (2004)
+
+    Copyright (C) 2013-2017, NumFOCUS Foundation.
+    Derived, with permission, from the SOFA library.  See notes at end of file.
+
+    """
+
+    #Turn all inputs into arrays
+    date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True)
+    date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True)
+    x_in = numpy.array(x, dtype=numpy.double, order="C", copy=False, subok=True)
+    y_in = numpy.array(y, dtype=numpy.double, order="C", copy=False, subok=True)
+    make_outputs_scalar = False
+
+    #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed
+    broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in, x_in, y_in)
+    rc2i_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double)
+
+    #Create the iterator, broadcasting on all but the consumed dimensions
+    arrs = [date1_in, date2_in, x_in, y_in, rc2i_out[...,0,0]]
+    op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs]
+    op_flags = [['readonly']]*4 + [['readwrite']]*1
+    it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags)
+
+    #Iterate
+    stat_ok = _core._c2ixy(it)
+    #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d
+    if make_outputs_scalar:
+        assert len(rc2i_out.shape) > 0 and rc2i_out.shape[0] == 1
+        rc2i_out = rc2i_out.reshape(rc2i_out.shape[1:])
+
+    return rc2i_out
+
+
+def c2ixys(x, y, s):
+    """
+    Wrapper for ERFA function ``eraC2ixys``.
+
+    Parameters
+    ----------
+    x : double array
+    y : double array
+    s : double array
+
+    Returns
+    -------
+    rc2i : double array
+
+    Notes
+    -----
+    The ERFA documentation is below.
+
+    - - - - - - - - - -
+     e r a C 2 i x y s
+    - - - - - - - - - -
+
+    Form the celestial to intermediate-frame-of-date matrix given the CIP
+    X,Y and the CIO locator s.
+
+    Given:
+       x,y      double         Celestial Intermediate Pole (Note 1)
+       s        double         the CIO locator s (Note 2)
+
+    Returned:
+       rc2i     double[3][3]   celestial-to-intermediate matrix (Note 3)
+
+    Notes:
+
+    1) The Celestial Intermediate Pole coordinates are the x,y
+       components of the unit vector in the Geocentric Celestial
+       Reference System.
+
+    2) The CIO locator s (in radians) positions the Celestial
+       Intermediate Origin on the equator of the CIP.
+
+    3) The matrix rc2i is the first stage in the transformation from
+       celestial to terrestrial coordinates:
+
+          [TRS] = RPOM * R_3(ERA) * rc2i * [CRS]
+
+                = RC2T * [CRS]
+
+       where [CRS] is a vector in the Geocentric Celestial Reference
+       System and [TRS] is a vector in the International Terrestrial
+       Reference System (see IERS Conventions 2003), ERA is the Earth
+       Rotation Angle and RPOM is the polar motion matrix.
+ + Called: + eraIr initialize r-matrix to identity + eraRz rotate around Z-axis + eraRy rotate around Y-axis + + Reference: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + x_in = numpy.array(x, dtype=numpy.double, order="C", copy=False, subok=True) + y_in = numpy.array(y, dtype=numpy.double, order="C", copy=False, subok=True) + s_in = numpy.array(s, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), x_in, y_in, s_in) + rc2i_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [x_in, y_in, s_in, rc2i_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._c2ixys(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rc2i_out.shape) > 0 and rc2i_out.shape[0] == 1 + rc2i_out = rc2i_out.reshape(rc2i_out.shape[1:]) + + return rc2i_out + + +def c2t00a(tta, ttb, uta, utb, xp, yp): + """ + Wrapper for ERFA function ``eraC2t00a``. + + Parameters + ---------- + tta : double array + ttb : double array + uta : double array + utb : double array + xp : double array + yp : double array + + Returns + ------- + rc2t : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a C 2 t 0 0 a + - - - - - - - - - - + + Form the celestial to terrestrial matrix given the date, the UT1 and + the polar motion, using the IAU 2000A nutation model. + + Given: + tta,ttb double TT as a 2-part Julian Date (Note 1) + uta,utb double UT1 as a 2-part Julian Date (Note 1) + xp,yp double coordinates of the pole (radians, Note 2) + + Returned: + rc2t double[3][3] celestial-to-terrestrial matrix (Note 3) + + Notes: + + 1) The TT and UT1 dates tta+ttb and uta+utb are Julian Dates, + apportioned in any convenient way between the arguments uta and + utb. For example, JD(UT1)=2450123.7 could be expressed in any of + these ways, among others: + + uta utb + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution is + acceptable. The J2000 and MJD methods are good compromises + between resolution and convenience. In the case of uta,utb, the + date & time method is best matched to the Earth rotation angle + algorithm used: maximum precision is delivered when the uta + argument is for 0hrs UT1 on the day in question and the utb + argument lies in the range 0 to 1, or vice versa. + + 2) The arguments xp and yp are the coordinates (in radians) of the + Celestial Intermediate Pole with respect to the International + Terrestrial Reference System (see IERS Conventions 2003), + measured along the meridians to 0 and 90 deg west respectively. 
+ + 3) The matrix rc2t transforms from celestial to terrestrial + coordinates: + + [TRS] = RPOM * R_3(ERA) * RC2I * [CRS] + + = rc2t * [CRS] + + where [CRS] is a vector in the Geocentric Celestial Reference + System and [TRS] is a vector in the International Terrestrial + Reference System (see IERS Conventions 2003), RC2I is the + celestial-to-intermediate matrix, ERA is the Earth rotation + angle and RPOM is the polar motion matrix. + + 4) A faster, but slightly less accurate result (about 1 mas), can + be obtained by using instead the eraC2t00b function. + + Called: + eraC2i00a celestial-to-intermediate matrix, IAU 2000A + eraEra00 Earth rotation angle, IAU 2000 + eraSp00 the TIO locator s', IERS 2000 + eraPom00 polar motion matrix + eraC2tcio form CIO-based celestial-to-terrestrial matrix + + Reference: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + tta_in = numpy.array(tta, dtype=numpy.double, order="C", copy=False, subok=True) + ttb_in = numpy.array(ttb, dtype=numpy.double, order="C", copy=False, subok=True) + uta_in = numpy.array(uta, dtype=numpy.double, order="C", copy=False, subok=True) + utb_in = numpy.array(utb, dtype=numpy.double, order="C", copy=False, subok=True) + xp_in = numpy.array(xp, dtype=numpy.double, order="C", copy=False, subok=True) + yp_in = numpy.array(yp, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), tta_in, ttb_in, uta_in, utb_in, xp_in, yp_in) + rc2t_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [tta_in, ttb_in, uta_in, utb_in, xp_in, yp_in, rc2t_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*6 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._c2t00a(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rc2t_out.shape) > 0 and rc2t_out.shape[0] == 1 + rc2t_out = rc2t_out.reshape(rc2t_out.shape[1:]) + + return rc2t_out + + +def c2t00b(tta, ttb, uta, utb, xp, yp): + """ + Wrapper for ERFA function ``eraC2t00b``. + + Parameters + ---------- + tta : double array + ttb : double array + uta : double array + utb : double array + xp : double array + yp : double array + + Returns + ------- + rc2t : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a C 2 t 0 0 b + - - - - - - - - - - + + Form the celestial to terrestrial matrix given the date, the UT1 and + the polar motion, using the IAU 2000B nutation model. + + Given: + tta,ttb double TT as a 2-part Julian Date (Note 1) + uta,utb double UT1 as a 2-part Julian Date (Note 1) + xp,yp double coordinates of the pole (radians, Note 2) + + Returned: + rc2t double[3][3] celestial-to-terrestrial matrix (Note 3) + + Notes: + + 1) The TT and UT1 dates tta+ttb and uta+utb are Julian Dates, + apportioned in any convenient way between the arguments uta and + utb. 
For example, JD(UT1)=2450123.7 could be expressed in any of + these ways, among others: + + uta utb + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution is + acceptable. The J2000 and MJD methods are good compromises + between resolution and convenience. In the case of uta,utb, the + date & time method is best matched to the Earth rotation angle + algorithm used: maximum precision is delivered when the uta + argument is for 0hrs UT1 on the day in question and the utb + argument lies in the range 0 to 1, or vice versa. + + 2) The arguments xp and yp are the coordinates (in radians) of the + Celestial Intermediate Pole with respect to the International + Terrestrial Reference System (see IERS Conventions 2003), + measured along the meridians to 0 and 90 deg west respectively. + + 3) The matrix rc2t transforms from celestial to terrestrial + coordinates: + + [TRS] = RPOM * R_3(ERA) * RC2I * [CRS] + + = rc2t * [CRS] + + where [CRS] is a vector in the Geocentric Celestial Reference + System and [TRS] is a vector in the International Terrestrial + Reference System (see IERS Conventions 2003), RC2I is the + celestial-to-intermediate matrix, ERA is the Earth rotation + angle and RPOM is the polar motion matrix. + + 4) The present function is faster, but slightly less accurate (about + 1 mas), than the eraC2t00a function. + + Called: + eraC2i00b celestial-to-intermediate matrix, IAU 2000B + eraEra00 Earth rotation angle, IAU 2000 + eraPom00 polar motion matrix + eraC2tcio form CIO-based celestial-to-terrestrial matrix + + Reference: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
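+
+    A minimal usage sketch (illustrative TT and UT1 2-part Julian
+    Dates; xp = yp = 0 neglects polar motion):
+
+    >>> from astropy import _erfa as erfa
+    >>> rc2t = erfa.c2t00b(2453750.5, 0.892482639,
+    ...                    2453750.5, 0.892104561, 0.0, 0.0)
+    >>> rc2t.shape
+    (3, 3)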
+ + """ + + #Turn all inputs into arrays + tta_in = numpy.array(tta, dtype=numpy.double, order="C", copy=False, subok=True) + ttb_in = numpy.array(ttb, dtype=numpy.double, order="C", copy=False, subok=True) + uta_in = numpy.array(uta, dtype=numpy.double, order="C", copy=False, subok=True) + utb_in = numpy.array(utb, dtype=numpy.double, order="C", copy=False, subok=True) + xp_in = numpy.array(xp, dtype=numpy.double, order="C", copy=False, subok=True) + yp_in = numpy.array(yp, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), tta_in, ttb_in, uta_in, utb_in, xp_in, yp_in) + rc2t_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [tta_in, ttb_in, uta_in, utb_in, xp_in, yp_in, rc2t_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*6 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._c2t00b(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rc2t_out.shape) > 0 and rc2t_out.shape[0] == 1 + rc2t_out = rc2t_out.reshape(rc2t_out.shape[1:]) + + return rc2t_out + + +def c2t06a(tta, ttb, uta, utb, xp, yp): + """ + Wrapper for ERFA function ``eraC2t06a``. + + Parameters + ---------- + tta : double array + ttb : double array + uta : double array + utb : double array + xp : double array + yp : double array + + Returns + ------- + rc2t : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a C 2 t 0 6 a + - - - - - - - - - - + + Form the celestial to terrestrial matrix given the date, the UT1 and + the polar motion, using the IAU 2006 precession and IAU 2000A + nutation models. + + Given: + tta,ttb double TT as a 2-part Julian Date (Note 1) + uta,utb double UT1 as a 2-part Julian Date (Note 1) + xp,yp double coordinates of the pole (radians, Note 2) + + Returned: + rc2t double[3][3] celestial-to-terrestrial matrix (Note 3) + + Notes: + + 1) The TT and UT1 dates tta+ttb and uta+utb are Julian Dates, + apportioned in any convenient way between the arguments uta and + utb. For example, JD(UT1)=2450123.7 could be expressed in any of + these ways, among others: + + uta utb + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution is + acceptable. The J2000 and MJD methods are good compromises + between resolution and convenience. In the case of uta,utb, the + date & time method is best matched to the Earth rotation angle + algorithm used: maximum precision is delivered when the uta + argument is for 0hrs UT1 on the day in question and the utb + argument lies in the range 0 to 1, or vice versa. + + 2) The arguments xp and yp are the coordinates (in radians) of the + Celestial Intermediate Pole with respect to the International + Terrestrial Reference System (see IERS Conventions 2003), + measured along the meridians to 0 and 90 deg west respectively. 
+ + 3) The matrix rc2t transforms from celestial to terrestrial + coordinates: + + [TRS] = RPOM * R_3(ERA) * RC2I * [CRS] + + = rc2t * [CRS] + + where [CRS] is a vector in the Geocentric Celestial Reference + System and [TRS] is a vector in the International Terrestrial + Reference System (see IERS Conventions 2003), RC2I is the + celestial-to-intermediate matrix, ERA is the Earth rotation + angle and RPOM is the polar motion matrix. + + Called: + eraC2i06a celestial-to-intermediate matrix, IAU 2006/2000A + eraEra00 Earth rotation angle, IAU 2000 + eraSp00 the TIO locator s', IERS 2000 + eraPom00 polar motion matrix + eraC2tcio form CIO-based celestial-to-terrestrial matrix + + Reference: + + McCarthy, D. D., Petit, G. (eds.), 2004, IERS Conventions (2003), + IERS Technical Note No. 32, BKG + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + tta_in = numpy.array(tta, dtype=numpy.double, order="C", copy=False, subok=True) + ttb_in = numpy.array(ttb, dtype=numpy.double, order="C", copy=False, subok=True) + uta_in = numpy.array(uta, dtype=numpy.double, order="C", copy=False, subok=True) + utb_in = numpy.array(utb, dtype=numpy.double, order="C", copy=False, subok=True) + xp_in = numpy.array(xp, dtype=numpy.double, order="C", copy=False, subok=True) + yp_in = numpy.array(yp, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), tta_in, ttb_in, uta_in, utb_in, xp_in, yp_in) + rc2t_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [tta_in, ttb_in, uta_in, utb_in, xp_in, yp_in, rc2t_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*6 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._c2t06a(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rc2t_out.shape) > 0 and rc2t_out.shape[0] == 1 + rc2t_out = rc2t_out.reshape(rc2t_out.shape[1:]) + + return rc2t_out + + +def c2tcio(rc2i, era, rpom): + """ + Wrapper for ERFA function ``eraC2tcio``. + + Parameters + ---------- + rc2i : double array + era : double array + rpom : double array + + Returns + ------- + rc2t : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a C 2 t c i o + - - - - - - - - - - + + Assemble the celestial to terrestrial matrix from CIO-based + components (the celestial-to-intermediate matrix, the Earth Rotation + Angle and the polar motion matrix). + + Given: + rc2i double[3][3] celestial-to-intermediate matrix + era double Earth rotation angle (radians) + rpom double[3][3] polar-motion matrix + + Returned: + rc2t double[3][3] celestial-to-terrestrial matrix + + Notes: + + 1) This function constructs the rotation matrix that transforms + vectors in the celestial system into vectors in the terrestrial + system. It does so starting from precomputed components, namely + the matrix which rotates from celestial coordinates to the + intermediate frame, the Earth rotation angle and the polar motion + matrix. 
One use of the present function is when generating a + series of celestial-to-terrestrial matrices where only the Earth + Rotation Angle changes, avoiding the considerable overhead of + recomputing the precession-nutation more often than necessary to + achieve given accuracy objectives. + + 2) The relationship between the arguments is as follows: + + [TRS] = RPOM * R_3(ERA) * rc2i * [CRS] + + = rc2t * [CRS] + + where [CRS] is a vector in the Geocentric Celestial Reference + System and [TRS] is a vector in the International Terrestrial + Reference System (see IERS Conventions 2003). + + Called: + eraCr copy r-matrix + eraRz rotate around Z-axis + eraRxr product of two r-matrices + + Reference: + + McCarthy, D. D., Petit, G. (eds.), 2004, IERS Conventions (2003), + IERS Technical Note No. 32, BKG + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + rc2i_in = numpy.array(rc2i, dtype=numpy.double, order="C", copy=False, subok=True) + era_in = numpy.array(era, dtype=numpy.double, order="C", copy=False, subok=True) + rpom_in = numpy.array(rpom, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(rc2i_in, (3, 3), "rc2i") + check_trailing_shape(rpom_in, (3, 3), "rpom") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), rc2i_in[...,0,0], era_in, rpom_in[...,0,0]) + rc2t_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [rc2i_in[...,0,0], era_in, rpom_in[...,0,0], rc2t_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._c2tcio(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rc2t_out.shape) > 0 and rc2t_out.shape[0] == 1 + rc2t_out = rc2t_out.reshape(rc2t_out.shape[1:]) + + return rc2t_out + + +def c2teqx(rbpn, gst, rpom): + """ + Wrapper for ERFA function ``eraC2teqx``. + + Parameters + ---------- + rbpn : double array + gst : double array + rpom : double array + + Returns + ------- + rc2t : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a C 2 t e q x + - - - - - - - - - - + + Assemble the celestial to terrestrial matrix from equinox-based + components (the celestial-to-true matrix, the Greenwich Apparent + Sidereal Time and the polar motion matrix). + + Given: + rbpn double[3][3] celestial-to-true matrix + gst double Greenwich (apparent) Sidereal Time (radians) + rpom double[3][3] polar-motion matrix + + Returned: + rc2t double[3][3] celestial-to-terrestrial matrix (Note 2) + + Notes: + + 1) This function constructs the rotation matrix that transforms + vectors in the celestial system into vectors in the terrestrial + system. It does so starting from precomputed components, namely + the matrix which rotates from celestial coordinates to the + true equator and equinox of date, the Greenwich Apparent Sidereal + Time and the polar motion matrix. 
One use of the present function + is when generating a series of celestial-to-terrestrial matrices + where only the Sidereal Time changes, avoiding the considerable + overhead of recomputing the precession-nutation more often than + necessary to achieve given accuracy objectives. + + 2) The relationship between the arguments is as follows: + + [TRS] = rpom * R_3(gst) * rbpn * [CRS] + + = rc2t * [CRS] + + where [CRS] is a vector in the Geocentric Celestial Reference + System and [TRS] is a vector in the International Terrestrial + Reference System (see IERS Conventions 2003). + + Called: + eraCr copy r-matrix + eraRz rotate around Z-axis + eraRxr product of two r-matrices + + Reference: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + rbpn_in = numpy.array(rbpn, dtype=numpy.double, order="C", copy=False, subok=True) + gst_in = numpy.array(gst, dtype=numpy.double, order="C", copy=False, subok=True) + rpom_in = numpy.array(rpom, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(rbpn_in, (3, 3), "rbpn") + check_trailing_shape(rpom_in, (3, 3), "rpom") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), rbpn_in[...,0,0], gst_in, rpom_in[...,0,0]) + rc2t_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [rbpn_in[...,0,0], gst_in, rpom_in[...,0,0], rc2t_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._c2teqx(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rc2t_out.shape) > 0 and rc2t_out.shape[0] == 1 + rc2t_out = rc2t_out.reshape(rc2t_out.shape[1:]) + + return rc2t_out + + +def c2tpe(tta, ttb, uta, utb, dpsi, deps, xp, yp): + """ + Wrapper for ERFA function ``eraC2tpe``. + + Parameters + ---------- + tta : double array + ttb : double array + uta : double array + utb : double array + dpsi : double array + deps : double array + xp : double array + yp : double array + + Returns + ------- + rc2t : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a C 2 t p e + - - - - - - - - - + + Form the celestial to terrestrial matrix given the date, the UT1, + the nutation and the polar motion. IAU 2000. + + Given: + tta,ttb double TT as a 2-part Julian Date (Note 1) + uta,utb double UT1 as a 2-part Julian Date (Note 1) + dpsi,deps double nutation (Note 2) + xp,yp double coordinates of the pole (radians, Note 3) + + Returned: + rc2t double[3][3] celestial-to-terrestrial matrix (Note 4) + + Notes: + + 1) The TT and UT1 dates tta+ttb and uta+utb are Julian Dates, + apportioned in any convenient way between the arguments uta and + utb. 
For example, JD(UT1)=2450123.7 could be expressed in any of
+       these ways, among others:
+
+               uta            utb
+
+           2450123.7           0.0       (JD method)
+           2451545.0       -1421.3       (J2000 method)
+           2400000.5       50123.2       (MJD method)
+           2450123.5           0.2       (date & time method)
+
+       The JD method is the most natural and convenient to use in
+       cases where the loss of several decimal digits of resolution is
+       acceptable.  The J2000 and MJD methods are good compromises
+       between resolution and convenience.  In the case of uta,utb, the
+       date & time method is best matched to the Earth rotation angle
+       algorithm used:  maximum precision is delivered when the uta
+       argument is for 0hrs UT1 on the day in question and the utb
+       argument lies in the range 0 to 1, or vice versa.
+
+    2) The caller is responsible for providing the nutation components;
+       they are in longitude and obliquity, in radians and are with
+       respect to the equinox and ecliptic of date.  For high-accuracy
+       applications, free core nutation should be included as well as
+       any other relevant corrections to the position of the CIP.
+
+    3) The arguments xp and yp are the coordinates (in radians) of the
+       Celestial Intermediate Pole with respect to the International
+       Terrestrial Reference System (see IERS Conventions 2003),
+       measured along the meridians to 0 and 90 deg west respectively.
+
+    4) The matrix rc2t transforms from celestial to terrestrial
+       coordinates:
+
+          [TRS] = RPOM * R_3(GST) * RBPN * [CRS]
+
+                = rc2t * [CRS]
+
+       where [CRS] is a vector in the Geocentric Celestial Reference
+       System and [TRS] is a vector in the International Terrestrial
+       Reference System (see IERS Conventions 2003), RBPN is the
+       bias-precession-nutation matrix, GST is the Greenwich (apparent)
+       Sidereal Time and RPOM is the polar motion matrix.
+
+    5) Although its name does not include "00", this function is in fact
+       specific to the IAU 2000 models.
+
+    Called:
+       eraPn00      bias/precession/nutation results, IAU 2000
+       eraGmst00    Greenwich mean sidereal time, IAU 2000
+       eraSp00      the TIO locator s', IERS 2000
+       eraEe00      equation of the equinoxes, IAU 2000
+       eraPom00     polar motion matrix
+       eraC2teqx    form equinox-based celestial-to-terrestrial matrix
+
+    Reference:
+
+       McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003),
+       IERS Technical Note No. 32, BKG (2004)
+
+    Copyright (C) 2013-2017, NumFOCUS Foundation.
+    Derived, with permission, from the SOFA library.  See notes at end of file.
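+
+    A minimal usage sketch (all values illustrative; dpsi and deps
+    would normally come from a nutation model, e.g. ``nut00a``):
+
+    >>> from astropy import _erfa as erfa
+    >>> rc2t = erfa.c2tpe(2453750.5, 0.892482639,   # TT
+    ...                   2453750.5, 0.892104561,   # UT1
+    ...                   -0.6963e-4, 0.4063e-4,    # dpsi, deps (radians)
+    ...                   0.0, 0.0)                 # xp, yp
+    >>> rc2t.shape
+    (3, 3)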
+ + """ + + #Turn all inputs into arrays + tta_in = numpy.array(tta, dtype=numpy.double, order="C", copy=False, subok=True) + ttb_in = numpy.array(ttb, dtype=numpy.double, order="C", copy=False, subok=True) + uta_in = numpy.array(uta, dtype=numpy.double, order="C", copy=False, subok=True) + utb_in = numpy.array(utb, dtype=numpy.double, order="C", copy=False, subok=True) + dpsi_in = numpy.array(dpsi, dtype=numpy.double, order="C", copy=False, subok=True) + deps_in = numpy.array(deps, dtype=numpy.double, order="C", copy=False, subok=True) + xp_in = numpy.array(xp, dtype=numpy.double, order="C", copy=False, subok=True) + yp_in = numpy.array(yp, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), tta_in, ttb_in, uta_in, utb_in, dpsi_in, deps_in, xp_in, yp_in) + rc2t_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [tta_in, ttb_in, uta_in, utb_in, dpsi_in, deps_in, xp_in, yp_in, rc2t_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*8 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._c2tpe(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rc2t_out.shape) > 0 and rc2t_out.shape[0] == 1 + rc2t_out = rc2t_out.reshape(rc2t_out.shape[1:]) + + return rc2t_out + + +def c2txy(tta, ttb, uta, utb, x, y, xp, yp): + """ + Wrapper for ERFA function ``eraC2txy``. + + Parameters + ---------- + tta : double array + ttb : double array + uta : double array + utb : double array + x : double array + y : double array + xp : double array + yp : double array + + Returns + ------- + rc2t : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a C 2 t x y + - - - - - - - - - + + Form the celestial to terrestrial matrix given the date, the UT1, + the CIP coordinates and the polar motion. IAU 2000. + + Given: + tta,ttb double TT as a 2-part Julian Date (Note 1) + uta,utb double UT1 as a 2-part Julian Date (Note 1) + x,y double Celestial Intermediate Pole (Note 2) + xp,yp double coordinates of the pole (radians, Note 3) + + Returned: + rc2t double[3][3] celestial-to-terrestrial matrix (Note 4) + + Notes: + + 1) The TT and UT1 dates tta+ttb and uta+utb are Julian Dates, + apportioned in any convenient way between the arguments uta and + utb. For example, JD(UT1)=2450123.7 could be expressed in any o + these ways, among others: + + uta utb + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution is + acceptable. The J2000 and MJD methods are good compromises + between resolution and convenience. In the case of uta,utb, the + date & time method is best matched to the Earth rotation angle + algorithm used: maximum precision is delivered when the uta + argument is for 0hrs UT1 on the day in question and the utb + argument lies in the range 0 to 1, or vice versa. 
+
+    2) The Celestial Intermediate Pole coordinates are the x,y
+       components of the unit vector in the Geocentric Celestial
+       Reference System.
+
+    3) The arguments xp and yp are the coordinates (in radians) of the
+       Celestial Intermediate Pole with respect to the International
+       Terrestrial Reference System (see IERS Conventions 2003),
+       measured along the meridians to 0 and 90 deg west respectively.
+
+    4) The matrix rc2t transforms from celestial to terrestrial
+       coordinates:
+
+          [TRS] = RPOM * R_3(ERA) * RC2I * [CRS]
+
+                = rc2t * [CRS]
+
+       where [CRS] is a vector in the Geocentric Celestial Reference
+       System and [TRS] is a vector in the International Terrestrial
+       Reference System (see IERS Conventions 2003), ERA is the Earth
+       Rotation Angle and RPOM is the polar motion matrix.
+
+    5) Although its name does not include "00", this function is in fact
+       specific to the IAU 2000 models.
+
+    Called:
+       eraC2ixy     celestial-to-intermediate matrix, given X,Y
+       eraEra00     Earth rotation angle, IAU 2000
+       eraSp00      the TIO locator s', IERS 2000
+       eraPom00     polar motion matrix
+       eraC2tcio    form CIO-based celestial-to-terrestrial matrix
+
+    Reference:
+
+       McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003),
+       IERS Technical Note No. 32, BKG (2004)
+
+    Copyright (C) 2013-2017, NumFOCUS Foundation.
+    Derived, with permission, from the SOFA library.  See notes at end of file.
+
+    """
+
+    #Turn all inputs into arrays
+    tta_in = numpy.array(tta, dtype=numpy.double, order="C", copy=False, subok=True)
+    ttb_in = numpy.array(ttb, dtype=numpy.double, order="C", copy=False, subok=True)
+    uta_in = numpy.array(uta, dtype=numpy.double, order="C", copy=False, subok=True)
+    utb_in = numpy.array(utb, dtype=numpy.double, order="C", copy=False, subok=True)
+    x_in = numpy.array(x, dtype=numpy.double, order="C", copy=False, subok=True)
+    y_in = numpy.array(y, dtype=numpy.double, order="C", copy=False, subok=True)
+    xp_in = numpy.array(xp, dtype=numpy.double, order="C", copy=False, subok=True)
+    yp_in = numpy.array(yp, dtype=numpy.double, order="C", copy=False, subok=True)
+    make_outputs_scalar = False
+
+    #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed
+    broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), tta_in, ttb_in, uta_in, utb_in, x_in, y_in, xp_in, yp_in)
+    rc2t_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double)
+
+    #Create the iterator, broadcasting on all but the consumed dimensions
+    arrs = [tta_in, ttb_in, uta_in, utb_in, x_in, y_in, xp_in, yp_in, rc2t_out[...,0,0]]
+    op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs]
+    op_flags = [['readonly']]*8 + [['readwrite']]*1
+    it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags)
+
+    #Iterate
+    stat_ok = _core._c2txy(it)
+    #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d
+    if make_outputs_scalar:
+        assert len(rc2t_out.shape) > 0 and rc2t_out.shape[0] == 1
+        rc2t_out = rc2t_out.reshape(rc2t_out.shape[1:])
+
+    return rc2t_out
+
+
+def eo06a(date1, date2):
+    """
+    Wrapper for ERFA function ``eraEo06a``.
+
+    Parameters
+    ----------
+    date1 : double array
+    date2 : double array
+
+    Returns
+    -------
+    c_retval : double array
+
+    Notes
+    -----
+    The ERFA documentation is below.
+
+    - - - - - - - - -
+     e r a E o 0 6 a
+    - - - - - - - - -
+
+    Equation of the origins, IAU 2006 precession and IAU 2000A nutation.
+ + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned (function value): + double equation of the origins in radians + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The equation of the origins is the distance between the true + equinox and the celestial intermediate origin and, equivalently, + the difference between Earth rotation angle and Greenwich + apparent sidereal time (ERA-GST). It comprises the precession + (since J2000.0) in right ascension plus the equation of the + equinoxes (including the small correction terms). + + Called: + eraPnm06a classical NPB matrix, IAU 2006/2000A + eraBpn2xy extract CIP X,Y coordinates from NPB matrix + eraS06 the CIO locator s, given X,Y, IAU 2006 + eraEors equation of the origins, given NPB matrix and s + + References: + + Capitaine, N. & Wallace, P.T., 2006, Astron.Astrophys. 450, 855 + + Wallace, P.T. & Capitaine, N., 2006, Astron.Astrophys. 459, 981 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._eo06a(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def eors(rnpb, s): + """ + Wrapper for ERFA function ``eraEors``. + + Parameters + ---------- + rnpb : double array + s : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a E o r s + - - - - - - - - + + Equation of the origins, given the classical NPB matrix and the + quantity s. + + Given: + rnpb double[3][3] classical nutation x precession x bias matrix + s double the quantity s (the CIO locator) + + Returned (function value): + double the equation of the origins in radians. 
+ + Notes: + + 1) The equation of the origins is the distance between the true + equinox and the celestial intermediate origin and, equivalently, + the difference between Earth rotation angle and Greenwich + apparent sidereal time (ERA-GST). It comprises the precession + (since J2000.0) in right ascension plus the equation of the + equinoxes (including the small correction terms). + + 2) The algorithm is from Wallace & Capitaine (2006). + + References: + + Capitaine, N. & Wallace, P.T., 2006, Astron.Astrophys. 450, 855 + + Wallace, P. & Capitaine, N., 2006, Astron.Astrophys. 459, 981 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + rnpb_in = numpy.array(rnpb, dtype=numpy.double, order="C", copy=False, subok=True) + s_in = numpy.array(s, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(rnpb_in, (3, 3), "rnpb") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), rnpb_in[...,0,0], s_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [rnpb_in[...,0,0], s_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._eors(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def fw2m(gamb, phib, psi, eps): + """ + Wrapper for ERFA function ``eraFw2m``. + + Parameters + ---------- + gamb : double array + phib : double array + psi : double array + eps : double array + + Returns + ------- + r : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a F w 2 m + - - - - - - - - + + Form rotation matrix given the Fukushima-Williams angles. + + Given: + gamb double F-W angle gamma_bar (radians) + phib double F-W angle phi_bar (radians) + psi double F-W angle psi (radians) + eps double F-W angle epsilon (radians) + + Returned: + r double[3][3] rotation matrix + + Notes: + + 1) Naming the following points: + + e = J2000.0 ecliptic pole, + p = GCRS pole, + E = ecliptic pole of date, + and P = CIP, + + the four Fukushima-Williams angles are as follows: + + gamb = gamma = epE + phib = phi = pE + psi = psi = pEP + eps = epsilon = EP + + 2) The matrix representing the combined effects of frame bias, + precession and nutation is: + + NxPxB = R_1(-eps).R_3(-psi).R_1(phib).R_3(gamb) + + 3) Three different matrices can be constructed, depending on the + supplied angles: + + o To obtain the nutation x precession x frame bias matrix, + generate the four precession angles, generate the nutation + components and add them to the psi_bar and epsilon_A angles, + and call the present function. + + o To obtain the precession x frame bias matrix, generate the + four precession angles and call the present function. + + o To obtain the frame bias matrix, generate the four precession + angles for date J2000.0 and call the present function. 
+ + The nutation-only and precession-only matrices can if necessary + be obtained by combining these three appropriately. + + Called: + eraIr initialize r-matrix to identity + eraRz rotate around Z-axis + eraRx rotate around X-axis + + Reference: + + Hilton, J. et al., 2006, Celest.Mech.Dyn.Astron. 94, 351 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + gamb_in = numpy.array(gamb, dtype=numpy.double, order="C", copy=False, subok=True) + phib_in = numpy.array(phib, dtype=numpy.double, order="C", copy=False, subok=True) + psi_in = numpy.array(psi, dtype=numpy.double, order="C", copy=False, subok=True) + eps_in = numpy.array(eps, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), gamb_in, phib_in, psi_in, eps_in) + r_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [gamb_in, phib_in, psi_in, eps_in, r_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._fw2m(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(r_out.shape) > 0 and r_out.shape[0] == 1 + r_out = r_out.reshape(r_out.shape[1:]) + + return r_out + + +def fw2xy(gamb, phib, psi, eps): + """ + Wrapper for ERFA function ``eraFw2xy``. + + Parameters + ---------- + gamb : double array + phib : double array + psi : double array + eps : double array + + Returns + ------- + x : double array + y : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a F w 2 x y + - - - - - - - - - + + CIP X,Y given Fukushima-Williams bias-precession-nutation angles. + + Given: + gamb double F-W angle gamma_bar (radians) + phib double F-W angle phi_bar (radians) + psi double F-W angle psi (radians) + eps double F-W angle epsilon (radians) + + Returned: + x,y double CIP unit vector X,Y + + Notes: + + 1) Naming the following points: + + e = J2000.0 ecliptic pole, + p = GCRS pole + E = ecliptic pole of date, + and P = CIP, + + the four Fukushima-Williams angles are as follows: + + gamb = gamma = epE + phib = phi = pE + psi = psi = pEP + eps = epsilon = EP + + 2) The matrix representing the combined effects of frame bias, + precession and nutation is: + + NxPxB = R_1(-epsA).R_3(-psi).R_1(phib).R_3(gamb) + + The returned values x,y are elements [2][0] and [2][1] of the + matrix. Near J2000.0, they are essentially angles in radians. + + Called: + eraFw2m F-W angles to r-matrix + eraBpn2xy extract CIP X,Y coordinates from NPB matrix + + Reference: + + Hilton, J. et al., 2006, Celest.Mech.Dyn.Astron. 94, 351 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
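+
+    Examples
+    --------
+    An illustrative sketch (not part of the quoted ERFA documentation),
+    assuming the private ``astropy._erfa`` import path; per Note 2 the
+    returned X,Y should equal elements [2][0] and [2][1] of the matrix
+    produced by ``fw2m`` for the same angles::
+
+        >>> import numpy as np
+        >>> from astropy import _erfa as erfa
+        >>> gamb, phib, psi, eps = 1e-5, 0.4, 2e-5, 0.4  # radians, illustrative
+        >>> x, y = erfa.fw2xy(gamb, phib, psi, eps)
+        >>> r = erfa.fw2m(gamb, phib, psi, eps)
+        >>> np.allclose([x, y], [r[2, 0], r[2, 1]])
+        True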
+ + """ + + #Turn all inputs into arrays + gamb_in = numpy.array(gamb, dtype=numpy.double, order="C", copy=False, subok=True) + phib_in = numpy.array(phib, dtype=numpy.double, order="C", copy=False, subok=True) + psi_in = numpy.array(psi, dtype=numpy.double, order="C", copy=False, subok=True) + eps_in = numpy.array(eps, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), gamb_in, phib_in, psi_in, eps_in) + x_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + y_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [gamb_in, phib_in, psi_in, eps_in, x_out, y_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._fw2xy(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(x_out.shape) > 0 and x_out.shape[0] == 1 + x_out = x_out.reshape(x_out.shape[1:]) + assert len(y_out.shape) > 0 and y_out.shape[0] == 1 + y_out = y_out.reshape(y_out.shape[1:]) + + return x_out, y_out + + +def ltp(epj): + """ + Wrapper for ERFA function ``eraLtp``. + + Parameters + ---------- + epj : double array + + Returns + ------- + rp : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - + e r a L t p + - - - - - - - + + Long-term precession matrix. + + Given: + epj double Julian epoch (TT) + + Returned: + rp double[3][3] precession matrix, J2000.0 to date + + Notes: + + 1) The matrix is in the sense + + P_date = rp x P_J2000, + + where P_J2000 is a vector with respect to the J2000.0 mean + equator and equinox and P_date is the same vector with respect to + the equator and equinox of epoch epj. + + 2) The Vondrak et al. (2011, 2012) 400 millennia precession model + agrees with the IAU 2006 precession at J2000.0 and stays within + 100 microarcseconds during the 20th and 21st centuries. It is + accurate to a few arcseconds throughout the historical period, + worsening to a few tenths of a degree at the end of the + +/- 200,000 year time span. + + Called: + eraLtpequ equator pole, long term + eraLtpecl ecliptic pole, long term + eraPxp vector product + eraPn normalize vector + + References: + + Vondrak, J., Capitaine, N. and Wallace, P., 2011, New precession + expressions, valid for long time intervals, Astron.Astrophys. 534, + A22 + + Vondrak, J., Capitaine, N. and Wallace, P., 2012, New precession + expressions, valid for long time intervals (Corrigendum), + Astron.Astrophys. 541, C1 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + epj_in = numpy.array(epj, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), epj_in) + rp_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [epj_in, rp_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._ltp(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rp_out.shape) > 0 and rp_out.shape[0] == 1 + rp_out = rp_out.reshape(rp_out.shape[1:]) + + return rp_out + + +def ltpb(epj): + """ + Wrapper for ERFA function ``eraLtpb``. + + Parameters + ---------- + epj : double array + + Returns + ------- + rpb : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a L t p b + - - - - - - - - + + Long-term precession matrix, including ICRS frame bias. + + Given: + epj double Julian epoch (TT) + + Returned: + rpb double[3][3] precession-bias matrix, J2000.0 to date + + Notes: + + 1) The matrix is in the sense + + P_date = rpb x P_ICRS, + + where P_ICRS is a vector in the Geocentric Celestial Reference + System, and P_date is the vector with respect to the Celestial + Intermediate Reference System at that date but with nutation + neglected. + + 2) A first order frame bias formulation is used, of sub- + microarcsecond accuracy compared with a full 3D rotation. + + 3) The Vondrak et al. (2011, 2012) 400 millennia precession model + agrees with the IAU 2006 precession at J2000.0 and stays within + 100 microarcseconds during the 20th and 21st centuries. It is + accurate to a few arcseconds throughout the historical period, + worsening to a few tenths of a degree at the end of the + +/- 200,000 year time span. + + References: + + Vondrak, J., Capitaine, N. and Wallace, P., 2011, New precession + expressions, valid for long time intervals, Astron.Astrophys. 534, + A22 + + Vondrak, J., Capitaine, N. and Wallace, P., 2012, New precession + expressions, valid for long time intervals (Corrigendum), + Astron.Astrophys. 541, C1 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + epj_in = numpy.array(epj, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), epj_in) + rpb_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [epj_in, rpb_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._ltpb(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rpb_out.shape) > 0 and rpb_out.shape[0] == 1 + rpb_out = rpb_out.reshape(rpb_out.shape[1:]) + + return rpb_out + + +def ltpecl(epj): + """ + Wrapper for ERFA function ``eraLtpecl``. + + Parameters + ---------- + epj : double array + + Returns + ------- + vec : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a L t p e c l + - - - - - - - - - - + + Long-term precession of the ecliptic. + + Given: + epj double Julian epoch (TT) + + Returned: + vec double[3] ecliptic pole unit vector + + Notes: + + 1) The returned vector is with respect to the J2000.0 mean equator + and equinox. + + 2) The Vondrak et al. (2011, 2012) 400 millennia precession model + agrees with the IAU 2006 precession at J2000.0 and stays within + 100 microarcseconds during the 20th and 21st centuries. It is + accurate to a few arcseconds throughout the historical period, + worsening to a few tenths of a degree at the end of the + +/- 200,000 year time span. + + References: + + Vondrak, J., Capitaine, N. and Wallace, P., 2011, New precession + expressions, valid for long time intervals, Astron.Astrophys. 534, + A22 + + Vondrak, J., Capitaine, N. and Wallace, P., 2012, New precession + expressions, valid for long time intervals (Corrigendum), + Astron.Astrophys. 541, C1 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + epj_in = numpy.array(epj, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), epj_in) + vec_out = numpy.empty(broadcast.shape + (3,), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [epj_in, vec_out[...,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._ltpecl(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(vec_out.shape) > 0 and vec_out.shape[0] == 1 + vec_out = vec_out.reshape(vec_out.shape[1:]) + + return vec_out + + +def ltpequ(epj): + """ + Wrapper for ERFA function ``eraLtpequ``. + + Parameters + ---------- + epj : double array + + Returns + ------- + veq : double array + + Notes + ----- + The ERFA documentation is below. 
+ + - - - - - - - - - - + e r a L t p e q u + - - - - - - - - - - + + Long-term precession of the equator. + + Given: + epj double Julian epoch (TT) + + Returned: + veq double[3] equator pole unit vector + + Notes: + + 1) The returned vector is with respect to the J2000.0 mean equator + and equinox. + + 2) The Vondrak et al. (2011, 2012) 400 millennia precession model + agrees with the IAU 2006 precession at J2000.0 and stays within + 100 microarcseconds during the 20th and 21st centuries. It is + accurate to a few arcseconds throughout the historical period, + worsening to a few tenths of a degree at the end of the + +/- 200,000 year time span. + + References: + + Vondrak, J., Capitaine, N. and Wallace, P., 2011, New precession + expressions, valid for long time intervals, Astron.Astrophys. 534, + A22 + + Vondrak, J., Capitaine, N. and Wallace, P., 2012, New precession + expressions, valid for long time intervals (Corrigendum), + Astron.Astrophys. 541, C1 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + epj_in = numpy.array(epj, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), epj_in) + veq_out = numpy.empty(broadcast.shape + (3,), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [epj_in, veq_out[...,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._ltpequ(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(veq_out.shape) > 0 and veq_out.shape[0] == 1 + veq_out = veq_out.reshape(veq_out.shape[1:]) + + return veq_out + + +def num00a(date1, date2): + """ + Wrapper for ERFA function ``eraNum00a``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + rmatn : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a N u m 0 0 a + - - - - - - - - - - + + Form the matrix of nutation for a given date, IAU 2000A model. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + rmatn double[3][3] nutation matrix + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. 
+ + 2) The matrix operates in the sense V(true) = rmatn * V(mean), where + the p-vector V(true) is with respect to the true equatorial triad + of date and the p-vector V(mean) is with respect to the mean + equatorial triad of date. + + 3) A faster, but slightly less accurate result (about 1 mas), can be + obtained by using instead the eraNum00b function. + + Called: + eraPn00a bias/precession/nutation, IAU 2000A + + Reference: + + Explanatory Supplement to the Astronomical Almanac, + P. Kenneth Seidelmann (ed), University Science Books (1992), + Section 3.222-3 (p114). + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + rmatn_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, rmatn_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._num00a(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rmatn_out.shape) > 0 and rmatn_out.shape[0] == 1 + rmatn_out = rmatn_out.reshape(rmatn_out.shape[1:]) + + return rmatn_out + + +def num00b(date1, date2): + """ + Wrapper for ERFA function ``eraNum00b``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + rmatn : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a N u m 0 0 b + - - - - - - - - - - + + Form the matrix of nutation for a given date, IAU 2000B model. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + rmatn double[3][3] nutation matrix + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The matrix operates in the sense V(true) = rmatn * V(mean), where + the p-vector V(true) is with respect to the true equatorial triad + of date and the p-vector V(mean) is with respect to the mean + equatorial triad of date. + + 3) The present function is faster, but slightly less accurate (about + 1 mas), than the eraNum00a function. + + Called: + eraPn00b bias/precession/nutation, IAU 2000B + + Reference: + + Explanatory Supplement to the Astronomical Almanac, + P. 
Kenneth Seidelmann (ed), University Science Books (1992), + Section 3.222-3 (p114). + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + rmatn_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, rmatn_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._num00b(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rmatn_out.shape) > 0 and rmatn_out.shape[0] == 1 + rmatn_out = rmatn_out.reshape(rmatn_out.shape[1:]) + + return rmatn_out + + +def num06a(date1, date2): + """ + Wrapper for ERFA function ``eraNum06a``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + rmatn : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a N u m 0 6 a + - - - - - - - - - - + + Form the matrix of nutation for a given date, IAU 2006/2000A model. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + rmatn double[3][3] nutation matrix + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The matrix operates in the sense V(true) = rmatn * V(mean), where + the p-vector V(true) is with respect to the true equatorial triad + of date and the p-vector V(mean) is with respect to the mean + equatorial triad of date. + + Called: + eraObl06 mean obliquity, IAU 2006 + eraNut06a nutation, IAU 2006/2000A + eraNumat form nutation matrix + + Reference: + + Explanatory Supplement to the Astronomical Almanac, + P. Kenneth Seidelmann (ed), University Science Books (1992), + Section 3.222-3 (p114). + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
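+
+    Examples
+    --------
+    A sketch of the two-part Julian Date convention of Note 1 (not part
+    of the quoted ERFA documentation), assuming the private
+    ``astropy._erfa`` import path; different splits of the same epoch
+    should agree to rounding error::
+
+        >>> import numpy as np
+        >>> from astropy import _erfa as erfa
+        >>> a = erfa.num06a(2450123.7, 0.0)      # JD method
+        >>> b = erfa.num06a(2400000.5, 50123.2)  # MJD method
+        >>> np.allclose(a, b)
+        True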
+ + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + rmatn_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, rmatn_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._num06a(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rmatn_out.shape) > 0 and rmatn_out.shape[0] == 1 + rmatn_out = rmatn_out.reshape(rmatn_out.shape[1:]) + + return rmatn_out + + +def numat(epsa, dpsi, deps): + """ + Wrapper for ERFA function ``eraNumat``. + + Parameters + ---------- + epsa : double array + dpsi : double array + deps : double array + + Returns + ------- + rmatn : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a N u m a t + - - - - - - - - - + + Form the matrix of nutation. + + Given: + epsa double mean obliquity of date (Note 1) + dpsi,deps double nutation (Note 2) + + Returned: + rmatn double[3][3] nutation matrix (Note 3) + + Notes: + + + 1) The supplied mean obliquity epsa, must be consistent with the + precession-nutation models from which dpsi and deps were obtained. + + 2) The caller is responsible for providing the nutation components; + they are in longitude and obliquity, in radians and are with + respect to the equinox and ecliptic of date. + + 3) The matrix operates in the sense V(true) = rmatn * V(mean), + where the p-vector V(true) is with respect to the true + equatorial triad of date and the p-vector V(mean) is with + respect to the mean equatorial triad of date. + + Called: + eraIr initialize r-matrix to identity + eraRx rotate around X-axis + eraRz rotate around Z-axis + + Reference: + + Explanatory Supplement to the Astronomical Almanac, + P. Kenneth Seidelmann (ed), University Science Books (1992), + Section 3.222-3 (p114). + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + epsa_in = numpy.array(epsa, dtype=numpy.double, order="C", copy=False, subok=True) + dpsi_in = numpy.array(dpsi, dtype=numpy.double, order="C", copy=False, subok=True) + deps_in = numpy.array(deps, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), epsa_in, dpsi_in, deps_in) + rmatn_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [epsa_in, dpsi_in, deps_in, rmatn_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._numat(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rmatn_out.shape) > 0 and rmatn_out.shape[0] == 1 + rmatn_out = rmatn_out.reshape(rmatn_out.shape[1:]) + + return rmatn_out + + +def nut00a(date1, date2): + """ + Wrapper for ERFA function ``eraNut00a``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + dpsi : double array + deps : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a N u t 0 0 a + - - - - - - - - - - + + Nutation, IAU 2000A model (MHB2000 luni-solar and planetary nutation + with free core nutation omitted). + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + dpsi,deps double nutation, luni-solar + planetary (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The nutation components in longitude and obliquity are in radians + and with respect to the equinox and ecliptic of date. The + obliquity at J2000.0 is assumed to be the Lieske et al. (1977) + value of 84381.448 arcsec. + + Both the luni-solar and planetary nutations are included. The + latter are due to direct planetary nutations and the + perturbations of the lunar and terrestrial orbits. + + 3) The function computes the MHB2000 nutation series with the + associated corrections for planetary nutations. It is an + implementation of the nutation part of the IAU 2000A precession- + nutation model, formally adopted by the IAU General Assembly in + 2000, namely MHB2000 (Mathews et al. 2002), but with the free + core nutation (FCN - see Note 4) omitted. + + 4) The full MHB2000 model also contains contributions to the + nutations in longitude and obliquity due to the free-excitation + of the free-core-nutation during the period 1979-2000. 
These FCN + terms, which are time-dependent and unpredictable, are NOT + included in the present function and, if required, must be + independently computed. With the FCN corrections included, the + present function delivers a pole which is at current epochs + accurate to a few hundred microarcseconds. The omission of FCN + introduces further errors of about that size. + + 5) The present function provides classical nutation. The MHB2000 + algorithm, from which it is adapted, deals also with (i) the + offsets between the GCRS and mean poles and (ii) the adjustments + in longitude and obliquity due to the changed precession rates. + These additional functions, namely frame bias and precession + adjustments, are supported by the ERFA functions eraBi00 and + eraPr00. + + 6) The MHB2000 algorithm also provides "total" nutations, comprising + the arithmetic sum of the frame bias, precession adjustments, + luni-solar nutation and planetary nutation. These total + nutations can be used in combination with an existing IAU 1976 + precession implementation, such as eraPmat76, to deliver GCRS- + to-true predictions of sub-mas accuracy at current dates. + However, there are three shortcomings in the MHB2000 model that + must be taken into account if more accurate or definitive results + are required (see Wallace 2002): + + (i) The MHB2000 total nutations are simply arithmetic sums, + yet in reality the various components are successive Euler + rotations. This slight lack of rigor leads to cross terms + that exceed 1 mas after a century. The rigorous procedure + is to form the GCRS-to-true rotation matrix by applying the + bias, precession and nutation in that order. + + (ii) Although the precession adjustments are stated to be with + respect to Lieske et al. (1977), the MHB2000 model does + not specify which set of Euler angles are to be used and + how the adjustments are to be applied. The most literal + and straightforward procedure is to adopt the 4-rotation + epsilon_0, psi_A, omega_A, xi_A option, and to add DPSIPR + to psi_A and DEPSPR to both omega_A and eps_A. + + (iii) The MHB2000 model predates the determination by Chapront + et al. (2002) of a 14.6 mas displacement between the + J2000.0 mean equinox and the origin of the ICRS frame. It + should, however, be noted that neglecting this displacement + when calculating star coordinates does not lead to a + 14.6 mas change in right ascension, only a small second- + order distortion in the pattern of the precession-nutation + effect. + + For these reasons, the ERFA functions do not generate the "total + nutations" directly, though they can of course easily be + generated by calling eraBi00, eraPr00 and the present function + and adding the results. + + 7) The MHB2000 model contains 41 instances where the same frequency + appears multiple times, of which 38 are duplicates and three are + triplicates. To keep the present code close to the original MHB + algorithm, this small inefficiency has not been corrected. + + Called: + eraFal03 mean anomaly of the Moon + eraFaf03 mean argument of the latitude of the Moon + eraFaom03 mean longitude of the Moon's ascending node + eraFame03 mean longitude of Mercury + eraFave03 mean longitude of Venus + eraFae03 mean longitude of Earth + eraFama03 mean longitude of Mars + eraFaju03 mean longitude of Jupiter + eraFasa03 mean longitude of Saturn + eraFaur03 mean longitude of Uranus + eraFapa03 general accumulated precession in longitude + + References: + + Chapront, J., Chapront-Touze, M. & Francou, G. 
2002, + Astron.Astrophys. 387, 700 + + Lieske, J.H., Lederle, T., Fricke, W. & Morando, B. 1977, + Astron.Astrophys. 58, 1-16 + + Mathews, P.M., Herring, T.A., Buffet, B.A. 2002, J.Geophys.Res. + 107, B4. The MHB_2000 code itself was obtained on 9th September + 2002 from ftp//maia.usno.navy.mil/conv2000/chapter5/IAU2000A. + + Simon, J.-L., Bretagnon, P., Chapront, J., Chapront-Touze, M., + Francou, G., Laskar, J. 1994, Astron.Astrophys. 282, 663-683 + + Souchay, J., Loysel, B., Kinoshita, H., Folgueira, M. 1999, + Astron.Astrophys.Supp.Ser. 135, 111 + + Wallace, P.T., "Software for Implementing the IAU 2000 + Resolutions", in IERS Workshop 5.1 (2002) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + dpsi_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + deps_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, dpsi_out, deps_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._nut00a(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(dpsi_out.shape) > 0 and dpsi_out.shape[0] == 1 + dpsi_out = dpsi_out.reshape(dpsi_out.shape[1:]) + assert len(deps_out.shape) > 0 and deps_out.shape[0] == 1 + deps_out = deps_out.reshape(deps_out.shape[1:]) + + return dpsi_out, deps_out + + +def nut00b(date1, date2): + """ + Wrapper for ERFA function ``eraNut00b``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + dpsi : double array + deps : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a N u t 0 0 b + - - - - - - - - - - + + Nutation, IAU 2000B model. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + dpsi,deps double nutation, luni-solar + planetary (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The nutation components in longitude and obliquity are in radians + and with respect to the equinox and ecliptic of date. The + obliquity at J2000.0 is assumed to be the Lieske et al. 
(1977) + value of 84381.448 arcsec. (The errors that result from using + this function with the IAU 2006 value of 84381.406 arcsec can be + neglected.) + + The nutation model consists only of luni-solar terms, but + includes also a fixed offset which compensates for certain long- + period planetary terms (Note 7). + + 3) This function is an implementation of the IAU 2000B abridged + nutation model formally adopted by the IAU General Assembly in + 2000. The function computes the MHB_2000_SHORT luni-solar + nutation series (Luzum 2001), but without the associated + corrections for the precession rate adjustments and the offset + between the GCRS and J2000.0 mean poles. + + 4) The full IAU 2000A (MHB2000) nutation model contains nearly 1400 + terms. The IAU 2000B model (McCarthy & Luzum 2003) contains only + 77 terms, plus additional simplifications, yet still delivers + results of 1 mas accuracy at present epochs. This combination of + accuracy and size makes the IAU 2000B abridged nutation model + suitable for most practical applications. + + The function delivers a pole accurate to 1 mas from 1900 to 2100 + (usually better than 1 mas, very occasionally just outside + 1 mas). The full IAU 2000A model, which is implemented in the + function eraNut00a (q.v.), delivers considerably greater accuracy + at current dates; however, to realize this improved accuracy, + corrections for the essentially unpredictable free-core-nutation + (FCN) must also be included. + + 5) The present function provides classical nutation. The + MHB_2000_SHORT algorithm, from which it is adapted, deals also + with (i) the offsets between the GCRS and mean poles and (ii) the + adjustments in longitude and obliquity due to the changed + precession rates. These additional functions, namely frame bias + and precession adjustments, are supported by the ERFA functions + eraBi00 and eraPr00. + + 6) The MHB_2000_SHORT algorithm also provides "total" nutations, + comprising the arithmetic sum of the frame bias, precession + adjustments, and nutation (luni-solar + planetary). These total + nutations can be used in combination with an existing IAU 1976 + precession implementation, such as eraPmat76, to deliver GCRS- + to-true predictions of mas accuracy at current epochs. However, + for symmetry with the eraNut00a function (q.v. for the reasons), + the ERFA functions do not generate the "total nutations" + directly. Should they be required, they could of course easily + be generated by calling eraBi00, eraPr00 and the present function + and adding the results. + + 7) The IAU 2000B model includes "planetary bias" terms that are + fixed in size but compensate for long-period nutations. The + amplitudes quoted in McCarthy & Luzum (2003), namely + Dpsi = -1.5835 mas and Depsilon = +1.6339 mas, are optimized for + the "total nutations" method described in Note 6. The Luzum + (2001) values used in this ERFA implementation, namely -0.135 mas + and +0.388 mas, are optimized for the "rigorous" method, where + frame bias, precession and nutation are applied separately and in + that order. During the interval 1995-2050, the ERFA + implementation delivers a maximum error of 1.001 mas (not + including FCN). + + References: + + Lieske, J.H., Lederle, T., Fricke, W., Morando, B., "Expressions + for the precession quantities based upon the IAU /1976/ system of + astronomical constants", Astron.Astrophys. 58, 1-2, 1-16. (1977) + + Luzum, B., private communication, 2001 (Fortran code + MHB_2000_SHORT) + + McCarthy, D.D. 
& Luzum, B.J., "An abridged model of the + precession-nutation of the celestial pole", Cel.Mech.Dyn.Astron. + 85, 37-49 (2003) + + Simon, J.-L., Bretagnon, P., Chapront, J., Chapront-Touze, M., + Francou, G., Laskar, J., Astron.Astrophys. 282, 663-683 (1994) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + dpsi_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + deps_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, dpsi_out, deps_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._nut00b(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(dpsi_out.shape) > 0 and dpsi_out.shape[0] == 1 + dpsi_out = dpsi_out.reshape(dpsi_out.shape[1:]) + assert len(deps_out.shape) > 0 and deps_out.shape[0] == 1 + deps_out = deps_out.reshape(deps_out.shape[1:]) + + return dpsi_out, deps_out + + +def nut06a(date1, date2): + """ + Wrapper for ERFA function ``eraNut06a``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + dpsi : double array + deps : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a N u t 0 6 a + - - - - - - - - - - + + IAU 2000A nutation with adjustments to match the IAU 2006 + precession. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + dpsi,deps double nutation, luni-solar + planetary (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The nutation components in longitude and obliquity are in radians + and with respect to the mean equinox and ecliptic of date, + IAU 2006 precession model (Hilton et al. 2006, Capitaine et al. + 2005). + + 3) The function first computes the IAU 2000A nutation, then applies + adjustments for (i) the consequences of the change in obliquity + from the IAU 1980 ecliptic to the IAU 2006 ecliptic and (ii) the + secular variation in the Earth's dynamical form factor J2. 
+ + 4) The present function provides classical nutation, complementing + the IAU 2000 frame bias and IAU 2006 precession. It delivers a + pole which is at current epochs accurate to a few tens of + microarcseconds, apart from the free core nutation. + + Called: + eraNut00a nutation, IAU 2000A + + References: + + Chapront, J., Chapront-Touze, M. & Francou, G. 2002, + Astron.Astrophys. 387, 700 + + Lieske, J.H., Lederle, T., Fricke, W. & Morando, B. 1977, + Astron.Astrophys. 58, 1-16 + + Mathews, P.M., Herring, T.A., Buffet, B.A. 2002, J.Geophys.Res. + 107, B4. The MHB_2000 code itself was obtained on 9th September + 2002 from ftp//maia.usno.navy.mil/conv2000/chapter5/IAU2000A. + + Simon, J.-L., Bretagnon, P., Chapront, J., Chapront-Touze, M., + Francou, G., Laskar, J. 1994, Astron.Astrophys. 282, 663-683 + + Souchay, J., Loysel, B., Kinoshita, H., Folgueira, M. 1999, + Astron.Astrophys.Supp.Ser. 135, 111 + + Wallace, P.T., "Software for Implementing the IAU 2000 + Resolutions", in IERS Workshop 5.1 (2002) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + dpsi_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + deps_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, dpsi_out, deps_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._nut06a(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(dpsi_out.shape) > 0 and dpsi_out.shape[0] == 1 + dpsi_out = dpsi_out.reshape(dpsi_out.shape[1:]) + assert len(deps_out.shape) > 0 and deps_out.shape[0] == 1 + deps_out = deps_out.reshape(deps_out.shape[1:]) + + return dpsi_out, deps_out + + +def nut80(date1, date2): + """ + Wrapper for ERFA function ``eraNut80``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + dpsi : double array + deps : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a N u t 8 0 + - - - - - - - - - + + Nutation, IAU 1980 model. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + dpsi double nutation in longitude (radians) + deps double nutation in obliquity (radians) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. 
The J2000 method is best matched to the way
+       the argument is handled internally and will deliver the
+       optimum resolution.  The MJD method and the date & time methods
+       are both good compromises between resolution and convenience.
+
+    2) The nutation components are with respect to the ecliptic of
+       date.
+
+    Called:
+       eraAnpm      normalize angle into range +/- pi
+
+    Reference:
+
+       Explanatory Supplement to the Astronomical Almanac,
+       P. Kenneth Seidelmann (ed), University Science Books (1992),
+       Section 3.222 (p111).
+
+    Copyright (C) 2013-2017, NumFOCUS Foundation.
+    Derived, with permission, from the SOFA library.  See notes at end of file.
+
+    """
+
+    #Turn all inputs into arrays
+    date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True)
+    date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True)
+    make_outputs_scalar = False
+
+    #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed
+    broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in)
+    dpsi_out = numpy.empty(broadcast.shape + (), dtype=numpy.double)
+    deps_out = numpy.empty(broadcast.shape + (), dtype=numpy.double)
+
+    #Create the iterator, broadcasting on all but the consumed dimensions
+    arrs = [date1_in, date2_in, dpsi_out, deps_out]
+    op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs]
+    op_flags = [['readonly']]*2 + [['readwrite']]*2
+    it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags)
+
+    #Iterate
+    stat_ok = _core._nut80(it)
+    #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d
+    if make_outputs_scalar:
+        assert len(dpsi_out.shape) > 0 and dpsi_out.shape[0] == 1
+        dpsi_out = dpsi_out.reshape(dpsi_out.shape[1:])
+        assert len(deps_out.shape) > 0 and deps_out.shape[0] == 1
+        deps_out = deps_out.reshape(deps_out.shape[1:])
+
+    return dpsi_out, deps_out
+
+
+def nutm80(date1, date2):
+    """
+    Wrapper for ERFA function ``eraNutm80``.
+
+    Parameters
+    ----------
+    date1 : double array
+    date2 : double array
+
+    Returns
+    -------
+    rmatn : double array
+
+    Notes
+    -----
+    The ERFA documentation is below.
+
+    - - - - - - - - - -
+     e r a N u t m 8 0
+    - - - - - - - - - -
+
+    Form the matrix of nutation for a given date, IAU 1980 model.
+
+    Given:
+       date1,date2    double          TDB date (Note 1)
+
+    Returned:
+       rmatn          double[3][3]    nutation matrix
+
+    Notes:
+
+    1) The TDB date date1+date2 is a Julian Date, apportioned in any
+       convenient way between the two arguments.  For example,
+       JD(TDB)=2450123.7 could be expressed in any of these ways,
+       among others:
+
+              date1          date2
+
+           2450123.7           0.0       (JD method)
+           2451545.0       -1421.3       (J2000 method)
+           2400000.5       50123.2       (MJD method)
+           2450123.5           0.2       (date & time method)
+
+       The JD method is the most natural and convenient to use in
+       cases where the loss of several decimal digits of resolution
+       is acceptable.  The J2000 method is best matched to the way
+       the argument is handled internally and will deliver the
+       optimum resolution.  The MJD method and the date & time methods
+       are both good compromises between resolution and convenience.
+
+    2) The matrix operates in the sense V(true) = rmatn * V(mean),
+       where the p-vector V(true) is with respect to the true
+       equatorial triad of date and the p-vector V(mean) is with
+       respect to the mean equatorial triad of date.
+ + Called: + eraNut80 nutation, IAU 1980 + eraObl80 mean obliquity, IAU 1980 + eraNumat form nutation matrix + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + rmatn_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, rmatn_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._nutm80(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rmatn_out.shape) > 0 and rmatn_out.shape[0] == 1 + rmatn_out = rmatn_out.reshape(rmatn_out.shape[1:]) + + return rmatn_out + + +def obl06(date1, date2): + """ + Wrapper for ERFA function ``eraObl06``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a O b l 0 6 + - - - - - - - - - + + Mean obliquity of the ecliptic, IAU 2006 precession model. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned (function value): + double obliquity of the ecliptic (radians, Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The result is the angle between the ecliptic and mean equator of + date date1+date2. + + Reference: + + Hilton, J. et al., 2006, Celest.Mech.Dyn.Astron. 94, 351 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
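+
+    Examples
+    --------
+    An illustrative sketch (not part of the quoted ERFA documentation),
+    assuming the private ``astropy._erfa`` import path; at J2000.0 the
+    IAU 2006 mean obliquity is 84381.406 arcseconds::
+
+        >>> import numpy as np
+        >>> from astropy import _erfa as erfa
+        >>> eps = erfa.obl06(2451545.0, 0.0)  # J2000.0
+        >>> round(float(np.degrees(eps)) * 3600.0, 3)
+        84381.406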
+ + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._obl06(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def obl80(date1, date2): + """ + Wrapper for ERFA function ``eraObl80``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a O b l 8 0 + - - - - - - - - - + + Mean obliquity of the ecliptic, IAU 1980 model. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned (function value): + double obliquity of the ecliptic (radians, Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The result is the angle between the ecliptic and mean equator of + date date1+date2. + + Reference: + + Explanatory Supplement to the Astronomical Almanac, + P. Kenneth Seidelmann (ed), University Science Books (1992), + Expression 3.222-1 (p114). + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._obl80(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def p06e(date1, date2): + """ + Wrapper for ERFA function ``eraP06e``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + eps0 : double array + psia : double array + oma : double array + bpa : double array + bqa : double array + pia : double array + bpia : double array + epsa : double array + chia : double array + za : double array + zetaa : double array + thetaa : double array + pa : double array + gam : double array + phi : double array + psi : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a P 0 6 e + - - - - - - - - + + Precession angles, IAU 2006, equinox based. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned (see Note 2): + eps0 double epsilon_0 + psia double psi_A + oma double omega_A + bpa double P_A + bqa double Q_A + pia double pi_A + bpia double Pi_A + epsa double obliquity epsilon_A + chia double chi_A + za double z_A + zetaa double zeta_A + thetaa double theta_A + pa double p_A + gam double F-W angle gamma_J2000 + phi double F-W angle phi_J2000 + psi double F-W angle psi_J2000 + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) This function returns the set of equinox based angles for the + Capitaine et al. "P03" precession theory, adopted by the IAU in + 2006. The angles are set out in Table 1 of Hilton et al. 
(2006): + + eps0 epsilon_0 obliquity at J2000.0 + psia psi_A luni-solar precession + oma omega_A inclination of equator wrt J2000.0 ecliptic + bpa P_A ecliptic pole x, J2000.0 ecliptic triad + bqa Q_A ecliptic pole -y, J2000.0 ecliptic triad + pia pi_A angle between moving and J2000.0 ecliptics + bpia Pi_A longitude of ascending node of the ecliptic + epsa epsilon_A obliquity of the ecliptic + chia chi_A planetary precession + za z_A equatorial precession: -3rd 323 Euler angle + zetaa zeta_A equatorial precession: -1st 323 Euler angle + thetaa theta_A equatorial precession: 2nd 323 Euler angle + pa p_A general precession + gam gamma_J2000 J2000.0 RA difference of ecliptic poles + phi phi_J2000 J2000.0 codeclination of ecliptic pole + psi psi_J2000 longitude difference of equator poles, J2000.0 + + The returned values are all radians. + + 3) Hilton et al. (2006) Table 1 also contains angles that depend on + models distinct from the P03 precession theory itself, namely the + IAU 2000A frame bias and nutation. The quoted polynomials are + used in other ERFA functions: + + . eraXy06 contains the polynomial parts of the X and Y series. + + . eraS06 contains the polynomial part of the s+XY/2 series. + + . eraPfw06 implements the series for the Fukushima-Williams + angles that are with respect to the GCRS pole (i.e. the variants + that include frame bias). + + 4) The IAU resolution stipulated that the choice of parameterization + was left to the user, and so an IAU compliant precession + implementation can be constructed using various combinations of + the angles returned by the present function. + + 5) The parameterization used by ERFA is the version of the Fukushima- + Williams angles that refers directly to the GCRS pole. These + angles may be calculated by calling the function eraPfw06. ERFA + also supports the direct computation of the CIP GCRS X,Y by + series, available by calling eraXy06. + + 6) The agreement between the different parameterizations is at the + 1 microarcsecond level in the present era. + + 7) When constructing a precession formulation that refers to the GCRS + pole rather than the dynamical pole, it may (depending on the + choice of angles) be necessary to introduce the frame bias + explicitly. + + 8) It is permissible to re-use the same variable in the returned + arguments. The quantities are stored in the stated order. + + Reference: + + Hilton, J. et al., 2006, Celest.Mech.Dyn.Astron. 94, 351 + + Called: + eraObl06 mean obliquity, IAU 2006 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
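+
+ Examples
+ --------
+ A minimal sketch (not part of the SOFA/ERFA notes above); the sixteen
+ angles come back as a tuple in the order listed under "Returned":
+
+ >>> from astropy import _erfa as erfa
+ >>> angles = erfa.p06e(2451545.0, 0.0)
+ >>> len(angles)
+ 16
+ >>> eps0 = angles[0]   # obliquity at J2000.0, radians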
+ + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + eps0_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + psia_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + oma_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + bpa_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + bqa_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + pia_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + bpia_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + epsa_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + chia_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + za_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + zetaa_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + thetaa_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + pa_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + gam_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + phi_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + psi_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, eps0_out, psia_out, oma_out, bpa_out, bqa_out, pia_out, bpia_out, epsa_out, chia_out, za_out, zetaa_out, thetaa_out, pa_out, gam_out, phi_out, psi_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*16 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._p06e(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(eps0_out.shape) > 0 and eps0_out.shape[0] == 1 + eps0_out = eps0_out.reshape(eps0_out.shape[1:]) + assert len(psia_out.shape) > 0 and psia_out.shape[0] == 1 + psia_out = psia_out.reshape(psia_out.shape[1:]) + assert len(oma_out.shape) > 0 and oma_out.shape[0] == 1 + oma_out = oma_out.reshape(oma_out.shape[1:]) + assert len(bpa_out.shape) > 0 and bpa_out.shape[0] == 1 + bpa_out = bpa_out.reshape(bpa_out.shape[1:]) + assert len(bqa_out.shape) > 0 and bqa_out.shape[0] == 1 + bqa_out = bqa_out.reshape(bqa_out.shape[1:]) + assert len(pia_out.shape) > 0 and pia_out.shape[0] == 1 + pia_out = pia_out.reshape(pia_out.shape[1:]) + assert len(bpia_out.shape) > 0 and bpia_out.shape[0] == 1 + bpia_out = bpia_out.reshape(bpia_out.shape[1:]) + assert len(epsa_out.shape) > 0 and epsa_out.shape[0] == 1 + epsa_out = epsa_out.reshape(epsa_out.shape[1:]) + assert len(chia_out.shape) > 0 and chia_out.shape[0] == 1 + chia_out = chia_out.reshape(chia_out.shape[1:]) + assert len(za_out.shape) > 0 and za_out.shape[0] == 1 + za_out = za_out.reshape(za_out.shape[1:]) + assert len(zetaa_out.shape) > 0 and zetaa_out.shape[0] == 1 + zetaa_out = zetaa_out.reshape(zetaa_out.shape[1:]) + assert len(thetaa_out.shape) > 0 and thetaa_out.shape[0] == 1 + thetaa_out = thetaa_out.reshape(thetaa_out.shape[1:]) + assert len(pa_out.shape) > 0 and pa_out.shape[0] == 1 + pa_out = pa_out.reshape(pa_out.shape[1:]) + assert len(gam_out.shape) > 0 
and gam_out.shape[0] == 1 + gam_out = gam_out.reshape(gam_out.shape[1:]) + assert len(phi_out.shape) > 0 and phi_out.shape[0] == 1 + phi_out = phi_out.reshape(phi_out.shape[1:]) + assert len(psi_out.shape) > 0 and psi_out.shape[0] == 1 + psi_out = psi_out.reshape(psi_out.shape[1:]) + + return eps0_out, psia_out, oma_out, bpa_out, bqa_out, pia_out, bpia_out, epsa_out, chia_out, za_out, zetaa_out, thetaa_out, pa_out, gam_out, phi_out, psi_out + + +def pb06(date1, date2): + """ + Wrapper for ERFA function ``eraPb06``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + bzeta : double array + bz : double array + btheta : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a P b 0 6 + - - - - - - - - + + This function forms three Euler angles which implement general + precession from epoch J2000.0, using the IAU 2006 model. Frame + bias (the offset between ICRS and mean J2000.0) is included. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + bzeta double 1st rotation: radians cw around z + bz double 3rd rotation: radians cw around z + btheta double 2nd rotation: radians ccw around y + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The traditional accumulated precession angles zeta_A, z_A, + theta_A cannot be obtained in the usual way, namely through + polynomial expressions, because of the frame bias. The latter + means that two of the angles undergo rapid changes near this + date. They are instead the results of decomposing the + precession-bias matrix obtained by using the Fukushima-Williams + method, which does not suffer from the problem. The + decomposition returns values which can be used in the + conventional formulation and which include frame bias. + + 3) The three angles are returned in the conventional order, which + is not the same as the order of the corresponding Euler + rotations. The precession-bias matrix is + R_3(-z) x R_2(+theta) x R_3(-zeta). + + 4) Should zeta_A, z_A, theta_A angles be required that do not + contain frame bias, they are available by calling the ERFA + function eraP06e. + + Called: + eraPmat06 PB matrix, IAU 2006 + eraRz rotate around Z-axis + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
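+
+ Examples
+ --------
+ A minimal sketch (not part of the SOFA/ERFA notes above):
+
+ >>> from astropy import _erfa as erfa
+ >>> bzeta, bz, btheta = erfa.pb06(2400000.5, 50123.2)   # MJD method
+ >>> # all three are radian angles; Note 3 gives the rotation order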
+ + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + bzeta_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + bz_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + btheta_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, bzeta_out, bz_out, btheta_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._pb06(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(bzeta_out.shape) > 0 and bzeta_out.shape[0] == 1 + bzeta_out = bzeta_out.reshape(bzeta_out.shape[1:]) + assert len(bz_out.shape) > 0 and bz_out.shape[0] == 1 + bz_out = bz_out.reshape(bz_out.shape[1:]) + assert len(btheta_out.shape) > 0 and btheta_out.shape[0] == 1 + btheta_out = btheta_out.reshape(btheta_out.shape[1:]) + + return bzeta_out, bz_out, btheta_out + + +def pfw06(date1, date2): + """ + Wrapper for ERFA function ``eraPfw06``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + gamb : double array + phib : double array + psib : double array + epsa : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a P f w 0 6 + - - - - - - - - - + + Precession angles, IAU 2006 (Fukushima-Williams 4-angle formulation). + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + gamb double F-W angle gamma_bar (radians) + phib double F-W angle phi_bar (radians) + psib double F-W angle psi_bar (radians) + epsa double F-W angle epsilon_A (radians) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. 
+ + 2) Naming the following points: + + e = J2000.0 ecliptic pole, + p = GCRS pole, + E = mean ecliptic pole of date, + and P = mean pole of date, + + the four Fukushima-Williams angles are as follows: + + gamb = gamma_bar = epE + phib = phi_bar = pE + psib = psi_bar = pEP + epsa = epsilon_A = EP + + 3) The matrix representing the combined effects of frame bias and + precession is: + + PxB = R_1(-epsa).R_3(-psib).R_1(phib).R_3(gamb) + + 4) The matrix representing the combined effects of frame bias, + precession and nutation is simply: + + NxPxB = R_1(-epsa-dE).R_3(-psib-dP).R_1(phib).R_3(gamb) + + where dP and dE are the nutation components with respect to the + ecliptic of date. + + Reference: + + Hilton, J. et al., 2006, Celest.Mech.Dyn.Astron. 94, 351 + + Called: + eraObl06 mean obliquity, IAU 2006 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + gamb_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + phib_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + psib_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + epsa_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, gamb_out, phib_out, psib_out, epsa_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*4 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._pfw06(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(gamb_out.shape) > 0 and gamb_out.shape[0] == 1 + gamb_out = gamb_out.reshape(gamb_out.shape[1:]) + assert len(phib_out.shape) > 0 and phib_out.shape[0] == 1 + phib_out = phib_out.reshape(phib_out.shape[1:]) + assert len(psib_out.shape) > 0 and psib_out.shape[0] == 1 + psib_out = psib_out.reshape(psib_out.shape[1:]) + assert len(epsa_out.shape) > 0 and epsa_out.shape[0] == 1 + epsa_out = epsa_out.reshape(epsa_out.shape[1:]) + + return gamb_out, phib_out, psib_out, epsa_out + + +def pmat00(date1, date2): + """ + Wrapper for ERFA function ``eraPmat00``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + rbp : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a P m a t 0 0 + - - - - - - - - - - + + Precession matrix (including frame bias) from GCRS to a specified + date, IAU 2000 model. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + rbp double[3][3] bias-precession matrix (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. 
For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The matrix operates in the sense V(date) = rbp * V(GCRS), where + the p-vector V(GCRS) is with respect to the Geocentric Celestial + Reference System (IAU, 2000) and the p-vector V(date) is with + respect to the mean equatorial triad of the given date. + + Called: + eraBp00 frame bias and precession matrices, IAU 2000 + + Reference: + + IAU: Trans. International Astronomical Union, Vol. XXIVB; Proc. + 24th General Assembly, Manchester, UK. Resolutions B1.3, B1.6. + (2000) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + rbp_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, rbp_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._pmat00(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rbp_out.shape) > 0 and rbp_out.shape[0] == 1 + rbp_out = rbp_out.reshape(rbp_out.shape[1:]) + + return rbp_out + + +def pmat06(date1, date2): + """ + Wrapper for ERFA function ``eraPmat06``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + rbp : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a P m a t 0 6 + - - - - - - - - - - + + Precession matrix (including frame bias) from GCRS to a specified + date, IAU 2006 model. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + rbp double[3][3] bias-precession matrix (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. 
The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The matrix operates in the sense V(date) = rbp * V(GCRS), where + the p-vector V(GCRS) is with respect to the Geocentric Celestial + Reference System (IAU, 2000) and the p-vector V(date) is with + respect to the mean equatorial triad of the given date. + + Called: + eraPfw06 bias-precession F-W angles, IAU 2006 + eraFw2m F-W angles to r-matrix + + References: + + Capitaine, N. & Wallace, P.T., 2006, Astron.Astrophys. 450, 855 + + Wallace, P.T. & Capitaine, N., 2006, Astron.Astrophys. 459, 981 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + rbp_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, rbp_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._pmat06(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rbp_out.shape) > 0 and rbp_out.shape[0] == 1 + rbp_out = rbp_out.reshape(rbp_out.shape[1:]) + + return rbp_out + + +def pmat76(date1, date2): + """ + Wrapper for ERFA function ``eraPmat76``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + rmatp : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a P m a t 7 6 + - - - - - - - - - - + + Precession matrix from J2000.0 to a specified date, IAU 1976 model. + + Given: + date1,date2 double ending date, TT (Note 1) + + Returned: + rmatp double[3][3] precession matrix, J2000.0 -> date1+date2 + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The matrix operates in the sense V(date) = RMATP * V(J2000), + where the p-vector V(J2000) is with respect to the mean + equatorial triad of epoch J2000.0 and the p-vector V(date) + is with respect to the mean equatorial triad of the given + date. + + 3) Though the matrix method itself is rigorous, the precession + angles are expressed through canonical polynomials which are + valid only for a limited time span. 
In addition, the IAU 1976 + precession rate is known to be imperfect. The absolute accuracy + of the present formulation is better than 0.1 arcsec from + 1960AD to 2040AD, better than 1 arcsec from 1640AD to 2360AD, + and remains below 3 arcsec for the whole of the period + 500BC to 3000AD. The errors exceed 10 arcsec outside the + range 1200BC to 3900AD, exceed 100 arcsec outside 4200BC to + 5600AD and exceed 1000 arcsec outside 6800BC to 8200AD. + + Called: + eraPrec76 accumulated precession angles, IAU 1976 + eraIr initialize r-matrix to identity + eraRz rotate around Z-axis + eraRy rotate around Y-axis + eraCr copy r-matrix + + References: + + Lieske, J.H., 1979, Astron.Astrophys. 73, 282. + equations (6) & (7), p283. + + Kaplan,G.H., 1981. USNO circular no. 163, pA2. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + rmatp_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, rmatp_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._pmat76(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rmatp_out.shape) > 0 and rmatp_out.shape[0] == 1 + rmatp_out = rmatp_out.reshape(rmatp_out.shape[1:]) + + return rmatp_out + + +def pn00(date1, date2, dpsi, deps): + """ + Wrapper for ERFA function ``eraPn00``. + + Parameters + ---------- + date1 : double array + date2 : double array + dpsi : double array + deps : double array + + Returns + ------- + epsa : double array + rb : double array + rp : double array + rbp : double array + rn : double array + rbpn : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a P n 0 0 + - - - - - - - - + + Precession-nutation, IAU 2000 model: a multi-purpose function, + supporting classical (equinox-based) use directly and CIO-based + use indirectly. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + dpsi,deps double nutation (Note 2) + + Returned: + epsa double mean obliquity (Note 3) + rb double[3][3] frame bias matrix (Note 4) + rp double[3][3] precession matrix (Note 5) + rbp double[3][3] bias-precession matrix (Note 6) + rn double[3][3] nutation matrix (Note 7) + rbpn double[3][3] GCRS-to-true matrix (Note 8) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. 
The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The caller is responsible for providing the nutation components; + they are in longitude and obliquity, in radians and are with + respect to the equinox and ecliptic of date. For high-accuracy + applications, free core nutation should be included as well as + any other relevant corrections to the position of the CIP. + + 3) The returned mean obliquity is consistent with the IAU 2000 + precession-nutation models. + + 4) The matrix rb transforms vectors from GCRS to J2000.0 mean + equator and equinox by applying frame bias. + + 5) The matrix rp transforms vectors from J2000.0 mean equator and + equinox to mean equator and equinox of date by applying + precession. + + 6) The matrix rbp transforms vectors from GCRS to mean equator and + equinox of date by applying frame bias then precession. It is + the product rp x rb. + + 7) The matrix rn transforms vectors from mean equator and equinox of + date to true equator and equinox of date by applying the nutation + (luni-solar + planetary). + + 8) The matrix rbpn transforms vectors from GCRS to true equator and + equinox of date. It is the product rn x rbp, applying frame + bias, precession and nutation in that order. + + 9) It is permissible to re-use the same array in the returned + arguments. The arrays are filled in the order given. + + Called: + eraPr00 IAU 2000 precession adjustments + eraObl80 mean obliquity, IAU 1980 + eraBp00 frame bias and precession matrices, IAU 2000 + eraCr copy r-matrix + eraNumat form nutation matrix + eraRxr product of two r-matrices + + Reference: + + Capitaine, N., Chapront, J., Lambert, S. and Wallace, P., + "Expressions for the Celestial Intermediate Pole and Celestial + Ephemeris Origin consistent with the IAU 2000A precession- + nutation model", Astron.Astrophys. 400, 1145-1154 (2003) + + n.b. The celestial ephemeris origin (CEO) was renamed "celestial + intermediate origin" (CIO) by IAU 2006 Resolution 2. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
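+
+ Examples
+ --------
+ A minimal sketch (not part of the SOFA/ERFA notes above); the nutation
+ components are caller-supplied per Note 2, and the values below are
+ purely illustrative:
+
+ >>> from astropy import _erfa as erfa
+ >>> dpsi, deps = -9.6e-6, 4.1e-5   # illustrative nutation, radians
+ >>> epsa, rb, rp, rbp, rn, rbpn = erfa.pn00(2400000.5, 53736.0, dpsi, deps)
+ >>> rbpn.shape   # every matrix output is 3x3
+ (3, 3)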
+ + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + dpsi_in = numpy.array(dpsi, dtype=numpy.double, order="C", copy=False, subok=True) + deps_in = numpy.array(deps, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in, dpsi_in, deps_in) + epsa_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + rb_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rp_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rbp_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rn_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rbpn_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, dpsi_in, deps_in, epsa_out, rb_out[...,0,0], rp_out[...,0,0], rbp_out[...,0,0], rn_out[...,0,0], rbpn_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*6 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._pn00(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(epsa_out.shape) > 0 and epsa_out.shape[0] == 1 + epsa_out = epsa_out.reshape(epsa_out.shape[1:]) + assert len(rb_out.shape) > 0 and rb_out.shape[0] == 1 + rb_out = rb_out.reshape(rb_out.shape[1:]) + assert len(rp_out.shape) > 0 and rp_out.shape[0] == 1 + rp_out = rp_out.reshape(rp_out.shape[1:]) + assert len(rbp_out.shape) > 0 and rbp_out.shape[0] == 1 + rbp_out = rbp_out.reshape(rbp_out.shape[1:]) + assert len(rn_out.shape) > 0 and rn_out.shape[0] == 1 + rn_out = rn_out.reshape(rn_out.shape[1:]) + assert len(rbpn_out.shape) > 0 and rbpn_out.shape[0] == 1 + rbpn_out = rbpn_out.reshape(rbpn_out.shape[1:]) + + return epsa_out, rb_out, rp_out, rbp_out, rn_out, rbpn_out + + +def pn00a(date1, date2): + """ + Wrapper for ERFA function ``eraPn00a``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + dpsi : double array + deps : double array + epsa : double array + rb : double array + rp : double array + rbp : double array + rn : double array + rbpn : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a P n 0 0 a + - - - - - - - - - + + Precession-nutation, IAU 2000A model: a multi-purpose function, + supporting classical (equinox-based) use directly and CIO-based + use indirectly. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + dpsi,deps double nutation (Note 2) + epsa double mean obliquity (Note 3) + rb double[3][3] frame bias matrix (Note 4) + rp double[3][3] precession matrix (Note 5) + rbp double[3][3] bias-precession matrix (Note 6) + rn double[3][3] nutation matrix (Note 7) + rbpn double[3][3] GCRS-to-true matrix (Notes 8,9) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. 
For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The nutation components (luni-solar + planetary, IAU 2000A) in + longitude and obliquity are in radians and with respect to the + equinox and ecliptic of date. Free core nutation is omitted; + for the utmost accuracy, use the eraPn00 function, where the + nutation components are caller-specified. For faster but + slightly less accurate results, use the eraPn00b function. + + 3) The mean obliquity is consistent with the IAU 2000 precession. + + 4) The matrix rb transforms vectors from GCRS to J2000.0 mean + equator and equinox by applying frame bias. + + 5) The matrix rp transforms vectors from J2000.0 mean equator and + equinox to mean equator and equinox of date by applying + precession. + + 6) The matrix rbp transforms vectors from GCRS to mean equator and + equinox of date by applying frame bias then precession. It is + the product rp x rb. + + 7) The matrix rn transforms vectors from mean equator and equinox + of date to true equator and equinox of date by applying the + nutation (luni-solar + planetary). + + 8) The matrix rbpn transforms vectors from GCRS to true equator and + equinox of date. It is the product rn x rbp, applying frame + bias, precession and nutation in that order. + + 9) The X,Y,Z coordinates of the IAU 2000A Celestial Intermediate + Pole are elements (3,1-3) of the GCRS-to-true matrix, + i.e. rbpn[2][0-2]. + + 10) It is permissible to re-use the same array in the returned + arguments. The arrays are filled in the order given. + + Called: + eraNut00a nutation, IAU 2000A + eraPn00 bias/precession/nutation results, IAU 2000 + + Reference: + + Capitaine, N., Chapront, J., Lambert, S. and Wallace, P., + "Expressions for the Celestial Intermediate Pole and Celestial + Ephemeris Origin consistent with the IAU 2000A precession- + nutation model", Astron.Astrophys. 400, 1145-1154 (2003) + + n.b. The celestial ephemeris origin (CEO) was renamed "celestial + intermediate origin" (CIO) by IAU 2006 Resolution 2. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
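+
+ Examples
+ --------
+ A minimal sketch (not part of the SOFA/ERFA notes above), showing that
+ the wrapper broadcasts over array-valued dates:
+
+ >>> import numpy
+ >>> from astropy import _erfa as erfa
+ >>> res = erfa.pn00a(2400000.5, numpy.array([53736.0, 53737.0]))
+ >>> res[3].shape   # rb: one 3x3 matrix per input date
+ (2, 3, 3)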
+ + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + dpsi_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + deps_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + epsa_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + rb_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rp_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rbp_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rn_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rbpn_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, dpsi_out, deps_out, epsa_out, rb_out[...,0,0], rp_out[...,0,0], rbp_out[...,0,0], rn_out[...,0,0], rbpn_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*8 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._pn00a(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(dpsi_out.shape) > 0 and dpsi_out.shape[0] == 1 + dpsi_out = dpsi_out.reshape(dpsi_out.shape[1:]) + assert len(deps_out.shape) > 0 and deps_out.shape[0] == 1 + deps_out = deps_out.reshape(deps_out.shape[1:]) + assert len(epsa_out.shape) > 0 and epsa_out.shape[0] == 1 + epsa_out = epsa_out.reshape(epsa_out.shape[1:]) + assert len(rb_out.shape) > 0 and rb_out.shape[0] == 1 + rb_out = rb_out.reshape(rb_out.shape[1:]) + assert len(rp_out.shape) > 0 and rp_out.shape[0] == 1 + rp_out = rp_out.reshape(rp_out.shape[1:]) + assert len(rbp_out.shape) > 0 and rbp_out.shape[0] == 1 + rbp_out = rbp_out.reshape(rbp_out.shape[1:]) + assert len(rn_out.shape) > 0 and rn_out.shape[0] == 1 + rn_out = rn_out.reshape(rn_out.shape[1:]) + assert len(rbpn_out.shape) > 0 and rbpn_out.shape[0] == 1 + rbpn_out = rbpn_out.reshape(rbpn_out.shape[1:]) + + return dpsi_out, deps_out, epsa_out, rb_out, rp_out, rbp_out, rn_out, rbpn_out + + +def pn00b(date1, date2): + """ + Wrapper for ERFA function ``eraPn00b``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + dpsi : double array + deps : double array + epsa : double array + rb : double array + rp : double array + rbp : double array + rn : double array + rbpn : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a P n 0 0 b + - - - - - - - - - + + Precession-nutation, IAU 2000B model: a multi-purpose function, + supporting classical (equinox-based) use directly and CIO-based + use indirectly. 
+ + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + dpsi,deps double nutation (Note 2) + epsa double mean obliquity (Note 3) + rb double[3][3] frame bias matrix (Note 4) + rp double[3][3] precession matrix (Note 5) + rbp double[3][3] bias-precession matrix (Note 6) + rn double[3][3] nutation matrix (Note 7) + rbpn double[3][3] GCRS-to-true matrix (Notes 8,9) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The nutation components (luni-solar + planetary, IAU 2000B) in + longitude and obliquity are in radians and with respect to the + equinox and ecliptic of date. For more accurate results, but + at the cost of increased computation, use the eraPn00a function. + For the utmost accuracy, use the eraPn00 function, where the + nutation components are caller-specified. + + 3) The mean obliquity is consistent with the IAU 2000 precession. + + 4) The matrix rb transforms vectors from GCRS to J2000.0 mean + equator and equinox by applying frame bias. + + 5) The matrix rp transforms vectors from J2000.0 mean equator and + equinox to mean equator and equinox of date by applying + precession. + + 6) The matrix rbp transforms vectors from GCRS to mean equator and + equinox of date by applying frame bias then precession. It is + the product rp x rb. + + 7) The matrix rn transforms vectors from mean equator and equinox + of date to true equator and equinox of date by applying the + nutation (luni-solar + planetary). + + 8) The matrix rbpn transforms vectors from GCRS to true equator and + equinox of date. It is the product rn x rbp, applying frame + bias, precession and nutation in that order. + + 9) The X,Y,Z coordinates of the IAU 2000B Celestial Intermediate + Pole are elements (3,1-3) of the GCRS-to-true matrix, + i.e. rbpn[2][0-2]. + + 10) It is permissible to re-use the same array in the returned + arguments. The arrays are filled in the stated order. + + Called: + eraNut00b nutation, IAU 2000B + eraPn00 bias/precession/nutation results, IAU 2000 + + Reference: + + Capitaine, N., Chapront, J., Lambert, S. and Wallace, P., + "Expressions for the Celestial Intermediate Pole and Celestial + Ephemeris Origin consistent with the IAU 2000A precession- + nutation model", Astron.Astrophys. 400, 1145-1154 (2003). + + n.b. The celestial ephemeris origin (CEO) was renamed "celestial + intermediate origin" (CIO) by IAU 2006 Resolution 2. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
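+
+ Examples
+ --------
+ A minimal sketch (not part of the SOFA/ERFA notes above); per Note 2,
+ this is the faster, slightly less accurate companion to eraPn00a:
+
+ >>> from astropy import _erfa as erfa
+ >>> dpsi, deps, epsa, rb, rp, rbp, rn, rbpn = erfa.pn00b(2400000.5, 53736.0)
+ >>> # dpsi, deps are the IAU 2000B nutation components, in radians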
+ + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + dpsi_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + deps_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + epsa_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + rb_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rp_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rbp_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rn_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rbpn_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, dpsi_out, deps_out, epsa_out, rb_out[...,0,0], rp_out[...,0,0], rbp_out[...,0,0], rn_out[...,0,0], rbpn_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*8 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._pn00b(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(dpsi_out.shape) > 0 and dpsi_out.shape[0] == 1 + dpsi_out = dpsi_out.reshape(dpsi_out.shape[1:]) + assert len(deps_out.shape) > 0 and deps_out.shape[0] == 1 + deps_out = deps_out.reshape(deps_out.shape[1:]) + assert len(epsa_out.shape) > 0 and epsa_out.shape[0] == 1 + epsa_out = epsa_out.reshape(epsa_out.shape[1:]) + assert len(rb_out.shape) > 0 and rb_out.shape[0] == 1 + rb_out = rb_out.reshape(rb_out.shape[1:]) + assert len(rp_out.shape) > 0 and rp_out.shape[0] == 1 + rp_out = rp_out.reshape(rp_out.shape[1:]) + assert len(rbp_out.shape) > 0 and rbp_out.shape[0] == 1 + rbp_out = rbp_out.reshape(rbp_out.shape[1:]) + assert len(rn_out.shape) > 0 and rn_out.shape[0] == 1 + rn_out = rn_out.reshape(rn_out.shape[1:]) + assert len(rbpn_out.shape) > 0 and rbpn_out.shape[0] == 1 + rbpn_out = rbpn_out.reshape(rbpn_out.shape[1:]) + + return dpsi_out, deps_out, epsa_out, rb_out, rp_out, rbp_out, rn_out, rbpn_out + + +def pn06(date1, date2, dpsi, deps): + """ + Wrapper for ERFA function ``eraPn06``. + + Parameters + ---------- + date1 : double array + date2 : double array + dpsi : double array + deps : double array + + Returns + ------- + epsa : double array + rb : double array + rp : double array + rbp : double array + rn : double array + rbpn : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a P n 0 6 + - - - - - - - - + + Precession-nutation, IAU 2006 model: a multi-purpose function, + supporting classical (equinox-based) use directly and CIO-based use + indirectly. 
+ + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + dpsi,deps double nutation (Note 2) + + Returned: + epsa double mean obliquity (Note 3) + rb double[3][3] frame bias matrix (Note 4) + rp double[3][3] precession matrix (Note 5) + rbp double[3][3] bias-precession matrix (Note 6) + rn double[3][3] nutation matrix (Note 7) + rbpn double[3][3] GCRS-to-true matrix (Note 8) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The caller is responsible for providing the nutation components; + they are in longitude and obliquity, in radians and are with + respect to the equinox and ecliptic of date. For high-accuracy + applications, free core nutation should be included as well as + any other relevant corrections to the position of the CIP. + + 3) The returned mean obliquity is consistent with the IAU 2006 + precession. + + 4) The matrix rb transforms vectors from GCRS to J2000.0 mean + equator and equinox by applying frame bias. + + 5) The matrix rp transforms vectors from J2000.0 mean equator and + equinox to mean equator and equinox of date by applying + precession. + + 6) The matrix rbp transforms vectors from GCRS to mean equator and + equinox of date by applying frame bias then precession. It is + the product rp x rb. + + 7) The matrix rn transforms vectors from mean equator and equinox + of date to true equator and equinox of date by applying the + nutation (luni-solar + planetary). + + 8) The matrix rbpn transforms vectors from GCRS to true equator and + equinox of date. It is the product rn x rbp, applying frame + bias, precession and nutation in that order. + + 9) The X,Y,Z coordinates of the Celestial Intermediate Pole are + elements (3,1-3) of the GCRS-to-true matrix, i.e. rbpn[2][0-2]. + + 10) It is permissible to re-use the same array in the returned + arguments. The arrays are filled in the stated order. + + Called: + eraPfw06 bias-precession F-W angles, IAU 2006 + eraFw2m F-W angles to r-matrix + eraCr copy r-matrix + eraTr transpose r-matrix + eraRxr product of two r-matrices + + References: + + Capitaine, N. & Wallace, P.T., 2006, Astron.Astrophys. 450, 855 + + Wallace, P.T. & Capitaine, N., 2006, Astron.Astrophys. 459, 981 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
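+
+ Examples
+ --------
+ A minimal sketch (not part of the SOFA/ERFA notes above); as with
+ eraPn00, the nutation components are caller-supplied and the values
+ below are purely illustrative:
+
+ >>> from astropy import _erfa as erfa
+ >>> epsa, rb, rp, rbp, rn, rbpn = erfa.pn06(2400000.5, 53736.0, -9.6e-6, 4.1e-5)
+ >>> # rbpn applies frame bias, precession and nutation in that order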
+ + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + dpsi_in = numpy.array(dpsi, dtype=numpy.double, order="C", copy=False, subok=True) + deps_in = numpy.array(deps, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in, dpsi_in, deps_in) + epsa_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + rb_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rp_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rbp_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rn_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rbpn_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, dpsi_in, deps_in, epsa_out, rb_out[...,0,0], rp_out[...,0,0], rbp_out[...,0,0], rn_out[...,0,0], rbpn_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*6 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._pn06(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(epsa_out.shape) > 0 and epsa_out.shape[0] == 1 + epsa_out = epsa_out.reshape(epsa_out.shape[1:]) + assert len(rb_out.shape) > 0 and rb_out.shape[0] == 1 + rb_out = rb_out.reshape(rb_out.shape[1:]) + assert len(rp_out.shape) > 0 and rp_out.shape[0] == 1 + rp_out = rp_out.reshape(rp_out.shape[1:]) + assert len(rbp_out.shape) > 0 and rbp_out.shape[0] == 1 + rbp_out = rbp_out.reshape(rbp_out.shape[1:]) + assert len(rn_out.shape) > 0 and rn_out.shape[0] == 1 + rn_out = rn_out.reshape(rn_out.shape[1:]) + assert len(rbpn_out.shape) > 0 and rbpn_out.shape[0] == 1 + rbpn_out = rbpn_out.reshape(rbpn_out.shape[1:]) + + return epsa_out, rb_out, rp_out, rbp_out, rn_out, rbpn_out + + +def pn06a(date1, date2): + """ + Wrapper for ERFA function ``eraPn06a``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + dpsi : double array + deps : double array + epsa : double array + rb : double array + rp : double array + rbp : double array + rn : double array + rbpn : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a P n 0 6 a + - - - - - - - - - + + Precession-nutation, IAU 2006/2000A models: a multi-purpose function, + supporting classical (equinox-based) use directly and CIO-based use + indirectly. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + dpsi,deps double nutation (Note 2) + epsa double mean obliquity (Note 3) + rb double[3][3] frame bias matrix (Note 4) + rp double[3][3] precession matrix (Note 5) + rbp double[3][3] bias-precession matrix (Note 6) + rn double[3][3] nutation matrix (Note 7) + rbpn double[3][3] GCRS-to-true matrix (Notes 8,9) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. 
For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The nutation components (luni-solar + planetary, IAU 2000A) in + longitude and obliquity are in radians and with respect to the + equinox and ecliptic of date. Free core nutation is omitted; + for the utmost accuracy, use the eraPn06 function, where the + nutation components are caller-specified. + + 3) The mean obliquity is consistent with the IAU 2006 precession. + + 4) The matrix rb transforms vectors from GCRS to mean J2000.0 by + applying frame bias. + + 5) The matrix rp transforms vectors from mean J2000.0 to mean of + date by applying precession. + + 6) The matrix rbp transforms vectors from GCRS to mean of date by + applying frame bias then precession. It is the product rp x rb. + + 7) The matrix rn transforms vectors from mean of date to true of + date by applying the nutation (luni-solar + planetary). + + 8) The matrix rbpn transforms vectors from GCRS to true of date + (CIP/equinox). It is the product rn x rbp, applying frame bias, + precession and nutation in that order. + + 9) The X,Y,Z coordinates of the IAU 2006/2000A Celestial + Intermediate Pole are elements (3,1-3) of the GCRS-to-true + matrix, i.e. rbpn[2][0-2]. + + 10) It is permissible to re-use the same array in the returned + arguments. The arrays are filled in the stated order. + + Called: + eraNut06a nutation, IAU 2006/2000A + eraPn06 bias/precession/nutation results, IAU 2006 + + Reference: + + Capitaine, N. & Wallace, P.T., 2006, Astron.Astrophys. 450, 855 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
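+
+ Examples
+ --------
+ A minimal sketch (not part of the SOFA/ERFA notes above); a common use
+ is to keep only the GCRS-to-true matrix, the last element returned:
+
+ >>> from astropy import _erfa as erfa
+ >>> rbpn = erfa.pn06a(2400000.5, 53736.0)[-1]
+ >>> rbpn.shape
+ (3, 3)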
+ + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + dpsi_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + deps_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + epsa_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + rb_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rp_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rbp_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rn_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + rbpn_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, dpsi_out, deps_out, epsa_out, rb_out[...,0,0], rp_out[...,0,0], rbp_out[...,0,0], rn_out[...,0,0], rbpn_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*8 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._pn06a(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(dpsi_out.shape) > 0 and dpsi_out.shape[0] == 1 + dpsi_out = dpsi_out.reshape(dpsi_out.shape[1:]) + assert len(deps_out.shape) > 0 and deps_out.shape[0] == 1 + deps_out = deps_out.reshape(deps_out.shape[1:]) + assert len(epsa_out.shape) > 0 and epsa_out.shape[0] == 1 + epsa_out = epsa_out.reshape(epsa_out.shape[1:]) + assert len(rb_out.shape) > 0 and rb_out.shape[0] == 1 + rb_out = rb_out.reshape(rb_out.shape[1:]) + assert len(rp_out.shape) > 0 and rp_out.shape[0] == 1 + rp_out = rp_out.reshape(rp_out.shape[1:]) + assert len(rbp_out.shape) > 0 and rbp_out.shape[0] == 1 + rbp_out = rbp_out.reshape(rbp_out.shape[1:]) + assert len(rn_out.shape) > 0 and rn_out.shape[0] == 1 + rn_out = rn_out.reshape(rn_out.shape[1:]) + assert len(rbpn_out.shape) > 0 and rbpn_out.shape[0] == 1 + rbpn_out = rbpn_out.reshape(rbpn_out.shape[1:]) + + return dpsi_out, deps_out, epsa_out, rb_out, rp_out, rbp_out, rn_out, rbpn_out + + +def pnm00a(date1, date2): + """ + Wrapper for ERFA function ``eraPnm00a``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + rbpn : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a P n m 0 0 a + - - - - - - - - - - + + Form the matrix of precession-nutation for a given date (including + frame bias), equinox-based, IAU 2000A model. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + rbpn double[3][3] classical NPB matrix (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. 
For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The matrix operates in the sense V(date) = rbpn * V(GCRS), where + the p-vector V(date) is with respect to the true equatorial triad + of date date1+date2 and the p-vector V(GCRS) is with respect to + the Geocentric Celestial Reference System (IAU, 2000). + + 3) A faster, but slightly less accurate result (about 1 mas), can be + obtained by using instead the eraPnm00b function. + + Called: + eraPn00a bias/precession/nutation, IAU 2000A + + Reference: + + IAU: Trans. International Astronomical Union, Vol. XXIVB; Proc. + 24th General Assembly, Manchester, UK. Resolutions B1.3, B1.6. + (2000) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + rbpn_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, rbpn_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._pnm00a(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rbpn_out.shape) > 0 and rbpn_out.shape[0] == 1 + rbpn_out = rbpn_out.reshape(rbpn_out.shape[1:]) + + return rbpn_out + + +def pnm00b(date1, date2): + """ + Wrapper for ERFA function ``eraPnm00b``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + rbpn : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a P n m 0 0 b + - - - - - - - - - - + + Form the matrix of precession-nutation for a given date (including + frame bias), equinox-based, IAU 2000B model. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + rbpn double[3][3] bias-precession-nutation matrix (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. 
The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The matrix operates in the sense V(date) = rbpn * V(GCRS), where + the p-vector V(date) is with respect to the true equatorial triad + of date date1+date2 and the p-vector V(GCRS) is with respect to + the Geocentric Celestial Reference System (IAU, 2000). + + 3) The present function is faster, but slightly less accurate (about + 1 mas), than the eraPnm00a function. + + Called: + eraPn00b bias/precession/nutation, IAU 2000B + + Reference: + + IAU: Trans. International Astronomical Union, Vol. XXIVB; Proc. + 24th General Assembly, Manchester, UK. Resolutions B1.3, B1.6. + (2000) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + rbpn_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, rbpn_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._pnm00b(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rbpn_out.shape) > 0 and rbpn_out.shape[0] == 1 + rbpn_out = rbpn_out.reshape(rbpn_out.shape[1:]) + + return rbpn_out + + +def pnm06a(date1, date2): + """ + Wrapper for ERFA function ``eraPnm06a``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + rnpb : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a P n m 0 6 a + - - - - - - - - - - + + Form the matrix of precession-nutation for a given date (including + frame bias), IAU 2006 precession and IAU 2000A nutation models. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + rnpb double[3][3] bias-precession-nutation matrix (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. 
+ + 2) The matrix operates in the sense V(date) = rnpb * V(GCRS), where + the p-vector V(date) is with respect to the true equatorial triad + of date date1+date2 and the p-vector V(GCRS) is with respect to + the Geocentric Celestial Reference System (IAU, 2000). + + Called: + eraPfw06 bias-precession F-W angles, IAU 2006 + eraNut06a nutation, IAU 2006/2000A + eraFw2m F-W angles to r-matrix + + Reference: + + Capitaine, N. & Wallace, P.T., 2006, Astron.Astrophys. 450, 855. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + rnpb_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, rnpb_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._pnm06a(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rnpb_out.shape) > 0 and rnpb_out.shape[0] == 1 + rnpb_out = rnpb_out.reshape(rnpb_out.shape[1:]) + + return rnpb_out + + +def pnm80(date1, date2): + """ + Wrapper for ERFA function ``eraPnm80``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + rmatpn : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a P n m 8 0 + - - - - - - - - - + + Form the matrix of precession/nutation for a given date, IAU 1976 + precession model, IAU 1980 nutation model. + + Given: + date1,date2 double TDB date (Note 1) + + Returned: + rmatpn double[3][3] combined precession/nutation matrix + + Notes: + + 1) The TDB date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TDB)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The matrix operates in the sense V(date) = rmatpn * V(J2000), + where the p-vector V(date) is with respect to the true equatorial + triad of date date1+date2 and the p-vector V(J2000) is with + respect to the mean equatorial triad of epoch J2000.0. + + Called: + eraPmat76 precession matrix, IAU 1976 + eraNutm80 nutation matrix, IAU 1980 + eraRxr product of two r-matrices + + Reference: + + Explanatory Supplement to the Astronomical Almanac, + P. Kenneth Seidelmann (ed), University Science Books (1992), + Section 3.3 (p145). 
+ + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + rmatpn_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, rmatpn_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._pnm80(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rmatpn_out.shape) > 0 and rmatpn_out.shape[0] == 1 + rmatpn_out = rmatpn_out.reshape(rmatpn_out.shape[1:]) + + return rmatpn_out + + +def pom00(xp, yp, sp): + """ + Wrapper for ERFA function ``eraPom00``. + + Parameters + ---------- + xp : double array + yp : double array + sp : double array + + Returns + ------- + rpom : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a P o m 0 0 + - - - - - - - - - - + + Form the matrix of polar motion for a given date, IAU 2000. + + Given: + xp,yp double coordinates of the pole (radians, Note 1) + sp double the TIO locator s' (radians, Note 2) + + Returned: + rpom double[3][3] polar-motion matrix (Note 3) + + Notes: + + 1) The arguments xp and yp are the coordinates (in radians) of the + Celestial Intermediate Pole with respect to the International + Terrestrial Reference System (see IERS Conventions 2003), + measured along the meridians to 0 and 90 deg west respectively. + + 2) The argument sp is the TIO locator s', in radians, which + positions the Terrestrial Intermediate Origin on the equator. It + is obtained from polar motion observations by numerical + integration, and so is in essence unpredictable. However, it is + dominated by a secular drift of about 47 microarcseconds per + century, and so can be taken into account by using s' = -47*t, + where t is centuries since J2000.0. The function eraSp00 + implements this approximation. + + 3) The matrix operates in the sense V(TRS) = rpom * V(CIP), meaning + that it is the final rotation when computing the pointing + direction to a celestial source. + + Called: + eraIr initialize r-matrix to identity + eraRz rotate around Z-axis + eraRy rotate around Y-axis + eraRx rotate around X-axis + + Reference: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
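+ + Examples + -------- + A minimal usage sketch, not part of the ERFA documentation above: it assumes re-export through ``astropy._erfa``; the pole coordinates are hypothetical values chosen only for illustration, and ``sp00`` (defined later in this module) supplies the s' approximation mentioned in note 2:: + + >>> import numpy as np + >>> from astropy._erfa import pom00, sp00 + >>> as2r = np.radians(1.0) / 3600.0 # arcseconds to radians + >>> xp, yp = 0.035 * as2r, 0.483 * as2r # hypothetical pole coordinates + >>> sp = sp00(2400000.5, 52541.0) # TIO locator s' at an illustrative epoch + >>> rpom = pom00(xp, yp, sp) # 3x3 polar-motion matrix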
+ + """ + + #Turn all inputs into arrays + xp_in = numpy.array(xp, dtype=numpy.double, order="C", copy=False, subok=True) + yp_in = numpy.array(yp, dtype=numpy.double, order="C", copy=False, subok=True) + sp_in = numpy.array(sp, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), xp_in, yp_in, sp_in) + rpom_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [xp_in, yp_in, sp_in, rpom_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._pom00(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rpom_out.shape) > 0 and rpom_out.shape[0] == 1 + rpom_out = rpom_out.reshape(rpom_out.shape[1:]) + + return rpom_out + + +def pr00(date1, date2): + """ + Wrapper for ERFA function ``eraPr00``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + dpsipr : double array + depspr : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a P r 0 0 + - - - - - - - - + + Precession-rate part of the IAU 2000 precession-nutation models + (part of MHB2000). + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + dpsipr,depspr double precession corrections (Notes 2,3) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The precession adjustments are expressed as "nutation + components", corrections in longitude and obliquity with respect + to the J2000.0 equinox and ecliptic. + + 3) Although the precession adjustments are stated to be with respect + to Lieske et al. (1977), the MHB2000 model does not specify which + set of Euler angles are to be used and how the adjustments are to + be applied. The most literal and straightforward procedure is to + adopt the 4-rotation epsilon_0, psi_A, omega_A, xi_A option, and + to add dpsipr to psi_A and depspr to both omega_A and eps_A. + + 4) This is an implementation of one aspect of the IAU 2000A nutation + model, formally adopted by the IAU General Assembly in 2000, + namely MHB2000 (Mathews et al. 2002). + + References: + + Lieske, J.H., Lederle, T., Fricke, W. 
& Morando, B., "Expressions + for the precession quantities based upon the IAU (1976) System of + Astronomical Constants", Astron.Astrophys., 58, 1-16 (1977) + + Mathews, P.M., Herring, T.A., Buffett, B.A., "Modeling of nutation + and precession: New nutation series for nonrigid Earth and + insights into the Earth's interior", J.Geophys.Res., 107, B4, + 2002. The MHB2000 code itself was obtained on 9th September 2002 + from ftp://maia.usno.navy.mil/conv2000/chapter5/IAU2000A. + + Wallace, P.T., "Software for Implementing the IAU 2000 + Resolutions", in IERS Workshop 5.1 (2002). + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + dpsipr_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + depspr_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, dpsipr_out, depspr_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._pr00(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(dpsipr_out.shape) > 0 and dpsipr_out.shape[0] == 1 + dpsipr_out = dpsipr_out.reshape(dpsipr_out.shape[1:]) + assert len(depspr_out.shape) > 0 and depspr_out.shape[0] == 1 + depspr_out = depspr_out.reshape(depspr_out.shape[1:]) + + return dpsipr_out, depspr_out + + +def prec76(date01, date02, date11, date12): + """ + Wrapper for ERFA function ``eraPrec76``. + + Parameters + ---------- + date01 : double array + date02 : double array + date11 : double array + date12 : double array + + Returns + ------- + zeta : double array + z : double array + theta : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a P r e c 7 6 + - - - - - - - - - - + + IAU 1976 precession model. + + This function forms the three Euler angles which implement general + precession between two dates, using the IAU 1976 model (as for the + FK5 catalog). + + Given: + date01,date02 double TDB starting date (Note 1) + date11,date12 double TDB ending date (Note 1) + + Returned: + zeta double 1st rotation: radians cw around z + z double 3rd rotation: radians cw around z + theta double 2nd rotation: radians ccw around y + + Notes: + + 1) The dates date01+date02 and date11+date12 are Julian Dates, + apportioned in any convenient way between the arguments daten1 + and daten2. For example, JD(TDB)=2450123.7 could be expressed in + any of these ways, among others: + + daten1 daten2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in cases + where the loss of several decimal digits of resolution is + acceptable.
The J2000 method is best matched to the way the + argument is handled internally and will deliver the optimum + resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + The two dates may be expressed using different methods, but at + the risk of losing some resolution. + + 2) The accumulated precession angles zeta, z, theta are expressed + through canonical polynomials which are valid only for a limited + time span. In addition, the IAU 1976 precession rate is known to + be imperfect. The absolute accuracy of the present formulation + is better than 0.1 arcsec from 1960AD to 2040AD, better than + 1 arcsec from 1640AD to 2360AD, and remains below 3 arcsec for + the whole of the period 500BC to 3000AD. The errors exceed + 10 arcsec outside the range 1200BC to 3900AD, exceed 100 arcsec + outside 4200BC to 5600AD and exceed 1000 arcsec outside 6800BC to + 8200AD. + + 3) The three angles are returned in the conventional order, which + is not the same as the order of the corresponding Euler + rotations. The precession matrix is + R_3(-z) x R_2(+theta) x R_3(-zeta). + + Reference: + + Lieske, J.H., 1979, Astron.Astrophys. 73, 282, equations + (6) & (7), p283. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date01_in = numpy.array(date01, dtype=numpy.double, order="C", copy=False, subok=True) + date02_in = numpy.array(date02, dtype=numpy.double, order="C", copy=False, subok=True) + date11_in = numpy.array(date11, dtype=numpy.double, order="C", copy=False, subok=True) + date12_in = numpy.array(date12, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date01_in, date02_in, date11_in, date12_in) + zeta_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + z_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + theta_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date01_in, date02_in, date11_in, date12_in, zeta_out, z_out, theta_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._prec76(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(zeta_out.shape) > 0 and zeta_out.shape[0] == 1 + zeta_out = zeta_out.reshape(zeta_out.shape[1:]) + assert len(z_out.shape) > 0 and z_out.shape[0] == 1 + z_out = z_out.reshape(z_out.shape[1:]) + assert len(theta_out.shape) > 0 and theta_out.shape[0] == 1 + theta_out = theta_out.reshape(theta_out.shape[1:]) + + return zeta_out, z_out, theta_out + + +def s00(date1, date2, x, y): + """ + Wrapper for ERFA function ``eraS00``. + + Parameters + ---------- + date1 : double array + date2 : double array + x : double array + y : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below.
+ + - - - - - - - + e r a S 0 0 + - - - - - - - + + The CIO locator s, positioning the Celestial Intermediate Origin on + the equator of the Celestial Intermediate Pole, given the CIP's X,Y + coordinates. Compatible with IAU 2000A precession-nutation. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + x,y double CIP coordinates (Note 3) + + Returned (function value): + double the CIO locator s in radians (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The CIO locator s is the difference between the right ascensions + of the same point in two systems: the two systems are the GCRS + and the CIP,CIO, and the point is the ascending node of the + CIP equator. The quantity s remains below 0.1 arcsecond + throughout 1900-2100. + + 3) The series used to compute s is in fact for s+XY/2, where X and Y + are the x and y components of the CIP unit vector; this series + is more compact than a direct series for s would be. This + function requires X,Y to be supplied by the caller, who is + responsible for providing values that are consistent with the + supplied date. + + 4) The model is consistent with the IAU 2000A precession-nutation. + + Called: + eraFal03 mean anomaly of the Moon + eraFalp03 mean anomaly of the Sun + eraFaf03 mean argument of the latitude of the Moon + eraFad03 mean elongation of the Moon from the Sun + eraFaom03 mean longitude of the Moon's ascending node + eraFave03 mean longitude of Venus + eraFae03 mean longitude of Earth + eraFapa03 general accumulated precession in longitude + + References: + + Capitaine, N., Chapront, J., Lambert, S. and Wallace, P., + "Expressions for the Celestial Intermediate Pole and Celestial + Ephemeris Origin consistent with the IAU 2000A precession- + nutation model", Astron.Astrophys. 400, 1145-1154 (2003) + + n.b. The celestial ephemeris origin (CEO) was renamed "celestial + intermediate origin" (CIO) by IAU 2006 Resolution 2. + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
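+ + Examples + -------- + A minimal sketch of supplying X,Y values consistent with the date, via the eraPnm00a/eraBpn2xy route that eraS00a (below) uses internally; it assumes the companion wrappers ``pnm00a`` and ``bpn2xy`` are re-exported through ``astropy._erfa``, and the epoch is illustrative:: + + >>> from astropy._erfa import pnm00a, bpn2xy, s00 + >>> d1, d2 = 2400000.5, 53736.0 # illustrative epoch + >>> rbpn = pnm00a(d1, d2) # classical NPB matrix + >>> x, y = bpn2xy(rbpn) # CIP X,Y consistent with the date + >>> s = s00(d1, d2, x, y) # the CIO locator s, in radians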
+ + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + x_in = numpy.array(x, dtype=numpy.double, order="C", copy=False, subok=True) + y_in = numpy.array(y, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in, x_in, y_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, x_in, y_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._s00(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def s00a(date1, date2): + """ + Wrapper for ERFA function ``eraS00a``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a S 0 0 a + - - - - - - - - + + The CIO locator s, positioning the Celestial Intermediate Origin on + the equator of the Celestial Intermediate Pole, using the IAU 2000A + precession-nutation model. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned (function value): + double the CIO locator s in radians (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The CIO locator s is the difference between the right ascensions + of the same point in two systems. The two systems are the GCRS + and the CIP,CIO, and the point is the ascending node of the + CIP equator. The CIO locator s remains a small fraction of + 1 arcsecond throughout 1900-2100. + + 3) The series used to compute s is in fact for s+XY/2, where X and Y + are the x and y components of the CIP unit vector; this series + is more compact than a direct series for s would be. The present + function uses the full IAU 2000A nutation model when predicting + the CIP position. Faster results, with no significant loss of + accuracy, can be obtained via the function eraS00b, which uses + instead the IAU 2000B truncated model. 
+ + Called: + eraPnm00a classical NPB matrix, IAU 2000A + eraBpn2xy extract CIP X,Y from the BPN matrix + eraS00 the CIO locator s, given X,Y, IAU 2000A + + References: + + Capitaine, N., Chapront, J., Lambert, S. and Wallace, P., + "Expressions for the Celestial Intermediate Pole and Celestial + Ephemeris Origin consistent with the IAU 2000A precession- + nutation model", Astron.Astrophys. 400, 1145-1154 (2003) + + n.b. The celestial ephemeris origin (CEO) was renamed "celestial + intermediate origin" (CIO) by IAU 2006 Resolution 2. + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._s00a(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def s00b(date1, date2): + """ + Wrapper for ERFA function ``eraS00b``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a S 0 0 b + - - - - - - - - + + The CIO locator s, positioning the Celestial Intermediate Origin on + the equator of the Celestial Intermediate Pole, using the IAU 2000B + precession-nutation model. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned (function value): + double the CIO locator s in radians (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The CIO locator s is the difference between the right ascensions + of the same point in two systems. The two systems are the GCRS + and the CIP,CIO, and the point is the ascending node of the + CIP equator.
The CIO locator s remains a small fraction of + 1 arcsecond throughout 1900-2100. + + 3) The series used to compute s is in fact for s+XY/2, where X and Y + are the x and y components of the CIP unit vector; this series + is more compact than a direct series for s would be. The present + function uses the IAU 2000B truncated nutation model when + predicting the CIP position. The function eraS00a uses instead + the full IAU 2000A model, but with no significant increase in + accuracy and at some cost in speed. + + Called: + eraPnm00b classical NPB matrix, IAU 2000B + eraBpn2xy extract CIP X,Y from the BPN matrix + eraS00 the CIO locator s, given X,Y, IAU 2000A + + References: + + Capitaine, N., Chapront, J., Lambert, S. and Wallace, P., + "Expressions for the Celestial Intermediate Pole and Celestial + Ephemeris Origin consistent with the IAU 2000A precession- + nutation model", Astron.Astrophys. 400, 1145-1154 (2003) + + n.b. The celestial ephemeris origin (CEO) was renamed "celestial + intermediate origin" (CIO) by IAU 2006 Resolution 2. + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._s00b(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def s06(date1, date2, x, y): + """ + Wrapper for ERFA function ``eraS06``. + + Parameters + ---------- + date1 : double array + date2 : double array + x : double array + y : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - + e r a S 0 6 + - - - - - - - + + The CIO locator s, positioning the Celestial Intermediate Origin on + the equator of the Celestial Intermediate Pole, given the CIP's X,Y + coordinates. Compatible with IAU 2006/2000A precession-nutation. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + x,y double CIP coordinates (Note 3) + + Returned (function value): + double the CIO locator s in radians (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments.
For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The CIO locator s is the difference between the right ascensions + of the same point in two systems: the two systems are the GCRS + and the CIP,CIO, and the point is the ascending node of the + CIP equator. The quantity s remains below 0.1 arcsecond + throughout 1900-2100. + + 3) The series used to compute s is in fact for s+XY/2, where X and Y + are the x and y components of the CIP unit vector; this series + is more compact than a direct series for s would be. This + function requires X,Y to be supplied by the caller, who is + responsible for providing values that are consistent with the + supplied date. + + 4) The model is consistent with the "P03" precession (Capitaine et + al. 2003), adopted by IAU 2006 Resolution 1, 2006, and the + IAU 2000A nutation (with P03 adjustments). + + Called: + eraFal03 mean anomaly of the Moon + eraFalp03 mean anomaly of the Sun + eraFaf03 mean argument of the latitude of the Moon + eraFad03 mean elongation of the Moon from the Sun + eraFaom03 mean longitude of the Moon's ascending node + eraFave03 mean longitude of Venus + eraFae03 mean longitude of Earth + eraFapa03 general accumulated precession in longitude + + References: + + Capitaine, N., Wallace, P.T. & Chapront, J., 2003, Astron. + Astrophys. 432, 355 + + McCarthy, D.D., Petit, G. (eds.) 2004, IERS Conventions (2003), + IERS Technical Note No. 32, BKG + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + x_in = numpy.array(x, dtype=numpy.double, order="C", copy=False, subok=True) + y_in = numpy.array(y, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in, x_in, y_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, x_in, y_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._s06(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def s06a(date1, date2): + """ + Wrapper for ERFA function ``eraS06a``. 
+ + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a S 0 6 a + - - - - - - - - + + The CIO locator s, positioning the Celestial Intermediate Origin on + the equator of the Celestial Intermediate Pole, using the IAU 2006 + precession and IAU 2000A nutation models. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned (function value): + double the CIO locator s in radians (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The CIO locator s is the difference between the right ascensions + of the same point in two systems. The two systems are the GCRS + and the CIP,CIO, and the point is the ascending node of the + CIP equator. The CIO locator s remains a small fraction of + 1 arcsecond throughout 1900-2100. + + 3) The series used to compute s is in fact for s+XY/2, where X and Y + are the x and y components of the CIP unit vector; this series is + more compact than a direct series for s would be. The present + function uses the full IAU 2000A nutation model when predicting + the CIP position. + + Called: + eraPnm06a classical NPB matrix, IAU 2006/2000A + eraBpn2xy extract CIP X,Y coordinates from NPB matrix + eraS06 the CIO locator s, given X,Y, IAU 2006 + + References: + + Capitaine, N., Chapront, J., Lambert, S. and Wallace, P., + "Expressions for the Celestial Intermediate Pole and Celestial + Ephemeris Origin consistent with the IAU 2000A precession- + nutation model", Astron.Astrophys. 400, 1145-1154 (2003) + + n.b. The celestial ephemeris origin (CEO) was renamed "celestial + intermediate origin" (CIO) by IAU 2006 Resolution 2. + + Capitaine, N. & Wallace, P.T., 2006, Astron.Astrophys. 450, 855 + + McCarthy, D. D., Petit, G. (eds.), 2004, IERS Conventions (2003), + IERS Technical Note No. 32, BKG + + Wallace, P.T. & Capitaine, N., 2006, Astron.Astrophys. 459, 981 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
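+ + Examples + -------- + A minimal usage sketch, not part of the ERFA documentation above: like the other wrappers in this module, the inputs broadcast, so an array of date parts yields an array of results (assumes re-export through ``astropy._erfa``; the epochs are illustrative):: + + >>> import numpy as np + >>> from astropy._erfa import s06a + >>> d2 = np.array([53736.0, 53737.0]) # two illustrative MJD-method date parts + >>> s = s06a(2400000.5, d2) # CIO locator s for each date, in radians, shape (2,)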
+ + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._s06a(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def sp00(date1, date2): + """ + Wrapper for ERFA function ``eraSp00``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a S p 0 0 + - - - - - - - - + + The TIO locator s', positioning the Terrestrial Intermediate Origin + on the equator of the Celestial Intermediate Pole. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned (function value): + double the TIO locator s' in radians (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The TIO locator s' is obtained from polar motion observations by + numerical integration, and so is in essence unpredictable. + However, it is dominated by a secular drift of about + 47 microarcseconds per century, which is the approximation + evaluated by the present function. + + Reference: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._sp00(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def xy06(date1, date2): + """ + Wrapper for ERFA function ``eraXy06``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + x : double array + y : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a X y 0 6 + - - - - - - - - + + X,Y coordinates of celestial intermediate pole from series based + on IAU 2006 precession and IAU 2000A nutation. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + x,y double CIP X,Y coordinates (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The X,Y coordinates are those of the unit vector towards the + celestial intermediate pole. They represent the combined effects + of frame bias, precession and nutation. + + 3) The fundamental arguments used are as adopted in IERS Conventions + (2003) and are from Simon et al. (1994) and Souchay et al. + (1999). + + 4) This is an alternative to the angles-based method, via the ERFA + function eraFw2xy and as used in eraXys06a for example. The two + methods agree at the 1 microarcsecond level (at present), a + negligible amount compared with the intrinsic accuracy of the + models. However, it would be unwise to mix the two methods + (angles-based and series-based) in a single application. 
+ + Called: + eraFal03 mean anomaly of the Moon + eraFalp03 mean anomaly of the Sun + eraFaf03 mean argument of the latitude of the Moon + eraFad03 mean elongation of the Moon from the Sun + eraFaom03 mean longitude of the Moon's ascending node + eraFame03 mean longitude of Mercury + eraFave03 mean longitude of Venus + eraFae03 mean longitude of Earth + eraFama03 mean longitude of Mars + eraFaju03 mean longitude of Jupiter + eraFasa03 mean longitude of Saturn + eraFaur03 mean longitude of Uranus + eraFane03 mean longitude of Neptune + eraFapa03 general accumulated precession in longitude + + References: + + Capitaine, N., Wallace, P.T. & Chapront, J., 2003, + Astron.Astrophys., 412, 567 + + Capitaine, N. & Wallace, P.T., 2006, Astron.Astrophys. 450, 855 + + McCarthy, D. D., Petit, G. (eds.), 2004, IERS Conventions (2003), + IERS Technical Note No. 32, BKG + + Simon, J.L., Bretagnon, P., Chapront, J., Chapront-Touze, M., + Francou, G. & Laskar, J., Astron.Astrophys., 1994, 282, 663 + + Souchay, J., Loysel, B., Kinoshita, H., Folgueira, M., 1999, + Astron.Astrophys.Supp.Ser. 135, 111 + + Wallace, P.T. & Capitaine, N., 2006, Astron.Astrophys. 459, 981 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + x_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + y_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, x_out, y_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._xy06(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(x_out.shape) > 0 and x_out.shape[0] == 1 + x_out = x_out.reshape(x_out.shape[1:]) + assert len(y_out.shape) > 0 and y_out.shape[0] == 1 + y_out = y_out.reshape(y_out.shape[1:]) + + return x_out, y_out + + +def xys00a(date1, date2): + """ + Wrapper for ERFA function ``eraXys00a``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + x : double array + y : double array + s : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a X y s 0 0 a + - - - - - - - - - - + + For a given TT date, compute the X,Y coordinates of the Celestial + Intermediate Pole and the CIO locator s, using the IAU 2000A + precession-nutation model. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + x,y double Celestial Intermediate Pole (Note 2) + s double the CIO locator s (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. 
For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The Celestial Intermediate Pole coordinates are the x,y + components of the unit vector in the Geocentric Celestial + Reference System. + + 3) The CIO locator s (in radians) positions the Celestial + Intermediate Origin on the equator of the CIP. + + 4) A faster, but slightly less accurate result (about 1 mas for + X,Y), can be obtained by using instead the eraXys00b function. + + Called: + eraPnm00a classical NPB matrix, IAU 2000A + eraBpn2xy extract CIP X,Y coordinates from NPB matrix + eraS00 the CIO locator s, given X,Y, IAU 2000A + + Reference: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + x_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + y_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + s_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, x_out, y_out, s_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._xys00a(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(x_out.shape) > 0 and x_out.shape[0] == 1 + x_out = x_out.reshape(x_out.shape[1:]) + assert len(y_out.shape) > 0 and y_out.shape[0] == 1 + y_out = y_out.reshape(y_out.shape[1:]) + assert len(s_out.shape) > 0 and s_out.shape[0] == 1 + s_out = s_out.reshape(s_out.shape[1:]) + + return x_out, y_out, s_out + + +def xys00b(date1, date2): + """ + Wrapper for ERFA function ``eraXys00b``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + x : double array + y : double array + s : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a X y s 0 0 b + - - - - - - - - - - + + For a given TT date, compute the X,Y coordinates of the Celestial + Intermediate Pole and the CIO locator s, using the IAU 2000B + precession-nutation model. 
+ + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + x,y double Celestial Intermediate Pole (Note 2) + s double the CIO locator s (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The Celestial Intermediate Pole coordinates are the x,y + components of the unit vector in the Geocentric Celestial + Reference System. + + 3) The CIO locator s (in radians) positions the Celestial + Intermediate Origin on the equator of the CIP. + + 4) The present function is faster, but slightly less accurate (about + 1 mas in X,Y), than the eraXys00a function. + + Called: + eraPnm00b classical NPB matrix, IAU 2000B + eraBpn2xy extract CIP X,Y coordinates from NPB matrix + eraS00 the CIO locator s, given X,Y, IAU 2000A + + Reference: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + x_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + y_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + s_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, x_out, y_out, s_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._xys00b(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(x_out.shape) > 0 and x_out.shape[0] == 1 + x_out = x_out.reshape(x_out.shape[1:]) + assert len(y_out.shape) > 0 and y_out.shape[0] == 1 + y_out = y_out.reshape(y_out.shape[1:]) + assert len(s_out.shape) > 0 and s_out.shape[0] == 1 + s_out = s_out.reshape(s_out.shape[1:]) + + return x_out, y_out, s_out + + +def xys06a(date1, date2): + """ + Wrapper for ERFA function ``eraXys06a``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + x : double array + y : double array + s : double array + + Notes + ----- + The ERFA documentation is below. 
+ + - - - - - - - - - - + e r a X y s 0 6 a + - - - - - - - - - - + + For a given TT date, compute the X,Y coordinates of the Celestial + Intermediate Pole and the CIO locator s, using the IAU 2006 + precession and IAU 2000A nutation models. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned: + x,y double Celestial Intermediate Pole (Note 2) + s double the CIO locator s (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The Celestial Intermediate Pole coordinates are the x,y components + of the unit vector in the Geocentric Celestial Reference System. + + 3) The CIO locator s (in radians) positions the Celestial + Intermediate Origin on the equator of the CIP. + + 4) Series-based solutions for generating X and Y are also available: + see Capitaine & Wallace (2006) and eraXy06. + + Called: + eraPnm06a classical NPB matrix, IAU 2006/2000A + eraBpn2xy extract CIP X,Y coordinates from NPB matrix + eraS06 the CIO locator s, given X,Y, IAU 2006 + + References: + + Capitaine, N. & Wallace, P.T., 2006, Astron.Astrophys. 450, 855 + + Wallace, P.T. & Capitaine, N., 2006, Astron.Astrophys. 459, 981 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + x_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + y_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + s_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, x_out, y_out, s_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._xys06a(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(x_out.shape) > 0 and x_out.shape[0] == 1 + x_out = x_out.reshape(x_out.shape[1:]) + assert len(y_out.shape) > 0 and y_out.shape[0] == 1 + y_out = y_out.reshape(y_out.shape[1:]) + assert len(s_out.shape) > 0 and s_out.shape[0] == 1 + s_out = s_out.reshape(s_out.shape[1:]) + + return x_out, y_out, s_out + + +def ee00(date1, date2, epsa, dpsi): + """ + Wrapper for ERFA function ``eraEe00``. 
+ + Parameters + ---------- + date1 : double array + date2 : double array + epsa : double array + dpsi : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a E e 0 0 + - - - - - - - - + + The equation of the equinoxes, compatible with IAU 2000 resolutions, + given the nutation in longitude and the mean obliquity. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + epsa double mean obliquity (Note 2) + dpsi double nutation in longitude (Note 3) + + Returned (function value): + double equation of the equinoxes (Note 4) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The obliquity, in radians, is mean of date. + + 3) The result, which is in radians, operates in the following sense: + + Greenwich apparent ST = GMST + equation of the equinoxes + + 4) The result is compatible with the IAU 2000 resolutions. For + further details, see IERS Conventions 2003 and Capitaine et al. + (2002). + + Called: + eraEect00 equation of the equinoxes complementary terms + + References: + + Capitaine, N., Wallace, P.T. and McCarthy, D.D., "Expressions to + implement the IAU 2000 definition of UT1", Astronomy & + Astrophysics, 406, 1135-1149 (2003) + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
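+
+    Examples
+    --------
+    A minimal usage sketch. The obliquity and nutation inputs are taken
+    here from the companion wrappers ``obl80`` and ``nut00a`` in this
+    module (the combination used by ``ee00a``, which additionally applies
+    the IAU 2000 precession adjustments omitted from this sketch); any
+    other source of a mean obliquity and a nutation in longitude works:
+
+    >>> from astropy._erfa import ee00, nut00a, obl80
+    >>> d1, d2 = 2450123.5, 0.2         # TT, "date & time" split (Note 1)
+    >>> epsa = obl80(d1, d2)            # mean obliquity of date (radians)
+    >>> dpsi, deps = nut00a(d1, d2)     # nutation in longitude/obliquity
+    >>> ee = ee00(d1, d2, epsa, dpsi)   # equation of the equinoxes (radians)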
+ + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + epsa_in = numpy.array(epsa, dtype=numpy.double, order="C", copy=False, subok=True) + dpsi_in = numpy.array(dpsi, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in, epsa_in, dpsi_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, epsa_in, dpsi_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._ee00(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def ee00a(date1, date2): + """ + Wrapper for ERFA function ``eraEe00a``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a E e 0 0 a + - - - - - - - - - + + Equation of the equinoxes, compatible with IAU 2000 resolutions. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned (function value): + double equation of the equinoxes (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The result, which is in radians, operates in the following sense: + + Greenwich apparent ST = GMST + equation of the equinoxes + + 3) The result is compatible with the IAU 2000 resolutions. For + further details, see IERS Conventions 2003 and Capitaine et al. + (2002). + + Called: + eraPr00 IAU 2000 precession adjustments + eraObl80 mean obliquity, IAU 1980 + eraNut00a nutation, IAU 2000A + eraEe00 equation of the equinoxes, IAU 2000 + + References: + + Capitaine, N., Wallace, P.T. and McCarthy, D.D., "Expressions to + implement the IAU 2000 definition of UT1", Astronomy & + Astrophysics, 406, 1135-1149 (2003). + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004). + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._ee00a(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def ee00b(date1, date2): + """ + Wrapper for ERFA function ``eraEe00b``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a E e 0 0 b + - - - - - - - - - + + Equation of the equinoxes, compatible with IAU 2000 resolutions but + using the truncated nutation model IAU 2000B. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned (function value): + double equation of the equinoxes (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The result, which is in radians, operates in the following sense: + + Greenwich apparent ST = GMST + equation of the equinoxes + + 3) The result is compatible with the IAU 2000 resolutions except + that accuracy has been compromised for the sake of speed. For + further details, see McCarthy & Luzum (2001), IERS Conventions + 2003 and Capitaine et al. (2003). + + Called: + eraPr00 IAU 2000 precession adjustments + eraObl80 mean obliquity, IAU 1980 + eraNut00b nutation, IAU 2000B + eraEe00 equation of the equinoxes, IAU 2000 + + References: + + Capitaine, N., Wallace, P.T. and McCarthy, D.D., "Expressions to + implement the IAU 2000 definition of UT1", Astronomy & + Astrophysics, 406, 1135-1149 (2003) + + McCarthy, D.D. & Luzum, B.J., "An abridged model of the + precession-nutation of the celestial pole", Celestial Mechanics & + Dynamical Astronomy, 85, 37-49 (2003) + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. 
See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._ee00b(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def ee06a(date1, date2): + """ + Wrapper for ERFA function ``eraEe06a``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a E e 0 6 a + - - - - - - - - - + + Equation of the equinoxes, compatible with IAU 2000 resolutions and + IAU 2006/2000A precession-nutation. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned (function value): + double equation of the equinoxes (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The result, which is in radians, operates in the following sense: + + Greenwich apparent ST = GMST + equation of the equinoxes + + Called: + eraAnpm normalize angle into range +/- pi + eraGst06a Greenwich apparent sidereal time, IAU 2006/2000A + eraGmst06 Greenwich mean sidereal time, IAU 2006 + + Reference: + + McCarthy, D. D., Petit, G. (eds.), 2004, IERS Conventions (2003), + IERS Technical Note No. 32, BKG + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
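+
+    Examples
+    --------
+    A minimal usage sketch; scalar and array inputs broadcast against
+    each other in the usual numpy way:
+
+    >>> from astropy._erfa import ee06a
+    >>> import numpy as np
+    >>> ee = ee06a(2451545.0, np.array([-1421.3, -1420.3]))  # J2000 split
+    >>> ee.shape
+    (2,)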
+ + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._ee06a(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def eect00(date1, date2): + """ + Wrapper for ERFA function ``eraEect00``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a E e c t 0 0 + - - - - - - - - - - + + Equation of the equinoxes complementary terms, consistent with + IAU 2000 resolutions. + + Given: + date1,date2 double TT as a 2-part Julian Date (Note 1) + + Returned (function value): + double complementary terms (Note 2) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The "complementary terms" are part of the equation of the + equinoxes (EE), classically the difference between apparent and + mean Sidereal Time: + + GAST = GMST + EE + + with: + + EE = dpsi * cos(eps) + + where dpsi is the nutation in longitude and eps is the obliquity + of date. However, if the rotation of the Earth were constant in + an inertial frame the classical formulation would lead to + apparent irregularities in the UT1 timescale traceable to side- + effects of precession-nutation. In order to eliminate these + effects from UT1, "complementary terms" were introduced in 1994 + (IAU, 1994) and took effect from 1997 (Capitaine and Gontier, + 1993): + + GAST = GMST + CT + EE + + By convention, the complementary terms are included as part of + the equation of the equinoxes rather than as part of the mean + Sidereal Time. This slightly compromises the "geometrical" + interpretation of mean sidereal time but is otherwise + inconsequential. + + The present function computes CT in the above expression, + compatible with IAU 2000 resolutions (Capitaine et al., 2002, and + IERS Conventions 2003). 
+ + Called: + eraFal03 mean anomaly of the Moon + eraFalp03 mean anomaly of the Sun + eraFaf03 mean argument of the latitude of the Moon + eraFad03 mean elongation of the Moon from the Sun + eraFaom03 mean longitude of the Moon's ascending node + eraFave03 mean longitude of Venus + eraFae03 mean longitude of Earth + eraFapa03 general accumulated precession in longitude + + References: + + Capitaine, N. & Gontier, A.-M., Astron. Astrophys., 275, + 645-650 (1993) + + Capitaine, N., Wallace, P.T. and McCarthy, D.D., "Expressions to + implement the IAU 2000 definition of UT1", Astronomy & + Astrophysics, 406, 1135-1149 (2003) + + IAU Resolution C7, Recommendation 3 (1994) + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._eect00(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def eqeq94(date1, date2): + """ + Wrapper for ERFA function ``eraEqeq94``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a E q e q 9 4 + - - - - - - - - - - + + Equation of the equinoxes, IAU 1994 model. + + Given: + date1,date2 double TDB date (Note 1) + + Returned (function value): + double equation of the equinoxes (Note 2) + + Notes: + + 1) The date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. 
+ + 2) The result, which is in radians, operates in the following sense: + + Greenwich apparent ST = GMST + equation of the equinoxes + + Called: + eraAnpm normalize angle into range +/- pi + eraNut80 nutation, IAU 1980 + eraObl80 mean obliquity, IAU 1980 + + References: + + IAU Resolution C7, Recommendation 3 (1994). + + Capitaine, N. & Gontier, A.-M., 1993, Astron. Astrophys., 275, + 645-650. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._eqeq94(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def era00(dj1, dj2): + """ + Wrapper for ERFA function ``eraEra00``. + + Parameters + ---------- + dj1 : double array + dj2 : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a E r a 0 0 + - - - - - - - - - + + Earth rotation angle (IAU 2000 model). + + Given: + dj1,dj2 double UT1 as a 2-part Julian Date (see note) + + Returned (function value): + double Earth rotation angle (radians), range 0-2pi + + Notes: + + 1) The UT1 date dj1+dj2 is a Julian Date, apportioned in any + convenient way between the arguments dj1 and dj2. For example, + JD(UT1)=2450123.7 could be expressed in any of these ways, + among others: + + dj1 dj2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 and MJD methods are good compromises + between resolution and convenience. The date & time method is + best matched to the algorithm used: maximum precision is + delivered when the dj1 argument is for 0hrs UT1 on the day in + question and the dj2 argument lies in the range 0 to 1, or vice + versa. + + 2) The algorithm is adapted from Expression 22 of Capitaine et al. + 2000. The time argument has been expressed in days directly, + and, to retain precision, integer contributions have been + eliminated. The same formulation is given in IERS Conventions + (2003), Chap. 5, Eq. 14. + + Called: + eraAnp normalize angle into range 0 to 2pi + + References: + + Capitaine N., Guinot B. and McCarthy D.D, 2000, Astron. + Astrophys., 355, 398-405. + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 
32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + dj1_in = numpy.array(dj1, dtype=numpy.double, order="C", copy=False, subok=True) + dj2_in = numpy.array(dj2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), dj1_in, dj2_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [dj1_in, dj2_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._era00(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def gmst00(uta, utb, tta, ttb): + """ + Wrapper for ERFA function ``eraGmst00``. + + Parameters + ---------- + uta : double array + utb : double array + tta : double array + ttb : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a G m s t 0 0 + - - - - - - - - - - + + Greenwich mean sidereal time (model consistent with IAU 2000 + resolutions). + + Given: + uta,utb double UT1 as a 2-part Julian Date (Notes 1,2) + tta,ttb double TT as a 2-part Julian Date (Notes 1,2) + + Returned (function value): + double Greenwich mean sidereal time (radians) + + Notes: + + 1) The UT1 and TT dates uta+utb and tta+ttb respectively, are both + Julian Dates, apportioned in any convenient way between the + argument pairs. For example, JD=2450123.7 could be expressed in + any of these ways, among others: + + Part A Part B + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable (in the case of UT; the TT is not at all critical + in this respect). The J2000 and MJD methods are good compromises + between resolution and convenience. For UT, the date & time + method is best matched to the algorithm that is used by the Earth + Rotation Angle function, called internally: maximum precision is + delivered when the uta argument is for 0hrs UT1 on the day in + question and the utb argument lies in the range 0 to 1, or vice + versa. + + 2) Both UT1 and TT are required, UT1 to predict the Earth rotation + and TT to predict the effects of precession. If UT1 is used for + both purposes, errors of order 100 microarcseconds result. + + 3) This GMST is compatible with the IAU 2000 resolutions and must be + used only in conjunction with other IAU 2000 compatible + components such as precession-nutation and equation of the + equinoxes. + + 4) The result is returned in the range 0 to 2pi. + + 5) The algorithm is from Capitaine et al. (2003) and IERS + Conventions 2003. 
+ + Called: + eraEra00 Earth rotation angle, IAU 2000 + eraAnp normalize angle into range 0 to 2pi + + References: + + Capitaine, N., Wallace, P.T. and McCarthy, D.D., "Expressions to + implement the IAU 2000 definition of UT1", Astronomy & + Astrophysics, 406, 1135-1149 (2003) + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + uta_in = numpy.array(uta, dtype=numpy.double, order="C", copy=False, subok=True) + utb_in = numpy.array(utb, dtype=numpy.double, order="C", copy=False, subok=True) + tta_in = numpy.array(tta, dtype=numpy.double, order="C", copy=False, subok=True) + ttb_in = numpy.array(ttb, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), uta_in, utb_in, tta_in, ttb_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [uta_in, utb_in, tta_in, ttb_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._gmst00(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def gmst06(uta, utb, tta, ttb): + """ + Wrapper for ERFA function ``eraGmst06``. + + Parameters + ---------- + uta : double array + utb : double array + tta : double array + ttb : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a G m s t 0 6 + - - - - - - - - - - + + Greenwich mean sidereal time (consistent with IAU 2006 precession). + + Given: + uta,utb double UT1 as a 2-part Julian Date (Notes 1,2) + tta,ttb double TT as a 2-part Julian Date (Notes 1,2) + + Returned (function value): + double Greenwich mean sidereal time (radians) + + Notes: + + 1) The UT1 and TT dates uta+utb and tta+ttb respectively, are both + Julian Dates, apportioned in any convenient way between the + argument pairs. For example, JD=2450123.7 could be expressed in + any of these ways, among others: + + Part A Part B + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable (in the case of UT; the TT is not at all critical + in this respect). The J2000 and MJD methods are good compromises + between resolution and convenience. For UT, the date & time + method is best matched to the algorithm that is used by the Earth + rotation angle function, called internally: maximum precision is + delivered when the uta argument is for 0hrs UT1 on the day in + question and the utb argument lies in the range 0 to 1, or vice + versa. 
+ + 2) Both UT1 and TT are required, UT1 to predict the Earth rotation + and TT to predict the effects of precession. If UT1 is used for + both purposes, errors of order 100 microarcseconds result. + + 3) This GMST is compatible with the IAU 2006 precession and must not + be used with other precession models. + + 4) The result is returned in the range 0 to 2pi. + + Called: + eraEra00 Earth rotation angle, IAU 2000 + eraAnp normalize angle into range 0 to 2pi + + Reference: + + Capitaine, N., Wallace, P.T. & Chapront, J., 2005, + Astron.Astrophys. 432, 355 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + uta_in = numpy.array(uta, dtype=numpy.double, order="C", copy=False, subok=True) + utb_in = numpy.array(utb, dtype=numpy.double, order="C", copy=False, subok=True) + tta_in = numpy.array(tta, dtype=numpy.double, order="C", copy=False, subok=True) + ttb_in = numpy.array(ttb, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), uta_in, utb_in, tta_in, ttb_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [uta_in, utb_in, tta_in, ttb_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._gmst06(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def gmst82(dj1, dj2): + """ + Wrapper for ERFA function ``eraGmst82``. + + Parameters + ---------- + dj1 : double array + dj2 : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a G m s t 8 2 + - - - - - - - - - - + + Universal Time to Greenwich mean sidereal time (IAU 1982 model). + + Given: + dj1,dj2 double UT1 Julian Date (see note) + + Returned (function value): + double Greenwich mean sidereal time (radians) + + Notes: + + 1) The UT1 date dj1+dj2 is a Julian Date, apportioned in any + convenient way between the arguments dj1 and dj2. For example, + JD(UT1)=2450123.7 could be expressed in any of these ways, + among others: + + dj1 dj2 + + 2450123.7 0 (JD method) + 2451545 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 and MJD methods are good compromises + between resolution and convenience. The date & time method is + best matched to the algorithm used: maximum accuracy (or, at + least, minimum noise) is delivered when the dj1 argument is for + 0hrs UT1 on the day in question and the dj2 argument lies in the + range 0 to 1, or vice versa. + + 2) The algorithm is based on the IAU 1982 expression. This is + always described as giving the GMST at 0 hours UT1. 
In fact, it + gives the difference between the GMST and the UT, the steady + 4-minutes-per-day drawing-ahead of ST with respect to UT. When + whole days are ignored, the expression happens to equal the GMST + at 0 hours UT1 each day. + + 3) In this function, the entire UT1 (the sum of the two arguments + dj1 and dj2) is used directly as the argument for the standard + formula, the constant term of which is adjusted by 12 hours to + take account of the noon phasing of Julian Date. The UT1 is then + added, but omitting whole days to conserve accuracy. + + Called: + eraAnp normalize angle into range 0 to 2pi + + References: + + Transactions of the International Astronomical Union, + XVIII B, 67 (1983). + + Aoki et al., Astron. Astrophys. 105, 359-361 (1982). + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + dj1_in = numpy.array(dj1, dtype=numpy.double, order="C", copy=False, subok=True) + dj2_in = numpy.array(dj2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), dj1_in, dj2_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [dj1_in, dj2_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._gmst82(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def gst00a(uta, utb, tta, ttb): + """ + Wrapper for ERFA function ``eraGst00a``. + + Parameters + ---------- + uta : double array + utb : double array + tta : double array + ttb : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a G s t 0 0 a + - - - - - - - - - - + + Greenwich apparent sidereal time (consistent with IAU 2000 + resolutions). + + Given: + uta,utb double UT1 as a 2-part Julian Date (Notes 1,2) + tta,ttb double TT as a 2-part Julian Date (Notes 1,2) + + Returned (function value): + double Greenwich apparent sidereal time (radians) + + Notes: + + 1) The UT1 and TT dates uta+utb and tta+ttb respectively, are both + Julian Dates, apportioned in any convenient way between the + argument pairs. For example, JD=2450123.7 could be expressed in + any of these ways, among others: + + Part A Part B + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable (in the case of UT; the TT is not at all critical + in this respect). The J2000 and MJD methods are good compromises + between resolution and convenience. 
For UT, the date & time + method is best matched to the algorithm that is used by the Earth + Rotation Angle function, called internally: maximum precision is + delivered when the uta argument is for 0hrs UT1 on the day in + question and the utb argument lies in the range 0 to 1, or vice + versa. + + 2) Both UT1 and TT are required, UT1 to predict the Earth rotation + and TT to predict the effects of precession-nutation. If UT1 is + used for both purposes, errors of order 100 microarcseconds + result. + + 3) This GAST is compatible with the IAU 2000 resolutions and must be + used only in conjunction with other IAU 2000 compatible + components such as precession-nutation. + + 4) The result is returned in the range 0 to 2pi. + + 5) The algorithm is from Capitaine et al. (2003) and IERS + Conventions 2003. + + Called: + eraGmst00 Greenwich mean sidereal time, IAU 2000 + eraEe00a equation of the equinoxes, IAU 2000A + eraAnp normalize angle into range 0 to 2pi + + References: + + Capitaine, N., Wallace, P.T. and McCarthy, D.D., "Expressions to + implement the IAU 2000 definition of UT1", Astronomy & + Astrophysics, 406, 1135-1149 (2003) + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + uta_in = numpy.array(uta, dtype=numpy.double, order="C", copy=False, subok=True) + utb_in = numpy.array(utb, dtype=numpy.double, order="C", copy=False, subok=True) + tta_in = numpy.array(tta, dtype=numpy.double, order="C", copy=False, subok=True) + ttb_in = numpy.array(ttb, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), uta_in, utb_in, tta_in, ttb_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [uta_in, utb_in, tta_in, ttb_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._gst00a(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def gst00b(uta, utb): + """ + Wrapper for ERFA function ``eraGst00b``. + + Parameters + ---------- + uta : double array + utb : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a G s t 0 0 b + - - - - - - - - - - + + Greenwich apparent sidereal time (consistent with IAU 2000 + resolutions but using the truncated nutation model IAU 2000B). + + Given: + uta,utb double UT1 as a 2-part Julian Date (Notes 1,2) + + Returned (function value): + double Greenwich apparent sidereal time (radians) + + Notes: + + 1) The UT1 date uta+utb is a Julian Date, apportioned in any + convenient way between the argument pair. 
For example, + JD=2450123.7 could be expressed in any of these ways, among + others: + + uta utb + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in cases + where the loss of several decimal digits of resolution is + acceptable. The J2000 and MJD methods are good compromises + between resolution and convenience. For UT, the date & time + method is best matched to the algorithm that is used by the Earth + Rotation Angle function, called internally: maximum precision is + delivered when the uta argument is for 0hrs UT1 on the day in + question and the utb argument lies in the range 0 to 1, or vice + versa. + + 2) The result is compatible with the IAU 2000 resolutions, except + that accuracy has been compromised for the sake of speed and + convenience in two respects: + + . UT is used instead of TDB (or TT) to compute the precession + component of GMST and the equation of the equinoxes. This + results in errors of order 0.1 mas at present. + + . The IAU 2000B abridged nutation model (McCarthy & Luzum, 2001) + is used, introducing errors of up to 1 mas. + + 3) This GAST is compatible with the IAU 2000 resolutions and must be + used only in conjunction with other IAU 2000 compatible + components such as precession-nutation. + + 4) The result is returned in the range 0 to 2pi. + + 5) The algorithm is from Capitaine et al. (2003) and IERS + Conventions 2003. + + Called: + eraGmst00 Greenwich mean sidereal time, IAU 2000 + eraEe00b equation of the equinoxes, IAU 2000B + eraAnp normalize angle into range 0 to 2pi + + References: + + Capitaine, N., Wallace, P.T. and McCarthy, D.D., "Expressions to + implement the IAU 2000 definition of UT1", Astronomy & + Astrophysics, 406, 1135-1149 (2003) + + McCarthy, D.D. & Luzum, B.J., "An abridged model of the + precession-nutation of the celestial pole", Celestial Mechanics & + Dynamical Astronomy, 85, 37-49 (2003) + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + uta_in = numpy.array(uta, dtype=numpy.double, order="C", copy=False, subok=True) + utb_in = numpy.array(utb, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), uta_in, utb_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [uta_in, utb_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._gst00b(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def gst06(uta, utb, tta, ttb, rnpb): + """ + Wrapper for ERFA function ``eraGst06``. 
+ + Parameters + ---------- + uta : double array + utb : double array + tta : double array + ttb : double array + rnpb : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a G s t 0 6 + - - - - - - - - - + + Greenwich apparent sidereal time, IAU 2006, given the NPB matrix. + + Given: + uta,utb double UT1 as a 2-part Julian Date (Notes 1,2) + tta,ttb double TT as a 2-part Julian Date (Notes 1,2) + rnpb double[3][3] nutation x precession x bias matrix + + Returned (function value): + double Greenwich apparent sidereal time (radians) + + Notes: + + 1) The UT1 and TT dates uta+utb and tta+ttb respectively, are both + Julian Dates, apportioned in any convenient way between the + argument pairs. For example, JD=2450123.7 could be expressed in + any of these ways, among others: + + Part A Part B + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable (in the case of UT; the TT is not at all critical + in this respect). The J2000 and MJD methods are good compromises + between resolution and convenience. For UT, the date & time + method is best matched to the algorithm that is used by the Earth + rotation angle function, called internally: maximum precision is + delivered when the uta argument is for 0hrs UT1 on the day in + question and the utb argument lies in the range 0 to 1, or vice + versa. + + 2) Both UT1 and TT are required, UT1 to predict the Earth rotation + and TT to predict the effects of precession-nutation. If UT1 is + used for both purposes, errors of order 100 microarcseconds + result. + + 3) Although the function uses the IAU 2006 series for s+XY/2, it is + otherwise independent of the precession-nutation model and can in + practice be used with any equinox-based NPB matrix. + + 4) The result is returned in the range 0 to 2pi. + + Called: + eraBpn2xy extract CIP X,Y coordinates from NPB matrix + eraS06 the CIO locator s, given X,Y, IAU 2006 + eraAnp normalize angle into range 0 to 2pi + eraEra00 Earth rotation angle, IAU 2000 + eraEors equation of the origins, given NPB matrix and s + + Reference: + + Wallace, P.T. & Capitaine, N., 2006, Astron.Astrophys. 459, 981 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
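+
+    Examples
+    --------
+    A minimal usage sketch. The NPB matrix is obtained here from the
+    companion wrapper ``pnm06a`` (the IAU 2006/2000A matrix named in the
+    ``gst06a`` documentation), but per Note 3 any equinox-based NPB
+    matrix can be supplied; the TT offset below is illustrative, not a
+    precise UT1-to-TT conversion:
+
+    >>> from astropy._erfa import gst06, pnm06a
+    >>> uta, utb = 2450123.5, 0.2       # UT1, "date & time" split
+    >>> tta, ttb = 2450123.5, 0.2008    # TT split (illustrative offset)
+    >>> rnpb = pnm06a(tta, ttb)         # 3x3 bias-precession-nutation matrix
+    >>> gast = gst06(uta, utb, tta, ttb, rnpb)  # radians, range 0-2pi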
+ + """ + + #Turn all inputs into arrays + uta_in = numpy.array(uta, dtype=numpy.double, order="C", copy=False, subok=True) + utb_in = numpy.array(utb, dtype=numpy.double, order="C", copy=False, subok=True) + tta_in = numpy.array(tta, dtype=numpy.double, order="C", copy=False, subok=True) + ttb_in = numpy.array(ttb, dtype=numpy.double, order="C", copy=False, subok=True) + rnpb_in = numpy.array(rnpb, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(rnpb_in, (3, 3), "rnpb") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), uta_in, utb_in, tta_in, ttb_in, rnpb_in[...,0,0]) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [uta_in, utb_in, tta_in, ttb_in, rnpb_in[...,0,0], c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*5 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._gst06(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def gst06a(uta, utb, tta, ttb): + """ + Wrapper for ERFA function ``eraGst06a``. + + Parameters + ---------- + uta : double array + utb : double array + tta : double array + ttb : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a G s t 0 6 a + - - - - - - - - - - + + Greenwich apparent sidereal time (consistent with IAU 2000 and 2006 + resolutions). + + Given: + uta,utb double UT1 as a 2-part Julian Date (Notes 1,2) + tta,ttb double TT as a 2-part Julian Date (Notes 1,2) + + Returned (function value): + double Greenwich apparent sidereal time (radians) + + Notes: + + 1) The UT1 and TT dates uta+utb and tta+ttb respectively, are both + Julian Dates, apportioned in any convenient way between the + argument pairs. For example, JD=2450123.7 could be expressed in + any of these ways, among others: + + Part A Part B + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable (in the case of UT; the TT is not at all critical + in this respect). The J2000 and MJD methods are good compromises + between resolution and convenience. For UT, the date & time + method is best matched to the algorithm that is used by the Earth + rotation angle function, called internally: maximum precision is + delivered when the uta argument is for 0hrs UT1 on the day in + question and the utb argument lies in the range 0 to 1, or vice + versa. + + 2) Both UT1 and TT are required, UT1 to predict the Earth rotation + and TT to predict the effects of precession-nutation. If UT1 is + used for both purposes, errors of order 100 microarcseconds + result. + + 3) This GAST is compatible with the IAU 2000/2006 resolutions and + must be used only in conjunction with IAU 2006 precession and + IAU 2000A nutation. 
+ + 4) The result is returned in the range 0 to 2pi. + + Called: + eraPnm06a classical NPB matrix, IAU 2006/2000A + eraGst06 Greenwich apparent ST, IAU 2006, given NPB matrix + + Reference: + + Wallace, P.T. & Capitaine, N., 2006, Astron.Astrophys. 459, 981 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + uta_in = numpy.array(uta, dtype=numpy.double, order="C", copy=False, subok=True) + utb_in = numpy.array(utb, dtype=numpy.double, order="C", copy=False, subok=True) + tta_in = numpy.array(tta, dtype=numpy.double, order="C", copy=False, subok=True) + ttb_in = numpy.array(ttb, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), uta_in, utb_in, tta_in, ttb_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [uta_in, utb_in, tta_in, ttb_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._gst06a(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def gst94(uta, utb): + """ + Wrapper for ERFA function ``eraGst94``. + + Parameters + ---------- + uta : double array + utb : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a G s t 9 4 + - - - - - - - - - + + Greenwich apparent sidereal time (consistent with IAU 1982/94 + resolutions). + + Given: + uta,utb double UT1 as a 2-part Julian Date (Notes 1,2) + + Returned (function value): + double Greenwich apparent sidereal time (radians) + + Notes: + + 1) The UT1 date uta+utb is a Julian Date, apportioned in any + convenient way between the argument pair. For example, + JD=2450123.7 could be expressed in any of these ways, among + others: + + uta utb + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in cases + where the loss of several decimal digits of resolution is + acceptable. The J2000 and MJD methods are good compromises + between resolution and convenience. For UT, the date & time + method is best matched to the algorithm that is used by the Earth + Rotation Angle function, called internally: maximum precision is + delivered when the uta argument is for 0hrs UT1 on the day in + question and the utb argument lies in the range 0 to 1, or vice + versa. + + 2) The result is compatible with the IAU 1982 and 1994 resolutions, + except that accuracy has been compromised for the sake of + convenience in that UT is used instead of TDB (or TT) to compute + the equation of the equinoxes. + + 3) This GAST must be used only in conjunction with contemporaneous + IAU standards such as 1976 precession, 1980 obliquity and 1982 + nutation. It is not compatible with the IAU 2000 resolutions. 
+ + 4) The result is returned in the range 0 to 2pi. + + Called: + eraGmst82 Greenwich mean sidereal time, IAU 1982 + eraEqeq94 equation of the equinoxes, IAU 1994 + eraAnp normalize angle into range 0 to 2pi + + References: + + Explanatory Supplement to the Astronomical Almanac, + P. Kenneth Seidelmann (ed), University Science Books (1992) + + IAU Resolution C7, Recommendation 3 (1994) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + uta_in = numpy.array(uta, dtype=numpy.double, order="C", copy=False, subok=True) + utb_in = numpy.array(utb, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), uta_in, utb_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [uta_in, utb_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._gst94(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def pvstar(pv): + """ + Wrapper for ERFA function ``eraPvstar``. + + Parameters + ---------- + pv : double array + + Returns + ------- + ra : double array + dec : double array + pmr : double array + pmd : double array + px : double array + rv : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a P v s t a r + - - - - - - - - - - + + Convert star position+velocity vector to catalog coordinates. + + Given (Note 1): + pv double[2][3] pv-vector (au, au/day) + + Returned (Note 2): + ra double right ascension (radians) + dec double declination (radians) + pmr double RA proper motion (radians/year) + pmd double Dec proper motion (radians/year) + px double parallax (arcsec) + rv double radial velocity (km/s, positive = receding) + + Returned (function value): + int status: + 0 = OK + -1 = superluminal speed (Note 5) + -2 = null position vector + + Notes: + + 1) The specified pv-vector is the coordinate direction (and its rate + of change) for the date at which the light leaving the star + reached the solar-system barycenter. + + 2) The star data returned by this function are "observables" for an + imaginary observer at the solar-system barycenter. Proper motion + and radial velocity are, strictly, in terms of barycentric + coordinate time, TCB. For most practical applications, it is + permissible to neglect the distinction between TCB and ordinary + "proper" time on Earth (TT/TAI). The result will, as a rule, be + limited by the intrinsic accuracy of the proper-motion and + radial-velocity data; moreover, the supplied pv-vector is likely + to be merely an intermediate result (for example generated by the + function eraStarpv), so that a change of time unit will cancel + out overall. + + In accordance with normal star-catalog conventions, the object's + right ascension and declination are freed from the effects of + secular aberration. 
The frame, which is aligned to the catalog + equator and equinox, is Lorentzian and centered on the SSB. + + Summarizing, the specified pv-vector is for most stars almost + identical to the result of applying the standard geometrical + "space motion" transformation to the catalog data. The + differences, which are the subject of the Stumpff paper cited + below, are: + + (i) In stars with significant radial velocity and proper motion, + the constantly changing light-time distorts the apparent proper + motion. Note that this is a classical, not a relativistic, + effect. + + (ii) The transformation complies with special relativity. + + 3) Care is needed with units. The star coordinates are in radians + and the proper motions in radians per Julian year, but the + parallax is in arcseconds; the radial velocity is in km/s, but + the pv-vector result is in au and au/day. + + 4) The proper motions are the rate of change of the right ascension + and declination at the catalog epoch and are in radians per Julian + year. The RA proper motion is in terms of coordinate angle, not + true angle, and will thus be numerically larger at high + declinations. + + 5) Straight-line motion at constant speed in the inertial frame is + assumed. If the speed is greater than or equal to the speed of + light, the function aborts with an error status. + + 6) The inverse transformation is performed by the function eraStarpv. + + Called: + eraPn decompose p-vector into modulus and direction + eraPdp scalar product of two p-vectors + eraSxp multiply p-vector by scalar + eraPmp p-vector minus p-vector + eraPm modulus of p-vector + eraPpp p-vector plus p-vector + eraPv2s pv-vector to spherical + eraAnp normalize angle into range 0 to 2pi + + Reference: + + Stumpff, P., 1985, Astron.Astrophys. 144, 232-240. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
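+
+    Editorial example (not part of the upstream ERFA documentation): a
+    minimal sketch of calling this wrapper, assuming the module is
+    importable as ``astropy._erfa``; the pv-vector has trailing shape
+    (2, 3), position in au and velocity in au/day, and the six catalog
+    observables are returned. The numeric values are illustrative only::
+
+        >>> import numpy as np
+        >>> from astropy import _erfa as erfa
+        >>> pv = np.array([[126668.6, 2136.8, -245251.2],
+        ...                [-0.40519e-2, -0.62539e-2, 0.118935e-1]])
+        >>> ra, dec, pmr, pmd, px, rv = erfa.pvstar(pv)  # doctest: +SKIP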
+ + """ + + #Turn all inputs into arrays + pv_in = numpy.array(pv, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(pv_in, (2, 3), "pv") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), pv_in[...,0,0]) + ra_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + dec_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + pmr_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + pmd_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + px_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + rv_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [pv_in[...,0,0], ra_out, dec_out, pmr_out, pmd_out, px_out, rv_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*7 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._pvstar(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'pvstar') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(ra_out.shape) > 0 and ra_out.shape[0] == 1 + ra_out = ra_out.reshape(ra_out.shape[1:]) + assert len(dec_out.shape) > 0 and dec_out.shape[0] == 1 + dec_out = dec_out.reshape(dec_out.shape[1:]) + assert len(pmr_out.shape) > 0 and pmr_out.shape[0] == 1 + pmr_out = pmr_out.reshape(pmr_out.shape[1:]) + assert len(pmd_out.shape) > 0 and pmd_out.shape[0] == 1 + pmd_out = pmd_out.reshape(pmd_out.shape[1:]) + assert len(px_out.shape) > 0 and px_out.shape[0] == 1 + px_out = px_out.reshape(px_out.shape[1:]) + assert len(rv_out.shape) > 0 and rv_out.shape[0] == 1 + rv_out = rv_out.reshape(rv_out.shape[1:]) + + return ra_out, dec_out, pmr_out, pmd_out, px_out, rv_out +STATUS_CODES['pvstar'] = {0: 'OK', -1: 'superluminal speed (Note 5)', -2: 'null position vector'} + + + +def starpv(ra, dec, pmr, pmd, px, rv): + """ + Wrapper for ERFA function ``eraStarpv``. + + Parameters + ---------- + ra : double array + dec : double array + pmr : double array + pmd : double array + px : double array + rv : double array + + Returns + ------- + pv : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a S t a r p v + - - - - - - - - - - + + Convert star catalog coordinates to position+velocity vector. + + Given (Note 1): + ra double right ascension (radians) + dec double declination (radians) + pmr double RA proper motion (radians/year) + pmd double Dec proper motion (radians/year) + px double parallax (arcseconds) + rv double radial velocity (km/s, positive = receding) + + Returned (Note 2): + pv double[2][3] pv-vector (au, au/day) + + Returned (function value): + int status: + 0 = no warnings + 1 = distance overridden (Note 6) + 2 = excessive speed (Note 7) + 4 = solution didn't converge (Note 8) + else = binary logical OR of the above + + Notes: + + 1) The star data accepted by this function are "observables" for an + imaginary observer at the solar-system barycenter. Proper motion + and radial velocity are, strictly, in terms of barycentric + coordinate time, TCB. 
For most practical applications, it is + permissible to neglect the distinction between TCB and ordinary + "proper" time on Earth (TT/TAI). The result will, as a rule, be + limited by the intrinsic accuracy of the proper-motion and + radial-velocity data; moreover, the pv-vector is likely to be + merely an intermediate result, so that a change of time unit + would cancel out overall. + + In accordance with normal star-catalog conventions, the object's + right ascension and declination are freed from the effects of + secular aberration. The frame, which is aligned to the catalog + equator and equinox, is Lorentzian and centered on the SSB. + + 2) The resulting position and velocity pv-vector is with respect to + the same frame and, like the catalog coordinates, is freed from + the effects of secular aberration. Should the "coordinate + direction", where the object was located at the catalog epoch, be + required, it may be obtained by calculating the magnitude of the + position vector pv[0][0-2] dividing by the speed of light in + au/day to give the light-time, and then multiplying the space + velocity pv[1][0-2] by this light-time and adding the result to + pv[0][0-2]. + + Summarizing, the pv-vector returned is for most stars almost + identical to the result of applying the standard geometrical + "space motion" transformation. The differences, which are the + subject of the Stumpff paper referenced below, are: + + (i) In stars with significant radial velocity and proper motion, + the constantly changing light-time distorts the apparent proper + motion. Note that this is a classical, not a relativistic, + effect. + + (ii) The transformation complies with special relativity. + + 3) Care is needed with units. The star coordinates are in radians + and the proper motions in radians per Julian year, but the + parallax is in arcseconds; the radial velocity is in km/s, but + the pv-vector result is in au and au/day. + + 4) The RA proper motion is in terms of coordinate angle, not true + angle. If the catalog uses arcseconds for both RA and Dec proper + motions, the RA proper motion will need to be divided by cos(Dec) + before use. + + 5) Straight-line motion at constant speed, in the inertial frame, + is assumed. + + 6) An extremely small (or zero or negative) parallax is interpreted + to mean that the object is on the "celestial sphere", the radius + of which is an arbitrary (large) value (see the constant PXMIN). + When the distance is overridden in this way, the status, + initially zero, has 1 added to it. + + 7) If the space velocity is a significant fraction of c (see the + constant VMAX), it is arbitrarily set to zero. When this action + occurs, 2 is added to the status. + + 8) The relativistic adjustment involves an iterative calculation. + If the process fails to converge within a set number (IMAX) of + iterations, 4 is added to the status. + + 9) The inverse transformation is performed by the function + eraPvstar. + + Called: + eraS2pv spherical coordinates to pv-vector + eraPm modulus of p-vector + eraZp zero p-vector + eraPn decompose p-vector into modulus and direction + eraPdp scalar product of two p-vectors + eraSxp multiply p-vector by scalar + eraPmp p-vector minus p-vector + eraPpp p-vector plus p-vector + + Reference: + + Stumpff, P., 1985, Astron.Astrophys. 144, 232-240. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
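+
+    Editorial example (not part of the upstream ERFA documentation): a
+    minimal sketch, assuming the module is importable as
+    ``astropy._erfa``; catalog data for a single star (radians,
+    radians/year, arcsec, km/s, as described above) give a pv-vector of
+    trailing shape (2, 3). The inputs are illustrative only::
+
+        >>> from astropy import _erfa as erfa
+        >>> pv = erfa.starpv(1.234, -0.5, 1e-7, 5e-8, 0.1, 25.0)  # doctest: +SKIP
+        >>> pv.shape  # doctest: +SKIP
+        (2, 3)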
+ + """ + + #Turn all inputs into arrays + ra_in = numpy.array(ra, dtype=numpy.double, order="C", copy=False, subok=True) + dec_in = numpy.array(dec, dtype=numpy.double, order="C", copy=False, subok=True) + pmr_in = numpy.array(pmr, dtype=numpy.double, order="C", copy=False, subok=True) + pmd_in = numpy.array(pmd, dtype=numpy.double, order="C", copy=False, subok=True) + px_in = numpy.array(px, dtype=numpy.double, order="C", copy=False, subok=True) + rv_in = numpy.array(rv, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), ra_in, dec_in, pmr_in, pmd_in, px_in, rv_in) + pv_out = numpy.empty(broadcast.shape + (2, 3), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [ra_in, dec_in, pmr_in, pmd_in, px_in, rv_in, pv_out[...,0,0], c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*6 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._starpv(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'starpv') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(pv_out.shape) > 0 and pv_out.shape[0] == 1 + pv_out = pv_out.reshape(pv_out.shape[1:]) + + return pv_out +STATUS_CODES['starpv'] = {0: 'no warnings', 1: 'distance overridden (Note 6)', 2: 'excessive speed (Note 7)', 4: "solution didn't converge (Note 8)", 'else': 'binary logical OR of the above'} + + + +def fk52h(r5, d5, dr5, dd5, px5, rv5): + """ + Wrapper for ERFA function ``eraFk52h``. + + Parameters + ---------- + r5 : double array + d5 : double array + dr5 : double array + dd5 : double array + px5 : double array + rv5 : double array + + Returns + ------- + rh : double array + dh : double array + drh : double array + ddh : double array + pxh : double array + rvh : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a F k 5 2 h + - - - - - - - - - + + Transform FK5 (J2000.0) star data into the Hipparcos system. + + Given (all FK5, equinox J2000.0, epoch J2000.0): + r5 double RA (radians) + d5 double Dec (radians) + dr5 double proper motion in RA (dRA/dt, rad/Jyear) + dd5 double proper motion in Dec (dDec/dt, rad/Jyear) + px5 double parallax (arcsec) + rv5 double radial velocity (km/s, positive = receding) + + Returned (all Hipparcos, epoch J2000.0): + rh double RA (radians) + dh double Dec (radians) + drh double proper motion in RA (dRA/dt, rad/Jyear) + ddh double proper motion in Dec (dDec/dt, rad/Jyear) + pxh double parallax (arcsec) + rvh double radial velocity (km/s, positive = receding) + + Notes: + + 1) This function transforms FK5 star positions and proper motions + into the system of the Hipparcos catalog. + + 2) The proper motions in RA are dRA/dt rather than + cos(Dec)*dRA/dt, and are per year rather than per century. + + 3) The FK5 to Hipparcos transformation is modeled as a pure + rotation and spin; zonal errors in the FK5 catalog are not + taken into account. + + 4) See also eraH2fk5, eraFk5hz, eraHfk5z. 
+ + Called: + eraStarpv star catalog data to space motion pv-vector + eraFk5hip FK5 to Hipparcos rotation and spin + eraRxp product of r-matrix and p-vector + eraPxp vector product of two p-vectors + eraPpp p-vector plus p-vector + eraPvstar space motion pv-vector to star catalog data + + Reference: + + F.Mignard & M.Froeschle, Astron. Astrophys. 354, 732-739 (2000). + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + r5_in = numpy.array(r5, dtype=numpy.double, order="C", copy=False, subok=True) + d5_in = numpy.array(d5, dtype=numpy.double, order="C", copy=False, subok=True) + dr5_in = numpy.array(dr5, dtype=numpy.double, order="C", copy=False, subok=True) + dd5_in = numpy.array(dd5, dtype=numpy.double, order="C", copy=False, subok=True) + px5_in = numpy.array(px5, dtype=numpy.double, order="C", copy=False, subok=True) + rv5_in = numpy.array(rv5, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), r5_in, d5_in, dr5_in, dd5_in, px5_in, rv5_in) + rh_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + dh_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + drh_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + ddh_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + pxh_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + rvh_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [r5_in, d5_in, dr5_in, dd5_in, px5_in, rv5_in, rh_out, dh_out, drh_out, ddh_out, pxh_out, rvh_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*6 + [['readwrite']]*6 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._fk52h(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rh_out.shape) > 0 and rh_out.shape[0] == 1 + rh_out = rh_out.reshape(rh_out.shape[1:]) + assert len(dh_out.shape) > 0 and dh_out.shape[0] == 1 + dh_out = dh_out.reshape(dh_out.shape[1:]) + assert len(drh_out.shape) > 0 and drh_out.shape[0] == 1 + drh_out = drh_out.reshape(drh_out.shape[1:]) + assert len(ddh_out.shape) > 0 and ddh_out.shape[0] == 1 + ddh_out = ddh_out.reshape(ddh_out.shape[1:]) + assert len(pxh_out.shape) > 0 and pxh_out.shape[0] == 1 + pxh_out = pxh_out.reshape(pxh_out.shape[1:]) + assert len(rvh_out.shape) > 0 and rvh_out.shape[0] == 1 + rvh_out = rvh_out.reshape(rvh_out.shape[1:]) + + return rh_out, dh_out, drh_out, ddh_out, pxh_out, rvh_out + + +def fk5hip(): + """ + Wrapper for ERFA function ``eraFk5hip``. + + Parameters + ---------- + + Returns + ------- + r5h : double array + s5h : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a F k 5 h i p + - - - - - - - - - - + + FK5 to Hipparcos rotation and spin. + + Returned: + r5h double[3][3] r-matrix: FK5 rotation wrt Hipparcos (Note 2) + s5h double[3] r-vector: FK5 spin wrt Hipparcos (Note 3) + + Notes: + + 1) This function models the FK5 to Hipparcos transformation as a + pure rotation and spin; zonal errors in the FK5 catalogue are + not taken into account. 
+ + 2) The r-matrix r5h operates in the sense: + + P_Hipparcos = r5h x P_FK5 + + where P_FK5 is a p-vector in the FK5 frame, and P_Hipparcos is + the equivalent Hipparcos p-vector. + + 3) The r-vector s5h represents the time derivative of the FK5 to + Hipparcos rotation. The units are radians per year (Julian, + TDB). + + Called: + eraRv2m r-vector to r-matrix + + Reference: + + F.Mignard & M.Froeschle, Astron. Astrophys. 354, 732-739 (2000). + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), ) + r5h_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + s5h_out = numpy.empty(broadcast.shape + (3,), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [r5h_out[...,0,0], s5h_out[...,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*0 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._fk5hip(it) + + return r5h_out, s5h_out + + +def fk5hz(r5, d5, date1, date2): + """ + Wrapper for ERFA function ``eraFk5hz``. + + Parameters + ---------- + r5 : double array + d5 : double array + date1 : double array + date2 : double array + + Returns + ------- + rh : double array + dh : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a F k 5 h z + - - - - - - - - - + + Transform an FK5 (J2000.0) star position into the system of the + Hipparcos catalogue, assuming zero Hipparcos proper motion. + + Given: + r5 double FK5 RA (radians), equinox J2000.0, at date + d5 double FK5 Dec (radians), equinox J2000.0, at date + date1,date2 double TDB date (Notes 1,2) + + Returned: + rh double Hipparcos RA (radians) + dh double Hipparcos Dec (radians) + + Notes: + + 1) This function converts a star position from the FK5 system to + the Hipparcos system, in such a way that the Hipparcos proper + motion is zero. Because such a star has, in general, a non-zero + proper motion in the FK5 system, the function requires the date + at which the position in the FK5 system was determined. + + 2) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 3) The FK5 to Hipparcos transformation is modeled as a pure + rotation and spin; zonal errors in the FK5 catalogue are not + taken into account. + + 4) The position returned by this function is in the Hipparcos + reference system but at date date1+date2. + + 5) See also eraFk52h, eraH2fk5, eraHfk5z. 
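+
+    Editorial example (not part of the upstream ERFA documentation): a
+    minimal sketch, assuming the module is importable as
+    ``astropy._erfa``; an FK5 position at a given TDB date is mapped to
+    the Hipparcos system. The inputs are illustrative only::
+
+        >>> from astropy import _erfa as erfa
+        >>> rh, dh = erfa.fk5hz(1.76779433, -0.2917512103,
+        ...                     2400000.5, 54479.0)  # doctest: +SKIP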
+ + Called: + eraS2c spherical coordinates to unit vector + eraFk5hip FK5 to Hipparcos rotation and spin + eraSxp multiply p-vector by scalar + eraRv2m r-vector to r-matrix + eraTrxp product of transpose of r-matrix and p-vector + eraPxp vector product of two p-vectors + eraC2s p-vector to spherical + eraAnp normalize angle into range 0 to 2pi + + Reference: + + F.Mignard & M.Froeschle, 2000, Astron.Astrophys. 354, 732-739. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + r5_in = numpy.array(r5, dtype=numpy.double, order="C", copy=False, subok=True) + d5_in = numpy.array(d5, dtype=numpy.double, order="C", copy=False, subok=True) + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), r5_in, d5_in, date1_in, date2_in) + rh_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + dh_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [r5_in, d5_in, date1_in, date2_in, rh_out, dh_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._fk5hz(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rh_out.shape) > 0 and rh_out.shape[0] == 1 + rh_out = rh_out.reshape(rh_out.shape[1:]) + assert len(dh_out.shape) > 0 and dh_out.shape[0] == 1 + dh_out = dh_out.reshape(dh_out.shape[1:]) + + return rh_out, dh_out + + +def h2fk5(rh, dh, drh, ddh, pxh, rvh): + """ + Wrapper for ERFA function ``eraH2fk5``. + + Parameters + ---------- + rh : double array + dh : double array + drh : double array + ddh : double array + pxh : double array + rvh : double array + + Returns + ------- + r5 : double array + d5 : double array + dr5 : double array + dd5 : double array + px5 : double array + rv5 : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a H 2 f k 5 + - - - - - - - - - + + Transform Hipparcos star data into the FK5 (J2000.0) system. + + Given (all Hipparcos, epoch J2000.0): + rh double RA (radians) + dh double Dec (radians) + drh double proper motion in RA (dRA/dt, rad/Jyear) + ddh double proper motion in Dec (dDec/dt, rad/Jyear) + pxh double parallax (arcsec) + rvh double radial velocity (km/s, positive = receding) + + Returned (all FK5, equinox J2000.0, epoch J2000.0): + r5 double RA (radians) + d5 double Dec (radians) + dr5 double proper motion in RA (dRA/dt, rad/Jyear) + dd5 double proper motion in Dec (dDec/dt, rad/Jyear) + px5 double parallax (arcsec) + rv5 double radial velocity (km/s, positive = receding) + + Notes: + + 1) This function transforms Hipparcos star positions and proper + motions into FK5 J2000.0. + + 2) The proper motions in RA are dRA/dt rather than + cos(Dec)*dRA/dt, and are per year rather than per century. + + 3) The FK5 to Hipparcos transformation is modeled as a pure + rotation and spin; zonal errors in the FK5 catalog are not + taken into account. 
+ + 4) See also eraFk52h, eraFk5hz, eraHfk5z. + + Called: + eraStarpv star catalog data to space motion pv-vector + eraFk5hip FK5 to Hipparcos rotation and spin + eraRv2m r-vector to r-matrix + eraRxp product of r-matrix and p-vector + eraTrxp product of transpose of r-matrix and p-vector + eraPxp vector product of two p-vectors + eraPmp p-vector minus p-vector + eraPvstar space motion pv-vector to star catalog data + + Reference: + + F.Mignard & M.Froeschle, Astron. Astrophys. 354, 732-739 (2000). + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + rh_in = numpy.array(rh, dtype=numpy.double, order="C", copy=False, subok=True) + dh_in = numpy.array(dh, dtype=numpy.double, order="C", copy=False, subok=True) + drh_in = numpy.array(drh, dtype=numpy.double, order="C", copy=False, subok=True) + ddh_in = numpy.array(ddh, dtype=numpy.double, order="C", copy=False, subok=True) + pxh_in = numpy.array(pxh, dtype=numpy.double, order="C", copy=False, subok=True) + rvh_in = numpy.array(rvh, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), rh_in, dh_in, drh_in, ddh_in, pxh_in, rvh_in) + r5_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + d5_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + dr5_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + dd5_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + px5_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + rv5_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [rh_in, dh_in, drh_in, ddh_in, pxh_in, rvh_in, r5_out, d5_out, dr5_out, dd5_out, px5_out, rv5_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*6 + [['readwrite']]*6 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._h2fk5(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(r5_out.shape) > 0 and r5_out.shape[0] == 1 + r5_out = r5_out.reshape(r5_out.shape[1:]) + assert len(d5_out.shape) > 0 and d5_out.shape[0] == 1 + d5_out = d5_out.reshape(d5_out.shape[1:]) + assert len(dr5_out.shape) > 0 and dr5_out.shape[0] == 1 + dr5_out = dr5_out.reshape(dr5_out.shape[1:]) + assert len(dd5_out.shape) > 0 and dd5_out.shape[0] == 1 + dd5_out = dd5_out.reshape(dd5_out.shape[1:]) + assert len(px5_out.shape) > 0 and px5_out.shape[0] == 1 + px5_out = px5_out.reshape(px5_out.shape[1:]) + assert len(rv5_out.shape) > 0 and rv5_out.shape[0] == 1 + rv5_out = rv5_out.reshape(rv5_out.shape[1:]) + + return r5_out, d5_out, dr5_out, dd5_out, px5_out, rv5_out + + +def hfk5z(rh, dh, date1, date2): + """ + Wrapper for ERFA function ``eraHfk5z``. + + Parameters + ---------- + rh : double array + dh : double array + date1 : double array + date2 : double array + + Returns + ------- + r5 : double array + d5 : double array + dr5 : double array + dd5 : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a H f k 5 z + - - - - - - - - - + + Transform a Hipparcos star position into FK5 J2000.0, assuming + zero Hipparcos proper motion. 
+ + Given: + rh double Hipparcos RA (radians) + dh double Hipparcos Dec (radians) + date1,date2 double TDB date (Note 1) + + Returned (all FK5, equinox J2000.0, date date1+date2): + r5 double RA (radians) + d5 double Dec (radians) + dr5 double FK5 RA proper motion (rad/year, Note 4) + dd5 double Dec proper motion (rad/year, Note 4) + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) The proper motion in RA is dRA/dt rather than cos(Dec)*dRA/dt. + + 3) The FK5 to Hipparcos transformation is modeled as a pure rotation + and spin; zonal errors in the FK5 catalogue are not taken into + account. + + 4) It was the intention that Hipparcos should be a close + approximation to an inertial frame, so that distant objects have + zero proper motion; such objects have (in general) non-zero + proper motion in FK5, and this function returns those fictitious + proper motions. + + 5) The position returned by this function is in the FK5 J2000.0 + reference system but at date date1+date2. + + 6) See also eraFk52h, eraH2fk5, eraFk5hz. + + Called: + eraS2c spherical coordinates to unit vector + eraFk5hip FK5 to Hipparcos rotation and spin + eraRxp product of r-matrix and p-vector + eraSxp multiply p-vector by scalar + eraRxr product of two r-matrices + eraTrxp product of transpose of r-matrix and p-vector + eraPxp vector product of two p-vectors + eraPv2s pv-vector to spherical + eraAnp normalize angle into range 0 to 2pi + + Reference: + + F.Mignard & M.Froeschle, 2000, Astron.Astrophys. 354, 732-739. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file.
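+
+    Editorial example (not part of the upstream ERFA documentation): a
+    minimal sketch, assuming the module is importable as
+    ``astropy._erfa``; the returned proper motions are the fictitious
+    FK5 motions described in Note 4. The inputs are illustrative only::
+
+        >>> from astropy import _erfa as erfa
+        >>> r5, d5, dr5, dd5 = erfa.hfk5z(1.767794352, -0.2917512594,
+        ...                               2400000.5, 54479.0)  # doctest: +SKIP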
+ + """ + + #Turn all inputs into arrays + rh_in = numpy.array(rh, dtype=numpy.double, order="C", copy=False, subok=True) + dh_in = numpy.array(dh, dtype=numpy.double, order="C", copy=False, subok=True) + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), rh_in, dh_in, date1_in, date2_in) + r5_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + d5_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + dr5_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + dd5_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [rh_in, dh_in, date1_in, date2_in, r5_out, d5_out, dr5_out, dd5_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*4 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._hfk5z(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(r5_out.shape) > 0 and r5_out.shape[0] == 1 + r5_out = r5_out.reshape(r5_out.shape[1:]) + assert len(d5_out.shape) > 0 and d5_out.shape[0] == 1 + d5_out = d5_out.reshape(d5_out.shape[1:]) + assert len(dr5_out.shape) > 0 and dr5_out.shape[0] == 1 + dr5_out = dr5_out.reshape(dr5_out.shape[1:]) + assert len(dd5_out.shape) > 0 and dd5_out.shape[0] == 1 + dd5_out = dd5_out.reshape(dd5_out.shape[1:]) + + return r5_out, d5_out, dr5_out, dd5_out + + +def starpm(ra1, dec1, pmr1, pmd1, px1, rv1, ep1a, ep1b, ep2a, ep2b): + """ + Wrapper for ERFA function ``eraStarpm``. + + Parameters + ---------- + ra1 : double array + dec1 : double array + pmr1 : double array + pmd1 : double array + px1 : double array + rv1 : double array + ep1a : double array + ep1b : double array + ep2a : double array + ep2b : double array + + Returns + ------- + ra2 : double array + dec2 : double array + pmr2 : double array + pmd2 : double array + px2 : double array + rv2 : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a S t a r p m + - - - - - - - - - - + + Star proper motion: update star catalog data for space motion. 
+ + Given: + ra1 double right ascension (radians), before + dec1 double declination (radians), before + pmr1 double RA proper motion (radians/year), before + pmd1 double Dec proper motion (radians/year), before + px1 double parallax (arcseconds), before + rv1 double radial velocity (km/s, +ve = receding), before + ep1a double "before" epoch, part A (Note 1) + ep1b double "before" epoch, part B (Note 1) + ep2a double "after" epoch, part A (Note 1) + ep2b double "after" epoch, part B (Note 1) + + Returned: + ra2 double right ascension (radians), after + dec2 double declination (radians), after + pmr2 double RA proper motion (radians/year), after + pmd2 double Dec proper motion (radians/year), after + px2 double parallax (arcseconds), after + rv2 double radial velocity (km/s, +ve = receding), after + + Returned (function value): + int status: + -1 = system error (should not occur) + 0 = no warnings or errors + 1 = distance overridden (Note 6) + 2 = excessive velocity (Note 7) + 4 = solution didn't converge (Note 8) + else = binary logical OR of the above warnings + + Notes: + + 1) The starting and ending TDB dates ep1a+ep1b and ep2a+ep2b are + Julian Dates, apportioned in any convenient way between the two + parts (A and B). For example, JD(TDB)=2450123.7 could be + expressed in any of these ways, among others: + + epna epnb + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) In accordance with normal star-catalog conventions, the object's + right ascension and declination are freed from the effects of + secular aberration. The frame, which is aligned to the catalog + equator and equinox, is Lorentzian and centered on the SSB. + + The proper motions are the rate of change of the right ascension + and declination at the catalog epoch and are in radians per TDB + Julian year. + + The parallax and radial velocity are in the same frame. + + 3) Care is needed with units. The star coordinates are in radians + and the proper motions in radians per Julian year, but the + parallax is in arcseconds. + + 4) The RA proper motion is in terms of coordinate angle, not true + angle. If the catalog uses arcseconds for both RA and Dec proper + motions, the RA proper motion will need to be divided by cos(Dec) + before use. + + 5) Straight-line motion at constant speed, in the inertial frame, + is assumed. + + 6) An extremely small (or zero or negative) parallax is interpreted + to mean that the object is on the "celestial sphere", the radius + of which is an arbitrary (large) value (see the eraStarpv + function for the value used). When the distance is overridden in + this way, the status, initially zero, has 1 added to it. + + 7) If the space velocity is a significant fraction of c (see the + constant VMAX in the function eraStarpv), it is arbitrarily set + to zero. When this action occurs, 2 is added to the status. + + 8) The relativistic adjustment carried out in the eraStarpv function + involves an iterative calculation. If the process fails to + converge within a set number of iterations, 4 is added to the + status. 
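+
+    Editorial example (not part of the upstream ERFA documentation): a
+    minimal sketch, assuming the module is importable as
+    ``astropy._erfa``; catalog data are propagated from one TDB epoch
+    (ep1a+ep1b) to another (ep2a+ep2b). The inputs are illustrative only::
+
+        >>> from astropy import _erfa as erfa
+        >>> ra2, dec2, pmr2, pmd2, px2, rv2 = erfa.starpm(
+        ...     0.01686756, -1.093989828, -1.78323516e-5, 2.336024047e-6,
+        ...     0.74723, -21.6, 2400000.5, 50083.0,
+        ...     2400000.5, 53736.0)  # doctest: +SKIP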
+ + Called: + eraStarpv star catalog data to space motion pv-vector + eraPvu update a pv-vector + eraPdp scalar product of two p-vectors + eraPvstar space motion pv-vector to star catalog data + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + ra1_in = numpy.array(ra1, dtype=numpy.double, order="C", copy=False, subok=True) + dec1_in = numpy.array(dec1, dtype=numpy.double, order="C", copy=False, subok=True) + pmr1_in = numpy.array(pmr1, dtype=numpy.double, order="C", copy=False, subok=True) + pmd1_in = numpy.array(pmd1, dtype=numpy.double, order="C", copy=False, subok=True) + px1_in = numpy.array(px1, dtype=numpy.double, order="C", copy=False, subok=True) + rv1_in = numpy.array(rv1, dtype=numpy.double, order="C", copy=False, subok=True) + ep1a_in = numpy.array(ep1a, dtype=numpy.double, order="C", copy=False, subok=True) + ep1b_in = numpy.array(ep1b, dtype=numpy.double, order="C", copy=False, subok=True) + ep2a_in = numpy.array(ep2a, dtype=numpy.double, order="C", copy=False, subok=True) + ep2b_in = numpy.array(ep2b, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), ra1_in, dec1_in, pmr1_in, pmd1_in, px1_in, rv1_in, ep1a_in, ep1b_in, ep2a_in, ep2b_in) + ra2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + dec2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + pmr2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + pmd2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + px2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + rv2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [ra1_in, dec1_in, pmr1_in, pmd1_in, px1_in, rv1_in, ep1a_in, ep1b_in, ep2a_in, ep2b_in, ra2_out, dec2_out, pmr2_out, pmd2_out, px2_out, rv2_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*10 + [['readwrite']]*7 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._starpm(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'starpm') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(ra2_out.shape) > 0 and ra2_out.shape[0] == 1 + ra2_out = ra2_out.reshape(ra2_out.shape[1:]) + assert len(dec2_out.shape) > 0 and dec2_out.shape[0] == 1 + dec2_out = dec2_out.reshape(dec2_out.shape[1:]) + assert len(pmr2_out.shape) > 0 and pmr2_out.shape[0] == 1 + pmr2_out = pmr2_out.reshape(pmr2_out.shape[1:]) + assert len(pmd2_out.shape) > 0 and pmd2_out.shape[0] == 1 + pmd2_out = pmd2_out.reshape(pmd2_out.shape[1:]) + assert len(px2_out.shape) > 0 and px2_out.shape[0] == 1 + px2_out = px2_out.reshape(px2_out.shape[1:]) + assert len(rv2_out.shape) > 0 and rv2_out.shape[0] == 1 + rv2_out = rv2_out.reshape(rv2_out.shape[1:]) + + return ra2_out, dec2_out, pmr2_out, pmd2_out, px2_out, rv2_out +STATUS_CODES['starpm'] = {-1: 'system error (should not occur)', 0: 'no warnings or errors', 1: 'distance overridden (Note 6)', 2: 'excessive velocity (Note 7)', 4: "solution didn't converge (Note 8)", 'else': 
'binary logical OR of the above warnings'} + + + +def eceq06(date1, date2, dl, db): + """ + Wrapper for ERFA function ``eraEceq06``. + + Parameters + ---------- + date1 : double array + date2 : double array + dl : double array + db : double array + + Returns + ------- + dr : double array + dd : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a E c e q 0 6 + - - - - - - - - - - + + Transformation from ecliptic coordinates (mean equinox and ecliptic + of date) to ICRS RA,Dec, using the IAU 2006 precession model. + + Given: + date1,date2 double TT as a 2-part Julian date (Note 1) + dl,db double ecliptic longitude and latitude (radians) + + Returned: + dr,dd double ICRS right ascension and declination (radians) + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) No assumptions are made about whether the coordinates represent + starlight and embody astrometric effects such as parallax or + aberration. + + 3) The transformation is approximately that from ecliptic longitude + and latitude (mean equinox and ecliptic of date) to mean J2000.0 + right ascension and declination, with only frame bias (always + less than 25 mas) to disturb this classical picture. + + Called: + eraS2c spherical coordinates to unit vector + eraEcm06 J2000.0 to ecliptic rotation matrix, IAU 2006 + eraTrxp product of transpose of r-matrix and p-vector + eraC2s unit vector to spherical coordinates + eraAnp normalize angle into range 0 to 2pi + eraAnpm normalize angle into range +/- pi + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
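+
+    Editorial example (not part of the upstream ERFA documentation): a
+    minimal sketch, assuming the module is importable as
+    ``astropy._erfa``; ecliptic longitude and latitude of date are
+    mapped to ICRS RA,Dec. The inputs are illustrative only::
+
+        >>> from astropy import _erfa as erfa
+        >>> dr, dd = erfa.eceq06(2456165.5, 0.401182685,
+        ...                      5.1, -0.9)  # doctest: +SKIP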
+ + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + dl_in = numpy.array(dl, dtype=numpy.double, order="C", copy=False, subok=True) + db_in = numpy.array(db, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in, dl_in, db_in) + dr_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + dd_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, dl_in, db_in, dr_out, dd_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._eceq06(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(dr_out.shape) > 0 and dr_out.shape[0] == 1 + dr_out = dr_out.reshape(dr_out.shape[1:]) + assert len(dd_out.shape) > 0 and dd_out.shape[0] == 1 + dd_out = dd_out.reshape(dd_out.shape[1:]) + + return dr_out, dd_out + + +def ecm06(date1, date2): + """ + Wrapper for ERFA function ``eraEcm06``. + + Parameters + ---------- + date1 : double array + date2 : double array + + Returns + ------- + rm : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a E c m 0 6 + - - - - - - - - - + + ICRS equatorial to ecliptic rotation matrix, IAU 2006. + + Given: + date1,date2 double TT as a 2-part Julian date (Note 1) + + Returned: + rm double[3][3] ICRS to ecliptic rotation matrix + + Notes: + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 1) The matrix is in the sense + + E_ep = rm x P_ICRS, + + where P_ICRS is a vector with respect to ICRS right ascension + and declination axes and E_ep is the same vector with respect to + the (inertial) ecliptic and equinox of date. + + 2) P_ICRS is a free vector, merely a direction, typically of unit + magnitude, and not bound to any particular spatial origin, such + as the Earth, Sun or SSB. No assumptions are made about whether + it represents starlight and embodies astrometric effects such as + parallax or aberration. The transformation is approximately that + between mean J2000.0 right ascension and declination and ecliptic + longitude and latitude, with only frame bias (always less than + 25 mas) to disturb this classical picture. 
+ + Called: + eraObl06 mean obliquity, IAU 2006 + eraPmat06 PB matrix, IAU 2006 + eraIr initialize r-matrix to identity + eraRx rotate around X-axis + eraRxr product of two r-matrices + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in) + rm_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, rm_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._ecm06(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rm_out.shape) > 0 and rm_out.shape[0] == 1 + rm_out = rm_out.reshape(rm_out.shape[1:]) + + return rm_out + + +def eqec06(date1, date2, dr, dd): + """ + Wrapper for ERFA function ``eraEqec06``. + + Parameters + ---------- + date1 : double array + date2 : double array + dr : double array + dd : double array + + Returns + ------- + dl : double array + db : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a E q e c 0 6 + - - - - - - - - - - + + Transformation from ICRS equatorial coordinates to ecliptic + coordinates (mean equinox and ecliptic of date) using IAU 2006 + precession model. + + Given: + date1,date2 double TT as a 2-part Julian date (Note 1) + dr,dd double ICRS right ascension and declination (radians) + + Returned: + dl,db double ecliptic longitude and latitude (radians) + + 1) The TT date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + 2) No assumptions are made about whether the coordinates represent + starlight and embody astrometric effects such as parallax or + aberration. + + 3) The transformation is approximately that from mean J2000.0 right + ascension and declination to ecliptic longitude and latitude + (mean equinox and ecliptic of date), with only frame bias (always + less than 25 mas) to disturb this classical picture. 
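+
+    Editorial example (not part of the upstream ERFA documentation): a
+    minimal sketch, assuming the module is importable as
+    ``astropy._erfa``; this is the inverse direction of eraEceq06 for
+    the same date. The inputs are illustrative only::
+
+        >>> from astropy import _erfa as erfa
+        >>> dl, db = erfa.eqec06(1234.5, 2440000.5,
+        ...                      1.234, 0.123)  # doctest: +SKIP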
+ + Called: + eraS2c spherical coordinates to unit vector + eraEcm06 J2000.0 to ecliptic rotation matrix, IAU 2006 + eraRxp product of r-matrix and p-vector + eraC2s unit vector to spherical coordinates + eraAnp normalize angle into range 0 to 2pi + eraAnpm normalize angle into range +/- pi + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + dr_in = numpy.array(dr, dtype=numpy.double, order="C", copy=False, subok=True) + dd_in = numpy.array(dd, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in, dr_in, dd_in) + dl_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + db_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, dr_in, dd_in, dl_out, db_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._eqec06(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(dl_out.shape) > 0 and dl_out.shape[0] == 1 + dl_out = dl_out.reshape(dl_out.shape[1:]) + assert len(db_out.shape) > 0 and db_out.shape[0] == 1 + db_out = db_out.reshape(db_out.shape[1:]) + + return dl_out, db_out + + +def lteceq(epj, dl, db): + """ + Wrapper for ERFA function ``eraLteceq``. + + Parameters + ---------- + epj : double array + dl : double array + db : double array + + Returns + ------- + dr : double array + dd : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a L t e c e q + - - - - - - - - - - + + Transformation from ecliptic coordinates (mean equinox and ecliptic + of date) to ICRS RA,Dec, using a long-term precession model. + + Given: + epj double Julian epoch (TT) + dl,db double ecliptic longitude and latitude (radians) + + Returned: + dr,dd double ICRS right ascension and declination (radians) + + 1) No assumptions are made about whether the coordinates represent + starlight and embody astrometric effects such as parallax or + aberration. + + 2) The transformation is approximately that from ecliptic longitude + and latitude (mean equinox and ecliptic of date) to mean J2000.0 + right ascension and declination, with only frame bias (always + less than 25 mas) to disturb this classical picture. + + 3) The Vondrak et al. (2011, 2012) 400 millennia precession model + agrees with the IAU 2006 precession at J2000.0 and stays within + 100 microarcseconds during the 20th and 21st centuries. It is + accurate to a few arcseconds throughout the historical period, + worsening to a few tenths of a degree at the end of the + +/- 200,000 year time span. 
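+
+    Editorial example (not part of the upstream ERFA documentation): a
+    minimal sketch, assuming the module is importable as
+    ``astropy._erfa``; with the long-term model the Julian epoch may lie
+    far outside the modern era. The inputs are illustrative only::
+
+        >>> from astropy import _erfa as erfa
+        >>> dr, dd = erfa.lteceq(2500.0, 1.5, 0.6)  # doctest: +SKIP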
+ + Called: + eraS2c spherical coordinates to unit vector + eraLtecm J2000.0 to ecliptic rotation matrix, long term + eraTrxp product of transpose of r-matrix and p-vector + eraC2s unit vector to spherical coordinates + eraAnp normalize angle into range 0 to 2pi + eraAnpm normalize angle into range +/- pi + + References: + + Vondrak, J., Capitaine, N. and Wallace, P., 2011, New precession + expressions, valid for long time intervals, Astron.Astrophys. 534, + A22 + + Vondrak, J., Capitaine, N. and Wallace, P., 2012, New precession + expressions, valid for long time intervals (Corrigendum), + Astron.Astrophys. 541, C1 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + epj_in = numpy.array(epj, dtype=numpy.double, order="C", copy=False, subok=True) + dl_in = numpy.array(dl, dtype=numpy.double, order="C", copy=False, subok=True) + db_in = numpy.array(db, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), epj_in, dl_in, db_in) + dr_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + dd_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [epj_in, dl_in, db_in, dr_out, dd_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._lteceq(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(dr_out.shape) > 0 and dr_out.shape[0] == 1 + dr_out = dr_out.reshape(dr_out.shape[1:]) + assert len(dd_out.shape) > 0 and dd_out.shape[0] == 1 + dd_out = dd_out.reshape(dd_out.shape[1:]) + + return dr_out, dd_out + + +def ltecm(epj): + """ + Wrapper for ERFA function ``eraLtecm``. + + Parameters + ---------- + epj : double array + + Returns + ------- + rm : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a L t e c m + - - - - - - - - - + + ICRS equatorial to ecliptic rotation matrix, long-term. + + Given: + epj double Julian epoch (TT) + + Returned: + rm double[3][3] ICRS to ecliptic rotation matrix + + Notes: + + 1) The matrix is in the sense + + E_ep = rm x P_ICRS, + + where P_ICRS is a vector with respect to ICRS right ascension + and declination axes and E_ep is the same vector with respect to + the (inertial) ecliptic and equinox of epoch epj. + + 2) P_ICRS is a free vector, merely a direction, typically of unit + magnitude, and not bound to any particular spatial origin, such + as the Earth, Sun or SSB. No assumptions are made about whether + it represents starlight and embodies astrometric effects such as + parallax or aberration. The transformation is approximately that + between mean J2000.0 right ascension and declination and ecliptic + longitude and latitude, with only frame bias (always less than + 25 mas) to disturb this classical picture. + + 3) The Vondrak et al. (2011, 2012) 400 millennia precession model + agrees with the IAU 2006 precession at J2000.0 and stays within + 100 microarcseconds during the 20th and 21st centuries. 
It is + accurate to a few arcseconds throughout the historical period, + worsening to a few tenths of a degree at the end of the + +/- 200,000 year time span. + + Called: + eraLtpequ equator pole, long term + eraLtpecl ecliptic pole, long term + eraPxp vector product + eraPn normalize vector + + References: + + Vondrak, J., Capitaine, N. and Wallace, P., 2011, New precession + expressions, valid for long time intervals, Astron.Astrophys. 534, + A22 + + Vondrak, J., Capitaine, N. and Wallace, P., 2012, New precession + expressions, valid for long time intervals (Corrigendum), + Astron.Astrophys. 541, C1 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + epj_in = numpy.array(epj, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), epj_in) + rm_out = numpy.empty(broadcast.shape + (3, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [epj_in, rm_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._ltecm(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rm_out.shape) > 0 and rm_out.shape[0] == 1 + rm_out = rm_out.reshape(rm_out.shape[1:]) + + return rm_out + + +def lteqec(epj, dr, dd): + """ + Wrapper for ERFA function ``eraLteqec``. + + Parameters + ---------- + epj : double array + dr : double array + dd : double array + + Returns + ------- + dl : double array + db : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a L t e q e c + - - - - - - - - - - + + Transformation from ICRS equatorial coordinates to ecliptic + coordinates (mean equinox and ecliptic of date) using a long-term + precession model. + + Given: + epj double Julian epoch (TT) + dr,dd double ICRS right ascension and declination (radians) + + Returned: + dl,db double ecliptic longitude and latitude (radians) + + 1) No assumptions are made about whether the coordinates represent + starlight and embody astrometric effects such as parallax or + aberration. + + 2) The transformation is approximately that from mean J2000.0 right + ascension and declination to ecliptic longitude and latitude + (mean equinox and ecliptic of date), with only frame bias (always + less than 25 mas) to disturb this classical picture. + + 3) The Vondrak et al. (2011, 2012) 400 millennia precession model + agrees with the IAU 2006 precession at J2000.0 and stays within + 100 microarcseconds during the 20th and 21st centuries. It is + accurate to a few arcseconds throughout the historical period, + worsening to a few tenths of a degree at the end of the + +/- 200,000 year time span. + + Called: + eraS2c spherical coordinates to unit vector + eraLtecm J2000.0 to ecliptic rotation matrix, long term + eraRxp product of r-matrix and p-vector + eraC2s unit vector to spherical coordinates + eraAnp normalize angle into range 0 to 2pi + eraAnpm normalize angle into range +/- pi + + References: + + Vondrak, J., Capitaine, N. 
and Wallace, P., 2011, New precession + expressions, valid for long time intervals, Astron.Astrophys. 534, + A22 + + Vondrak, J., Capitaine, N. and Wallace, P., 2012, New precession + expressions, valid for long time intervals (Corrigendum), + Astron.Astrophys. 541, C1 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + epj_in = numpy.array(epj, dtype=numpy.double, order="C", copy=False, subok=True) + dr_in = numpy.array(dr, dtype=numpy.double, order="C", copy=False, subok=True) + dd_in = numpy.array(dd, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), epj_in, dr_in, dd_in) + dl_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + db_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [epj_in, dr_in, dd_in, dl_out, db_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._lteqec(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(dl_out.shape) > 0 and dl_out.shape[0] == 1 + dl_out = dl_out.reshape(dl_out.shape[1:]) + assert len(db_out.shape) > 0 and db_out.shape[0] == 1 + db_out = db_out.reshape(db_out.shape[1:]) + + return dl_out, db_out + + +def g2icrs(dl, db): + """ + Wrapper for ERFA function ``eraG2icrs``. + + Parameters + ---------- + dl : double array + db : double array + + Returns + ------- + dr : double array + dd : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a G 2 i c r s + - - - - - - - - - - + + Transformation from Galactic Coordinates to ICRS. + + Given: + dl double galactic longitude (radians) + db double galactic latitude (radians) + + Returned: + dr double ICRS right ascension (radians) + dd double ICRS declination (radians) + + Notes: + + 1) The IAU 1958 system of Galactic coordinates was defined with + respect to the now obsolete reference system FK4 B1950.0. When + interpreting the system in a modern context, several factors have + to be taken into account: + + . The inclusion in FK4 positions of the E-terms of aberration. + + . The distortion of the FK4 proper motion system by differential + Galactic rotation. + + . The use of the B1950.0 equinox rather than the now-standard + J2000.0. + + . The frame bias between ICRS and the J2000.0 mean place system. + + The Hipparcos Catalogue (Perryman & ESA 1997) provides a rotation + matrix that transforms directly between ICRS and Galactic + coordinates with the above factors taken into account. The + matrix is derived from three angles, namely the ICRS coordinates + of the Galactic pole and the longitude of the ascending node of + the galactic equator on the ICRS equator. They are given in + degrees to five decimal places and for canonical purposes are + regarded as exact. In the Hipparcos Catalogue the matrix + elements are given to 10 decimal places (about 20 microarcsec). 
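Since ``lteceq`` and ``lteqec`` apply the same long-term rotation (built by ``ltecm``) in opposite senses, a round trip should recover the inputs to rounding error; an illustrative sketch under the same import assumption:

    import numpy as np
    from astropy import _erfa as erfa

    rm = erfa.ltecm(2000.0)                  # (3, 3) rotation matrix
    dl0, db0 = np.radians(120.0), np.radians(-3.0)
    dr, dd = erfa.lteceq(2000.0, dl0, db0)   # ecliptic -> ICRS
    dl1, db1 = erfa.lteqec(2000.0, dr, dd)   # ICRS -> ecliptic
    assert np.allclose([dl1, db1], [dl0, db0])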
+ In the present ERFA function the matrix elements have been + recomputed from the canonical three angles and are given to 30 + decimal places. + + 2) The inverse transformation is performed by the function eraIcrs2g. + + Called: + eraAnp normalize angle into range 0 to 2pi + eraAnpm normalize angle into range +/- pi + eraS2c spherical coordinates to unit vector + eraTrxp product of transpose of r-matrix and p-vector + eraC2s p-vector to spherical + + Reference: + Perryman M.A.C. & ESA, 1997, ESA SP-1200, The Hipparcos and Tycho + catalogues. Astrometric and photometric star catalogues + derived from the ESA Hipparcos Space Astrometry Mission. ESA + Publications Division, Noordwijk, Netherlands. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + dl_in = numpy.array(dl, dtype=numpy.double, order="C", copy=False, subok=True) + db_in = numpy.array(db, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), dl_in, db_in) + dr_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + dd_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [dl_in, db_in, dr_out, dd_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._g2icrs(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(dr_out.shape) > 0 and dr_out.shape[0] == 1 + dr_out = dr_out.reshape(dr_out.shape[1:]) + assert len(dd_out.shape) > 0 and dd_out.shape[0] == 1 + dd_out = dd_out.reshape(dd_out.shape[1:]) + + return dr_out, dd_out + + +def icrs2g(dr, dd): + """ + Wrapper for ERFA function ``eraIcrs2g``. + + Parameters + ---------- + dr : double array + dd : double array + + Returns + ------- + dl : double array + db : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a I c r s 2 g + - - - - - - - - - - + + Transformation from ICRS to Galactic Coordinates. + + Given: + dr double ICRS right ascension (radians) + dd double ICRS declination (radians) + + Returned: + dl double galactic longitude (radians) + db double galactic latitude (radians) + + Notes: + + 1) The IAU 1958 system of Galactic coordinates was defined with + respect to the now obsolete reference system FK4 B1950.0. When + interpreting the system in a modern context, several factors have + to be taken into account: + + . The inclusion in FK4 positions of the E-terms of aberration. + + . The distortion of the FK4 proper motion system by differential + Galactic rotation. + + . The use of the B1950.0 equinox rather than the now-standard + J2000.0. + + . The frame bias between ICRS and the J2000.0 mean place system. + + The Hipparcos Catalogue (Perryman & ESA 1997) provides a rotation + matrix that transforms directly between ICRS and Galactic + coordinates with the above factors taken into account. 
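A quick check of ``g2icrs``: the Galactic centre direction (l = b = 0) should map to roughly RA 266.40 deg, Dec -28.94 deg (approximate values, quoted only for orientation):

    import numpy as np
    from astropy import _erfa as erfa

    dr, dd = erfa.g2icrs(0.0, 0.0)
    print(np.degrees(dr), np.degrees(dd))   # ~266.40, ~-28.94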
The + matrix is derived from three angles, namely the ICRS coordinates + of the Galactic pole and the longitude of the ascending node of + the galactic equator on the ICRS equator. They are given in + degrees to five decimal places and for canonical purposes are + regarded as exact. In the Hipparcos Catalogue the matrix + elements are given to 10 decimal places (about 20 microarcsec). + In the present ERFA function the matrix elements have been + recomputed from the canonical three angles and are given to 30 + decimal places. + + 2) The inverse transformation is performed by the function eraG2icrs. + + Called: + eraAnp normalize angle into range 0 to 2pi + eraAnpm normalize angle into range +/- pi + eraS2c spherical coordinates to unit vector + eraRxp product of r-matrix and p-vector + eraC2s p-vector to spherical + + Reference: + Perryman M.A.C. & ESA, 1997, ESA SP-1200, The Hipparcos and Tycho + catalogues. Astrometric and photometric star catalogues + derived from the ESA Hipparcos Space Astrometry Mission. ESA + Publications Division, Noordwijk, Netherlands. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + dr_in = numpy.array(dr, dtype=numpy.double, order="C", copy=False, subok=True) + dd_in = numpy.array(dd, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), dr_in, dd_in) + dl_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + db_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [dr_in, dd_in, dl_out, db_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._icrs2g(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(dl_out.shape) > 0 and dl_out.shape[0] == 1 + dl_out = dl_out.reshape(dl_out.shape[1:]) + assert len(db_out.shape) > 0 and db_out.shape[0] == 1 + db_out = db_out.reshape(db_out.shape[1:]) + + return dl_out, db_out + + +def eform(n): + """ + Wrapper for ERFA function ``eraEform``. + + Parameters + ---------- + n : int array + + Returns + ------- + a : double array + f : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a E f o r m + - - - - - - - - - + + Earth reference ellipsoids. + + Given: + n int ellipsoid identifier (Note 1) + + Returned: + a double equatorial radius (meters, Note 2) + f double flattening (Note 2) + + Returned (function value): + int status: 0 = OK + -1 = illegal identifier (Note 3) + + Notes: + + 1) The identifier n is a number that specifies the choice of + reference ellipsoid. The following are supported: + + n ellipsoid + + 1 ERFA_WGS84 + 2 ERFA_GRS80 + 3 ERFA_WGS72 + + The n value has no significance outside the ERFA software. For + convenience, symbols ERFA_WGS84 etc. are defined in erfam.h. + + 2) The ellipsoid parameters are returned in the form of equatorial + radius in meters (a) and flattening (f). The latter is a number + around 0.00335, i.e. around 1/298. 
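Because the Galactic/ICRS transformation is a single fixed rotation, ``icrs2g`` and ``g2icrs`` invert each other exactly (to rounding); a sketch:

    import numpy as np
    from astropy import _erfa as erfa

    dr0, dd0 = np.radians(266.4), np.radians(-28.94)
    dl, db = erfa.icrs2g(dr0, dd0)     # ICRS -> Galactic
    dr1, dd1 = erfa.g2icrs(dl, db)     # Galactic -> ICRS
    assert np.allclose([dr1, dd1], [dr0, dd0])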
+ + 3) For the case where an unsupported n value is supplied, zero a and + f are returned, as well as error status. + + References: + + Department of Defense World Geodetic System 1984, National + Imagery and Mapping Agency Technical Report 8350.2, Third + Edition, p3-2. + + Moritz, H., Bull. Geodesique 66-2, 187 (1992). + + The Department of Defense World Geodetic System 1972, World + Geodetic System Committee, May 1974. + + Explanatory Supplement to the Astronomical Almanac, + P. Kenneth Seidelmann (ed), University Science Books (1992), + p220. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + n_in = numpy.array(n, dtype=numpy.intc, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), n_in) + a_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + f_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [n_in, a_out, f_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._eform(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'eform') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(a_out.shape) > 0 and a_out.shape[0] == 1 + a_out = a_out.reshape(a_out.shape[1:]) + assert len(f_out.shape) > 0 and f_out.shape[0] == 1 + f_out = f_out.reshape(f_out.shape[1:]) + + return a_out, f_out +STATUS_CODES['eform'] = {0: 'OK', -1: 'illegal identifier (Note 3)'} + + + +def gc2gd(n, xyz): + """ + Wrapper for ERFA function ``eraGc2gd``. + + Parameters + ---------- + n : int array + xyz : double array + + Returns + ------- + elong : double array + phi : double array + height : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a G c 2 g d + - - - - - - - - - + + Transform geocentric coordinates to geodetic using the specified + reference ellipsoid. + + Given: + n int ellipsoid identifier (Note 1) + xyz double[3] geocentric vector (Note 2) + + Returned: + elong double longitude (radians, east +ve, Note 3) + phi double latitude (geodetic, radians, Note 3) + height double height above ellipsoid (geodetic, Notes 2,3) + + Returned (function value): + int status: 0 = OK + -1 = illegal identifier (Note 3) + -2 = internal error (Note 3) + + Notes: + + 1) The identifier n is a number that specifies the choice of + reference ellipsoid. The following are supported: + + n ellipsoid + + 1 ERFA_WGS84 + 2 ERFA_GRS80 + 3 ERFA_WGS72 + + The n value has no significance outside the ERFA software. For + convenience, symbols ERFA_WGS84 etc. are defined in erfam.h. + + 2) The geocentric vector (xyz, given) and height (height, returned) + are in meters. + + 3) An error status -1 means that the identifier n is illegal. An + error status -2 is theoretically impossible. In all error cases, + all three results are set to -1e9. + + 4) The inverse transformation is performed in the function eraGd2gc. 
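For ``eform`` the WGS84 numbers are well known, which makes a handy smoke test (same assumed ``astropy._erfa`` import):

    from astropy import _erfa as erfa

    a, f = erfa.eform(1)   # 1 = ERFA_WGS84
    print(a)               # 6378137.0 (meters)
    print(1.0 / f)         # ~298.257
    # an unsupported identifier yields status -1, which check_errwarn
    # reports as an error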
+ + Called: + eraEform Earth reference ellipsoids + eraGc2gde geocentric to geodetic transformation, general + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + n_in = numpy.array(n, dtype=numpy.intc, order="C", copy=False, subok=True) + xyz_in = numpy.array(xyz, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(xyz_in, (3,), "xyz") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), n_in, xyz_in[...,0]) + elong_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + phi_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + height_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [n_in, xyz_in[...,0], elong_out, phi_out, height_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*4 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._gc2gd(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'gc2gd') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(elong_out.shape) > 0 and elong_out.shape[0] == 1 + elong_out = elong_out.reshape(elong_out.shape[1:]) + assert len(phi_out.shape) > 0 and phi_out.shape[0] == 1 + phi_out = phi_out.reshape(phi_out.shape[1:]) + assert len(height_out.shape) > 0 and height_out.shape[0] == 1 + height_out = height_out.reshape(height_out.shape[1:]) + + return elong_out, phi_out, height_out +STATUS_CODES['gc2gd'] = {0: 'OK', -1: 'illegal identifier (Note 3)', -2: 'internal error (Note 3)'} + + + +def gc2gde(a, f, xyz): + """ + Wrapper for ERFA function ``eraGc2gde``. + + Parameters + ---------- + a : double array + f : double array + xyz : double array + + Returns + ------- + elong : double array + phi : double array + height : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a G c 2 g d e + - - - - - - - - - - + + Transform geocentric coordinates to geodetic for a reference + ellipsoid of specified form. + + Given: + a double equatorial radius (Notes 2,4) + f double flattening (Note 3) + xyz double[3] geocentric vector (Note 4) + + Returned: + elong double longitude (radians, east +ve) + phi double latitude (geodetic, radians) + height double height above ellipsoid (geodetic, Note 4) + + Returned (function value): + int status: 0 = OK + -1 = illegal f + -2 = illegal a + + Notes: + + 1) This function is based on the GCONV2H Fortran subroutine by + Toshio Fukushima (see reference). + + 2) The equatorial radius, a, can be in any units, but meters is + the conventional choice. + + 3) The flattening, f, is (for the Earth) a value around 0.00335, + i.e. around 1/298. + + 4) The equatorial radius, a, and the geocentric vector, xyz, + must be given in the same units, and determine the units of + the returned height, height. + + 5) If an error occurs (status < 0), elong, phi and height are + unchanged. + + 6) The inverse transformation is performed in the function + eraGd2gce. 
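A sanity check for ``gc2gd``: a point on the WGS84 equator at exactly the equatorial radius should come out with (near-)zero longitude, geodetic latitude and height:

    import numpy as np
    from astropy import _erfa as erfa

    elong, phi, height = erfa.gc2gd(1, [6378137.0, 0.0, 0.0])
    assert np.allclose([elong, phi, height], 0.0, atol=1e-6)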
+ + 7) The transformation for a standard ellipsoid (such as ERFA_WGS84) can + more conveniently be performed by calling eraGc2gd, which uses a + numerical code to identify the required A and F values. + + Reference: + + Fukushima, T., "Transformation from Cartesian to geodetic + coordinates accelerated by Halley's method", J.Geodesy (2006) + 79: 689-693 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + a_in = numpy.array(a, dtype=numpy.double, order="C", copy=False, subok=True) + f_in = numpy.array(f, dtype=numpy.double, order="C", copy=False, subok=True) + xyz_in = numpy.array(xyz, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(xyz_in, (3,), "xyz") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), a_in, f_in, xyz_in[...,0]) + elong_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + phi_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + height_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [a_in, f_in, xyz_in[...,0], elong_out, phi_out, height_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*4 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._gc2gde(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'gc2gde') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(elong_out.shape) > 0 and elong_out.shape[0] == 1 + elong_out = elong_out.reshape(elong_out.shape[1:]) + assert len(phi_out.shape) > 0 and phi_out.shape[0] == 1 + phi_out = phi_out.reshape(phi_out.shape[1:]) + assert len(height_out.shape) > 0 and height_out.shape[0] == 1 + height_out = height_out.reshape(height_out.shape[1:]) + + return elong_out, phi_out, height_out +STATUS_CODES['gc2gde'] = {0: 'OK', -1: 'illegal f', -2: 'illegal a'} + + + +def gd2gc(n, elong, phi, height): + """ + Wrapper for ERFA function ``eraGd2gc``. + + Parameters + ---------- + n : int array + elong : double array + phi : double array + height : double array + + Returns + ------- + xyz : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a G d 2 g c + - - - - - - - - - + + Transform geodetic coordinates to geocentric using the specified + reference ellipsoid. + + Given: + n int ellipsoid identifier (Note 1) + elong double longitude (radians, east +ve) + phi double latitude (geodetic, radians, Note 3) + height double height above ellipsoid (geodetic, Notes 2,3) + + Returned: + xyz double[3] geocentric vector (Note 2) + + Returned (function value): + int status: 0 = OK + -1 = illegal identifier (Note 3) + -2 = illegal case (Note 3) + + Notes: + + 1) The identifier n is a number that specifies the choice of + reference ellipsoid. The following are supported: + + n ellipsoid + + 1 ERFA_WGS84 + 2 ERFA_GRS80 + 3 ERFA_WGS72 + + The n value has no significance outside the ERFA software. For + convenience, symbols ERFA_WGS84 etc. are defined in erfam.h. 
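``gc2gde`` is the explicit-ellipsoid form; with the standard WGS84 constants it should agree with ``gc2gd(1, ...)``:

    from astropy import _erfa as erfa

    a_wgs84, f_wgs84 = 6378137.0, 1.0 / 298.257223563
    elong, phi, height = erfa.gc2gde(a_wgs84, f_wgs84,
                                     [6378137.0, 0.0, 1000.0])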
+ + 2) The height (height, given) and the geocentric vector (xyz, + returned) are in meters. + + 3) No validation is performed on the arguments elong, phi and + height. An error status -1 means that the identifier n is + illegal. An error status -2 protects against cases that would + lead to arithmetic exceptions. In all error cases, xyz is set + to zeros. + + 4) The inverse transformation is performed in the function eraGc2gd. + + Called: + eraEform Earth reference ellipsoids + eraGd2gce geodetic to geocentric transformation, general + eraZp zero p-vector + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + n_in = numpy.array(n, dtype=numpy.intc, order="C", copy=False, subok=True) + elong_in = numpy.array(elong, dtype=numpy.double, order="C", copy=False, subok=True) + phi_in = numpy.array(phi, dtype=numpy.double, order="C", copy=False, subok=True) + height_in = numpy.array(height, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), n_in, elong_in, phi_in, height_in) + xyz_out = numpy.empty(broadcast.shape + (3,), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [n_in, elong_in, phi_in, height_in, xyz_out[...,0], c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._gd2gc(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'gd2gc') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(xyz_out.shape) > 0 and xyz_out.shape[0] == 1 + xyz_out = xyz_out.reshape(xyz_out.shape[1:]) + + return xyz_out +STATUS_CODES['gd2gc'] = {0: 'OK', -1: 'illegal identifier (Note 3)', -2: 'illegal case (Note 3)'} + + + +def gd2gce(a, f, elong, phi, height): + """ + Wrapper for ERFA function ``eraGd2gce``. + + Parameters + ---------- + a : double array + f : double array + elong : double array + phi : double array + height : double array + + Returns + ------- + xyz : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a G d 2 g c e + - - - - - - - - - - + + Transform geodetic coordinates to geocentric for a reference + ellipsoid of specified form. + + Given: + a double equatorial radius (Notes 1,4) + f double flattening (Notes 2,4) + elong double longitude (radians, east +ve) + phi double latitude (geodetic, radians, Note 4) + height double height above ellipsoid (geodetic, Notes 3,4) + + Returned: + xyz double[3] geocentric vector (Note 3) + + Returned (function value): + int status: 0 = OK + -1 = illegal case (Note 4) + Notes: + + 1) The equatorial radius, a, can be in any units, but meters is + the conventional choice. + + 2) The flattening, f, is (for the Earth) a value around 0.00335, + i.e. around 1/298. + + 3) The equatorial radius, a, and the height, height, must be + given in the same units, and determine the units of the + returned geocentric vector, xyz. + + 4) No validation is performed on individual arguments. 
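Per note 4 above, ``gc2gd`` inverts ``gd2gc``, so a geodetic -> geocentric -> geodetic round trip should reproduce the site (illustrative coordinates):

    import numpy as np
    from astropy import _erfa as erfa

    xyz = erfa.gd2gc(1, np.radians(9.0), np.radians(52.0), 140.0)
    elong, phi, height = erfa.gc2gd(1, xyz)
    assert np.allclose(np.degrees([elong, phi]), [9.0, 52.0])
    assert np.allclose(height, 140.0)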
The error + status -1 protects against (unrealistic) cases that would lead + to arithmetic exceptions. If an error occurs, xyz is unchanged. + + 5) The inverse transformation is performed in the function + eraGc2gde. + + 6) The transformation for a standard ellipsoid (such as ERFA_WGS84) can + more conveniently be performed by calling eraGd2gc, which uses a + numerical code to identify the required a and f values. + + References: + + Green, R.M., Spherical Astronomy, Cambridge University Press, + (1985) Section 4.5, p96. + + Explanatory Supplement to the Astronomical Almanac, + P. Kenneth Seidelmann (ed), University Science Books (1992), + Section 4.22, p202. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + a_in = numpy.array(a, dtype=numpy.double, order="C", copy=False, subok=True) + f_in = numpy.array(f, dtype=numpy.double, order="C", copy=False, subok=True) + elong_in = numpy.array(elong, dtype=numpy.double, order="C", copy=False, subok=True) + phi_in = numpy.array(phi, dtype=numpy.double, order="C", copy=False, subok=True) + height_in = numpy.array(height, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), a_in, f_in, elong_in, phi_in, height_in) + xyz_out = numpy.empty(broadcast.shape + (3,), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [a_in, f_in, elong_in, phi_in, height_in, xyz_out[...,0], c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*5 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._gd2gce(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'gd2gce') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(xyz_out.shape) > 0 and xyz_out.shape[0] == 1 + xyz_out = xyz_out.reshape(xyz_out.shape[1:]) + + return xyz_out +STATUS_CODES['gd2gce'] = {0: 'OK', -1: 'illegal case (Note 4)'} + + + +def d2dtf(scale, ndp, d1, d2): + """ + Wrapper for ERFA function ``eraD2dtf``. + + Parameters + ---------- + scale : const char array + ndp : int array + d1 : double array + d2 : double array + + Returns + ------- + iy : int array + im : int array + id : int array + ihmsf : int array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a D 2 d t f + - - - - - - - - - + + Format for output a 2-part Julian Date (or in the case of UTC a + quasi-JD form that includes special provision for leap seconds). + + Given: + scale char[] time scale ID (Note 1) + ndp int resolution (Note 2) + d1,d2 double time as a 2-part Julian Date (Notes 3,4) + + Returned: + iy,im,id int year, month, day in Gregorian calendar (Note 5) + ihmsf int[4] hours, minutes, seconds, fraction (Note 1) + + Returned (function value): + int status: +1 = dubious year (Note 5) + 0 = OK + -1 = unacceptable date (Note 6) + + Notes: + + 1) scale identifies the time scale. Only the value "UTC" (in upper + case) is significant, and enables handling of leap seconds (see + Note 4).
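And the explicit-ellipsoid counterpart ``gd2gce``: at the equator and sea level the geocentric vector is simply [a, 0, 0] (a and f taken from ``eform`` here for convenience):

    from astropy import _erfa as erfa

    a, f = erfa.eform(1)
    xyz = erfa.gd2gce(a, f, 0.0, 0.0, 0.0)   # -> approximately [a, 0.0, 0.0]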
+ + 2) ndp is the number of decimal places in the seconds field, and can + have negative as well as positive values, such as: + + ndp resolution + -4 1 00 00 + -3 0 10 00 + -2 0 01 00 + -1 0 00 10 + 0 0 00 01 + 1 0 00 00.1 + 2 0 00 00.01 + 3 0 00 00.001 + + The limits are platform dependent, but a safe range is -5 to +9. + + 3) d1+d2 is Julian Date, apportioned in any convenient way between + the two arguments, for example where d1 is the Julian Day Number + and d2 is the fraction of a day. In the case of UTC, where the + use of JD is problematical, special conventions apply: see the + next note. + + 4) JD cannot unambiguously represent UTC during a leap second unless + special measures are taken. The ERFA internal convention is that + the quasi-JD day represents UTC days whether the length is 86399, + 86400 or 86401 SI seconds. In the 1960-1972 era there were + smaller jumps (in either direction) each time the linear UTC(TAI) + expression was changed, and these "mini-leaps" are also included + in the ERFA convention. + + 5) The warning status "dubious year" flags UTCs that predate the + introduction of the time scale or that are too far in the future + to be trusted. See eraDat for further details. + + 6) For calendar conventions and limitations, see eraCal2jd. + + Called: + eraJd2cal JD to Gregorian calendar + eraD2tf decompose days to hms + eraDat delta(AT) = TAI-UTC + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + scale_in = numpy.array(scale, dtype=numpy.dtype('S16'), order="C", copy=False, subok=True) + ndp_in = numpy.array(ndp, dtype=numpy.intc, order="C", copy=False, subok=True) + d1_in = numpy.array(d1, dtype=numpy.double, order="C", copy=False, subok=True) + d2_in = numpy.array(d2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), scale_in, ndp_in, d1_in, d2_in) + iy_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + im_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + id_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + ihmsf_out = numpy.empty(broadcast.shape + (4,), dtype=numpy.intc) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [scale_in, ndp_in, d1_in, d2_in, iy_out, im_out, id_out, ihmsf_out[...,0], c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*5 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._d2dtf(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'd2dtf') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(iy_out.shape) > 0 and iy_out.shape[0] == 1 + iy_out = iy_out.reshape(iy_out.shape[1:]) + assert len(im_out.shape) > 0 and im_out.shape[0] == 1 + im_out = im_out.reshape(im_out.shape[1:]) + assert len(id_out.shape) > 0 and id_out.shape[0] == 1 + id_out = id_out.reshape(id_out.shape[1:]) + assert len(ihmsf_out.shape) > 0 and ihmsf_out.shape[0] == 1 + ihmsf_out = ihmsf_out.reshape(ihmsf_out.shape[1:]) + + return iy_out, im_out, id_out, ihmsf_out +STATUS_CODES['d2dtf'] = {1: 'dubious year 
(Note 5)', 0: 'OK', -1: 'unacceptable date (Note 6)'} + + + +def dat(iy, im, id, fd): + """ + Wrapper for ERFA function ``eraDat``. + + Parameters + ---------- + iy : int array + im : int array + id : int array + fd : double array + + Returns + ------- + deltat : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - + e r a D a t + - - - - - - - + + For a given UTC date, calculate delta(AT) = TAI-UTC. + + :------------------------------------------: + : : + : IMPORTANT : + : : + : A new version of this function must be : + : produced whenever a new leap second is : + : announced. There are four items to : + : change on each such occasion: : + : : + : 1) A new line must be added to the set : + : of statements that initialize the : + : array "changes". : + : : + : 2) The constant IYV must be set to the : + : current year. : + : : + : 3) The "Latest leap second" comment : + : below must be set to the new leap : + : second date. : + : : + : 4) The "This revision" comment, later, : + : must be set to the current date. : + : : + : Change (2) must also be carried out : + : whenever the function is re-issued, : + : even if no leap seconds have been : + : added. : + : : + : Latest leap second: 2016 December 31 : + : : + :__________________________________________: + + Given: + iy int UTC: year (Notes 1 and 2) + im int month (Note 2) + id int day (Notes 2 and 3) + fd double fraction of day (Note 4) + + Returned: + deltat double TAI minus UTC, seconds + + Returned (function value): + int status (Note 5): + 1 = dubious year (Note 1) + 0 = OK + -1 = bad year + -2 = bad month + -3 = bad day (Note 3) + -4 = bad fraction (Note 4) + -5 = internal error (Note 5) + + Notes: + + 1) UTC began at 1960 January 1.0 (JD 2436934.5) and it is improper + to call the function with an earlier date. If this is attempted, + zero is returned together with a warning status. + + Because leap seconds cannot, in principle, be predicted in + advance, a reliable check for dates beyond the valid range is + impossible. To guard against gross errors, a year five or more + after the release year of the present function (see the constant + IYV) is considered dubious. In this case a warning status is + returned but the result is computed in the normal way. + + For both too-early and too-late years, the warning status is +1. + This is distinct from the error status -1, which signifies a year + so early that JD could not be computed. + + 2) If the specified date is for a day which ends with a leap second, + the TAI-UTC value returned is for the period leading up to the + leap second. If the date is for a day which begins as a leap + second ends, the TAI-UTC returned is for the period following the + leap second. + + 3) The day number must be in the normal calendar range, for example + 1 through 30 for April. The "almanac" convention of allowing + such dates as January 0 and December 32 is not supported in this + function, in order to avoid confusion near leap seconds. + + 4) The fraction of day is used only for dates before the + introduction of leap seconds, the first of which occurred at the + end of 1971. It is tested for validity (0 to 1 is the valid + range) even if not used; if invalid, zero is used and status -4 + is returned. For many applications, setting fd to zero is + acceptable; the resulting error is always less than 3 ms (and + occurs only pre-1972). + + 5) The status value returned in the case where there are multiple + errors refers to the first error detected. 
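A worked ``d2dtf`` example (JD 2451545.0 is 2000 January 1, 12:00, here interpreted on the UTC scale; ``ndp=2`` requests centiseconds in the last ``ihmsf`` field):

    from astropy import _erfa as erfa

    iy, im, iday, ihmsf = erfa.d2dtf("UTC", 2, 2451545.0, 0.0)
    # iy, im, iday -> 2000, 1, 1; ihmsf -> [12, 0, 0, 0]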
For example, if the + month and day are 13 and 32 respectively, status -2 (bad month) + will be returned. The "internal error" status refers to a + case that is impossible but causes some compilers to issue a + warning. + + 6) In cases where a valid result is not available, zero is returned. + + References: + + 1) For dates from 1961 January 1 onwards, the expressions from the + file ftp://maia.usno.navy.mil/ser7/tai-utc.dat are used. + + 2) The 5ms timestep at 1961 January 1 is taken from 2.58.1 (p87) of + the 1992 Explanatory Supplement. + + Called: + eraCal2jd Gregorian calendar to JD + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + iy_in = numpy.array(iy, dtype=numpy.intc, order="C", copy=False, subok=True) + im_in = numpy.array(im, dtype=numpy.intc, order="C", copy=False, subok=True) + id_in = numpy.array(id, dtype=numpy.intc, order="C", copy=False, subok=True) + fd_in = numpy.array(fd, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), iy_in, im_in, id_in, fd_in) + deltat_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [iy_in, im_in, id_in, fd_in, deltat_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._dat(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'dat') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(deltat_out.shape) > 0 and deltat_out.shape[0] == 1 + deltat_out = deltat_out.reshape(deltat_out.shape[1:]) + + return deltat_out +STATUS_CODES['dat'] = {1: 'dubious year (Note 1)', 0: 'OK', -1: 'bad year', -2: 'bad month', -3: 'bad day (Note 3)', -4: 'bad fraction (Note 4)', -5: 'internal error (Note 5)'} + + + +def dtdb(date1, date2, ut, elong, u, v): + """ + Wrapper for ERFA function ``eraDtdb``. + + Parameters + ---------- + date1 : double array + date2 : double array + ut : double array + elong : double array + u : double array + v : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a D t d b + - - - - - - - - + + An approximation to TDB-TT, the difference between barycentric + dynamical time and terrestrial time, for an observer on the Earth. 
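Given the leap-second bookkeeping described above (latest entry 2016 December 31), ``dat`` for any 2017 date returns 37 s:

    from astropy import _erfa as erfa

    deltat = erfa.dat(2017, 1, 1, 0.0)   # TAI-UTC -> 37.0 seconds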
+ + The different time scales - proper, coordinate and realized - are + related to each other: + + TAI <- physically realized + : + offset <- observed (nominally +32.184s) + : + TT <- terrestrial time + : + rate adjustment (L_G) <- definition of TT + : + TCG <- time scale for GCRS + : + "periodic" terms <- eraDtdb is an implementation + : + rate adjustment (L_C) <- function of solar-system ephemeris + : + TCB <- time scale for BCRS + : + rate adjustment (-L_B) <- definition of TDB + : + TDB <- TCB scaled to track TT + : + "periodic" terms <- -eraDtdb is an approximation + : + TT <- terrestrial time + + Adopted values for the various constants can be found in the IERS + Conventions (McCarthy & Petit 2003). + + Given: + date1,date2 double date, TDB (Notes 1-3) + ut double universal time (UT1, fraction of one day) + elong double longitude (east positive, radians) + u double distance from Earth spin axis (km) + v double distance north of equatorial plane (km) + + Returned (function value): + double TDB-TT (seconds) + + Notes: + + 1) The date date1+date2 is a Julian Date, apportioned in any + convenient way between the two arguments. For example, + JD(TT)=2450123.7 could be expressed in any of these ways, + among others: + + date1 date2 + + 2450123.7 0.0 (JD method) + 2451545.0 -1421.3 (J2000 method) + 2400000.5 50123.2 (MJD method) + 2450123.5 0.2 (date & time method) + + The JD method is the most natural and convenient to use in + cases where the loss of several decimal digits of resolution + is acceptable. The J2000 method is best matched to the way + the argument is handled internally and will deliver the + optimum resolution. The MJD method and the date & time methods + are both good compromises between resolution and convenience. + + Although the date is, formally, barycentric dynamical time (TDB), + the terrestrial dynamical time (TT) can be used with no practical + effect on the accuracy of the prediction. + + 2) TT can be regarded as a coordinate time that is realized as an + offset of 32.184s from International Atomic Time, TAI. TT is a + specific linear transformation of geocentric coordinate time TCG, + which is the time scale for the Geocentric Celestial Reference + System, GCRS. + + 3) TDB is a coordinate time, and is a specific linear transformation + of barycentric coordinate time TCB, which is the time scale for + the Barycentric Celestial Reference System, BCRS. + + 4) The difference TCG-TCB depends on the masses and positions of the + bodies of the solar system and the velocity of the Earth. It is + dominated by a rate difference, the residual being of a periodic + character. The latter, which is modeled by the present function, + comprises a main (annual) sinusoidal term of amplitude + approximately 0.00166 seconds, plus planetary terms up to about + 20 microseconds, and lunar and diurnal terms up to 2 microseconds. + These effects come from the changing transverse Doppler effect + and gravitational red-shift as the observer (on the Earth's + surface) experiences variations in speed (with respect to the + BCRS) and gravitational potential. + + 5) TDB can be regarded as the same as TCB but with a rate adjustment + to keep it close to TT, which is convenient for many applications. + The history of successive attempts to define TDB is set out in + Resolution 3 adopted by the IAU General Assembly in 2006, which + defines a fixed TDB(TCB) transformation that is consistent with + contemporary solar-system ephemerides. 
Future ephemerides will + imply slightly changed transformations between TCG and TCB, which + could introduce a linear drift between TDB and TT; however, any + such drift is unlikely to exceed 1 nanosecond per century. + + 6) The geocentric TDB-TT model used in the present function is that of + Fairhead & Bretagnon (1990), in its full form. It was originally + supplied by Fairhead (private communications with P.T.Wallace, + 1990) as a Fortran subroutine. The present C function contains an + adaptation of the Fairhead code. The numerical results are + essentially unaffected by the changes, the differences with + respect to the Fairhead & Bretagnon original being at the 1e-20 s + level. + + The topocentric part of the model is from Moyer (1981) and + Murray (1983), with fundamental arguments adapted from + Simon et al. 1994. It is an approximation to the expression + ( v / c ) . ( r / c ), where v is the barycentric velocity of + the Earth, r is the geocentric position of the observer and + c is the speed of light. + + By supplying zeroes for u and v, the topocentric part of the + model can be nullified, and the function will return the Fairhead + & Bretagnon result alone. + + 7) During the interval 1950-2050, the absolute accuracy is better + than +/- 3 nanoseconds relative to time ephemerides obtained by + direct numerical integrations based on the JPL DE405 solar system + ephemeris. + + 8) It must be stressed that the present function is merely a model, + and that numerical integration of solar-system ephemerides is the + definitive method for predicting the relationship between TCG and + TCB and hence between TT and TDB. + + References: + + Fairhead, L., & Bretagnon, P., Astron.Astrophys., 229, 240-247 + (1990). + + IAU 2006 Resolution 3. + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Moyer, T.D., Cel.Mech., 23, 33 (1981). + + Murray, C.A., Vectorial Astrometry, Adam Hilger (1983). + + Seidelmann, P.K. et al., Explanatory Supplement to the + Astronomical Almanac, Chapter 2, University Science Books (1992). + + Simon, J.L., Bretagnon, P., Chapront, J., Chapront-Touze, M., + Francou, G. & Laskar, J., Astron.Astrophys., 282, 663-683 (1994). + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + date1_in = numpy.array(date1, dtype=numpy.double, order="C", copy=False, subok=True) + date2_in = numpy.array(date2, dtype=numpy.double, order="C", copy=False, subok=True) + ut_in = numpy.array(ut, dtype=numpy.double, order="C", copy=False, subok=True) + elong_in = numpy.array(elong, dtype=numpy.double, order="C", copy=False, subok=True) + u_in = numpy.array(u, dtype=numpy.double, order="C", copy=False, subok=True) + v_in = numpy.array(v, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), date1_in, date2_in, ut_in, elong_in, u_in, v_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [date1_in, date2_in, ut_in, elong_in, u_in, v_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*6 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._dtdb(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def dtf2d(scale, iy, im, id, ihr, imn, sec): + """ + Wrapper for ERFA function ``eraDtf2d``. + + Parameters + ---------- + scale : const char array + iy : int array + im : int array + id : int array + ihr : int array + imn : int array + sec : double array + + Returns + ------- + d1 : double array + d2 : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a D t f 2 d + - - - - - - - - - + + Encode date and time fields into 2-part Julian Date (or in the case + of UTC a quasi-JD form that includes special provision for leap + seconds). + + Given: + scale char[] time scale ID (Note 1) + iy,im,id int year, month, day in Gregorian calendar (Note 2) + ihr,imn int hour, minute + sec double seconds + + Returned: + d1,d2 double 2-part Julian Date (Notes 3,4) + + Returned (function value): + int status: +3 = both of next two + +2 = time is after end of day (Note 5) + +1 = dubious year (Note 6) + 0 = OK + -1 = bad year + -2 = bad month + -3 = bad day + -4 = bad hour + -5 = bad minute + -6 = bad second (<0) + + Notes: + + 1) scale identifies the time scale. Only the value "UTC" (in upper + case) is significant, and enables handling of leap seconds (see + Note 4). + + 2) For calendar conventions and limitations, see eraCal2jd. + + 3) The sum of the results, d1+d2, is Julian Date, where normally d1 + is the Julian Day Number and d2 is the fraction of a day. In the + case of UTC, where the use of JD is problematical, special + conventions apply: see the next note. + + 4) JD cannot unambiguously represent UTC during a leap second unless + special measures are taken. The ERFA internal convention is that + the quasi-JD day represents UTC days whether the length is 86399, + 86400 or 86401 SI seconds. In the 1960-1972 era there were + smaller jumps (in either direction) each time the linear UTC(TAI) + expression was changed, and these "mini-leaps" are also included + in the ERFA convention. 
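Per note 6 above, supplying zeroes for ``u`` and ``v`` nullifies the topocentric part of ``dtdb``, leaving the geocentric Fairhead & Bretagnon value, which is of order a millisecond:

    from astropy import _erfa as erfa

    tdb_tt = erfa.dtdb(2451545.0, 0.0, 0.0, 0.0, 0.0, 0.0)   # seconds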
+ + 5) The warning status "time is after end of day" usually means that + the sec argument is greater than 60.0. However, in a day ending + in a leap second the limit changes to 61.0 (or 59.0 in the case + of a negative leap second). + + 6) The warning status "dubious year" flags UTCs that predate the + introduction of the time scale or that are too far in the future + to be trusted. See eraDat for further details. + + 7) Only in the case of continuous and regular time scales (TAI, TT, + TCG, TCB and TDB) is the result d1+d2 a Julian Date, strictly + speaking. In the other cases (UT1 and UTC) the result must be + used with circumspection; in particular the difference between + two such results cannot be interpreted as a precise time + interval. + + Called: + eraCal2jd Gregorian calendar to JD + eraDat delta(AT) = TAI-UTC + eraJd2cal JD to Gregorian calendar + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + scale_in = numpy.array(scale, dtype=numpy.dtype('S16'), order="C", copy=False, subok=True) + iy_in = numpy.array(iy, dtype=numpy.intc, order="C", copy=False, subok=True) + im_in = numpy.array(im, dtype=numpy.intc, order="C", copy=False, subok=True) + id_in = numpy.array(id, dtype=numpy.intc, order="C", copy=False, subok=True) + ihr_in = numpy.array(ihr, dtype=numpy.intc, order="C", copy=False, subok=True) + imn_in = numpy.array(imn, dtype=numpy.intc, order="C", copy=False, subok=True) + sec_in = numpy.array(sec, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), scale_in, iy_in, im_in, id_in, ihr_in, imn_in, sec_in) + d1_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + d2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [scale_in, iy_in, im_in, id_in, ihr_in, imn_in, sec_in, d1_out, d2_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*7 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._dtf2d(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'dtf2d') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(d1_out.shape) > 0 and d1_out.shape[0] == 1 + d1_out = d1_out.reshape(d1_out.shape[1:]) + assert len(d2_out.shape) > 0 and d2_out.shape[0] == 1 + d2_out = d2_out.reshape(d2_out.shape[1:]) + + return d1_out, d2_out +STATUS_CODES['dtf2d'] = {3: 'both of next two', 2: 'time is after end of day (Note 5)', 1: 'dubious year (Note 6)', 0: 'OK', -1: 'bad year', -2: 'bad month', -3: 'bad day', -4: 'bad hour', -5: 'bad minute', -6: 'bad second (<0)'} + + + +def taitt(tai1, tai2): + """ + Wrapper for ERFA function ``eraTaitt``. + + Parameters + ---------- + tai1 : double array + tai2 : double array + + Returns + ------- + tt1 : double array + tt2 : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a T a i t t + - - - - - - - - - + + Time scale transformation: International Atomic Time, TAI, to + Terrestrial Time, TT. 
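``dtf2d`` is the encoding counterpart of ``d2dtf``; the same calendar instant goes back to a 2-part (quasi-)JD:

    from astropy import _erfa as erfa

    d1, d2 = erfa.dtf2d("UTC", 2000, 1, 1, 12, 0, 0.0)
    # d1 + d2 -> 2451545.0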
+ + Given: + tai1,tai2 double TAI as a 2-part Julian Date + + Returned: + tt1,tt2 double TT as a 2-part Julian Date + + Returned (function value): + int status: 0 = OK + + Note: + + tai1+tai2 is Julian Date, apportioned in any convenient way + between the two arguments, for example where tai1 is the Julian + Day Number and tai2 is the fraction of a day. The returned + tt1,tt2 follow suit. + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Explanatory Supplement to the Astronomical Almanac, + P. Kenneth Seidelmann (ed), University Science Books (1992) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + tai1_in = numpy.array(tai1, dtype=numpy.double, order="C", copy=False, subok=True) + tai2_in = numpy.array(tai2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), tai1_in, tai2_in) + tt1_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + tt2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [tai1_in, tai2_in, tt1_out, tt2_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._taitt(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'taitt') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(tt1_out.shape) > 0 and tt1_out.shape[0] == 1 + tt1_out = tt1_out.reshape(tt1_out.shape[1:]) + assert len(tt2_out.shape) > 0 and tt2_out.shape[0] == 1 + tt2_out = tt2_out.reshape(tt2_out.shape[1:]) + + return tt1_out, tt2_out +STATUS_CODES['taitt'] = {0: 'OK'} + + + +def taiut1(tai1, tai2, dta): + """ + Wrapper for ERFA function ``eraTaiut1``. + + Parameters + ---------- + tai1 : double array + tai2 : double array + dta : double array + + Returns + ------- + ut11 : double array + ut12 : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a T a i u t 1 + - - - - - - - - - - + + Time scale transformation: International Atomic Time, TAI, to + Universal Time, UT1. + + Given: + tai1,tai2 double TAI as a 2-part Julian Date + dta double UT1-TAI in seconds + + Returned: + ut11,ut12 double UT1 as a 2-part Julian Date + + Returned (function value): + int status: 0 = OK + + Notes: + + 1) tai1+tai2 is Julian Date, apportioned in any convenient way + between the two arguments, for example where tai1 is the Julian + Day Number and tai2 is the fraction of a day. The returned + UT11,UT12 follow suit. + + 2) The argument dta, i.e. UT1-TAI, is an observed quantity, and is + available from IERS tabulations. + + Reference: + + Explanatory Supplement to the Astronomical Almanac, + P. Kenneth Seidelmann (ed), University Science Books (1992) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
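``taitt`` applies the fixed TT = TAI + 32.184 s offset; with the day fraction carried in the second argument, the offset lands there too in this sketch:

    from astropy import _erfa as erfa

    tt1, tt2 = erfa.taitt(2451545.0, 0.0)
    # tt1 -> 2451545.0, tt2 -> 32.184 / 86400.0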
+ + """ + + #Turn all inputs into arrays + tai1_in = numpy.array(tai1, dtype=numpy.double, order="C", copy=False, subok=True) + tai2_in = numpy.array(tai2, dtype=numpy.double, order="C", copy=False, subok=True) + dta_in = numpy.array(dta, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), tai1_in, tai2_in, dta_in) + ut11_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + ut12_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [tai1_in, tai2_in, dta_in, ut11_out, ut12_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._taiut1(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'taiut1') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(ut11_out.shape) > 0 and ut11_out.shape[0] == 1 + ut11_out = ut11_out.reshape(ut11_out.shape[1:]) + assert len(ut12_out.shape) > 0 and ut12_out.shape[0] == 1 + ut12_out = ut12_out.reshape(ut12_out.shape[1:]) + + return ut11_out, ut12_out +STATUS_CODES['taiut1'] = {0: 'OK'} + + + +def taiutc(tai1, tai2): + """ + Wrapper for ERFA function ``eraTaiutc``. + + Parameters + ---------- + tai1 : double array + tai2 : double array + + Returns + ------- + utc1 : double array + utc2 : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a T a i u t c + - - - - - - - - - - + + Time scale transformation: International Atomic Time, TAI, to + Coordinated Universal Time, UTC. + + Given: + tai1,tai2 double TAI as a 2-part Julian Date (Note 1) + + Returned: + utc1,utc2 double UTC as a 2-part quasi Julian Date (Notes 1-3) + + Returned (function value): + int status: +1 = dubious year (Note 4) + 0 = OK + -1 = unacceptable date + + Notes: + + 1) tai1+tai2 is Julian Date, apportioned in any convenient way + between the two arguments, for example where tai1 is the Julian + Day Number and tai2 is the fraction of a day. The returned utc1 + and utc2 form an analogous pair, except that a special convention + is used, to deal with the problem of leap seconds - see the next + note. + + 2) JD cannot unambiguously represent UTC during a leap second unless + special measures are taken. The convention in the present + function is that the JD day represents UTC days whether the + length is 86399, 86400 or 86401 SI seconds. In the 1960-1972 era + there were smaller jumps (in either direction) each time the + linear UTC(TAI) expression was changed, and these "mini-leaps" + are also included in the ERFA convention. + + 3) The function eraD2dtf can be used to transform the UTC quasi-JD + into calendar date and clock time, including UTC leap second + handling. + + 4) The warning status "dubious year" flags UTCs that predate the + introduction of the time scale or that are too far in the future + to be trusted. See eraDat for further details. + + Called: + eraUtctai UTC to TAI + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 
32, BKG (2004) + + Explanatory Supplement to the Astronomical Almanac, + P. Kenneth Seidelmann (ed), University Science Books (1992) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + tai1_in = numpy.array(tai1, dtype=numpy.double, order="C", copy=False, subok=True) + tai2_in = numpy.array(tai2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), tai1_in, tai2_in) + utc1_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + utc2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [tai1_in, tai2_in, utc1_out, utc2_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._taiutc(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'taiutc') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(utc1_out.shape) > 0 and utc1_out.shape[0] == 1 + utc1_out = utc1_out.reshape(utc1_out.shape[1:]) + assert len(utc2_out.shape) > 0 and utc2_out.shape[0] == 1 + utc2_out = utc2_out.reshape(utc2_out.shape[1:]) + + return utc1_out, utc2_out +STATUS_CODES['taiutc'] = {1: 'dubious year (Note 4)', 0: 'OK', -1: 'unacceptable date'} + + + +def tcbtdb(tcb1, tcb2): + """ + Wrapper for ERFA function ``eraTcbtdb``. + + Parameters + ---------- + tcb1 : double array + tcb2 : double array + + Returns + ------- + tdb1 : double array + tdb2 : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a T c b t d b + - - - - - - - - - - + + Time scale transformation: Barycentric Coordinate Time, TCB, to + Barycentric Dynamical Time, TDB. + + Given: + tcb1,tcb2 double TCB as a 2-part Julian Date + + Returned: + tdb1,tdb2 double TDB as a 2-part Julian Date + + Returned (function value): + int status: 0 = OK + + Notes: + + 1) tcb1+tcb2 is Julian Date, apportioned in any convenient way + between the two arguments, for example where tcb1 is the Julian + Day Number and tcb2 is the fraction of a day. The returned + tdb1,tdb2 follow suit. + + 2) The 2006 IAU General Assembly introduced a conventional linear + transformation between TDB and TCB. This transformation + compensates for the drift between TCB and terrestrial time TT, + and keeps TDB approximately centered on TT. Because the + relationship between TT and TCB depends on the adopted solar + system ephemeris, the degree of alignment between TDB and TT over + long intervals will vary according to which ephemeris is used. + Former definitions of TDB attempted to avoid this problem by + stipulating that TDB and TT should differ only by periodic + effects. This is a good description of the nature of the + relationship but eluded precise mathematical formulation. The + conventional linear relationship adopted in 2006 sidestepped + these difficulties whilst delivering a TDB that in practice was + consistent with values before that date. 
+
+    3) TDB is essentially the same as Teph, the time argument for the
+       JPL solar system ephemerides.
+
+    Reference:
+
+       IAU 2006 Resolution B3
+
+    Copyright (C) 2013-2017, NumFOCUS Foundation.
+    Derived, with permission, from the SOFA library. See notes at end of file.
+
+    """
+
+    #Turn all inputs into arrays
+    tcb1_in = numpy.array(tcb1, dtype=numpy.double, order="C", copy=False, subok=True)
+    tcb2_in = numpy.array(tcb2, dtype=numpy.double, order="C", copy=False, subok=True)
+    make_outputs_scalar = False
+
+    #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed
+    broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), tcb1_in, tcb2_in)
+    tdb1_out = numpy.empty(broadcast.shape + (), dtype=numpy.double)
+    tdb2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double)
+    c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc)
+
+    #Create the iterator, broadcasting on all but the consumed dimensions
+    arrs = [tcb1_in, tcb2_in, tdb1_out, tdb2_out, c_retval_out]
+    op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs]
+    op_flags = [['readonly']]*2 + [['readwrite']]*3
+    it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags)
+
+    #Iterate
+    stat_ok = _core._tcbtdb(it)
+
+    if not stat_ok:
+        check_errwarn(c_retval_out, 'tcbtdb')
+    #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d
+    if make_outputs_scalar:
+        assert len(tdb1_out.shape) > 0 and tdb1_out.shape[0] == 1
+        tdb1_out = tdb1_out.reshape(tdb1_out.shape[1:])
+        assert len(tdb2_out.shape) > 0 and tdb2_out.shape[0] == 1
+        tdb2_out = tdb2_out.reshape(tdb2_out.shape[1:])
+
+    return tdb1_out, tdb2_out
+STATUS_CODES['tcbtdb'] = {0: 'OK'}
+
+
+
+def tcgtt(tcg1, tcg2):
+    """
+    Wrapper for ERFA function ``eraTcgtt``.
+
+    Parameters
+    ----------
+    tcg1 : double array
+    tcg2 : double array
+
+    Returns
+    -------
+    tt1 : double array
+    tt2 : double array
+
+    Notes
+    -----
+    The ERFA documentation is below.
+
+    - - - - - - - - -
+     e r a T c g t t
+    - - - - - - - - -
+
+    Time scale transformation: Geocentric Coordinate Time, TCG, to
+    Terrestrial Time, TT.
+
+    Given:
+       tcg1,tcg2  double    TCG as a 2-part Julian Date
+
+    Returned:
+       tt1,tt2    double    TT as a 2-part Julian Date
+
+    Returned (function value):
+                  int       status:  0 = OK
+
+    Note:
+
+       tcg1+tcg2 is Julian Date, apportioned in any convenient way
+       between the two arguments, for example where tcg1 is the Julian
+       Day Number and tcg2 is the fraction of a day.  The returned
+       tt1,tt2 follow suit.
+
+    References:
+
+       McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003),
+       IERS Technical Note No. 32, BKG (2004)
+
+       IAU 2000 Resolution B1.9
+
+    Copyright (C) 2013-2017, NumFOCUS Foundation.
+    Derived, with permission, from the SOFA library. See notes at end of file.
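+
+    Example (an illustrative sketch added in editing, not part of the
+    SOFA-derived documentation above; the input values are arbitrary):
+
+       tt1, tt2 = tcgtt(2453750.5, 0.892862531)
+       # TT lags TCG at the fixed rate L_G ~= 6.969290134e-10
+       # defined by IAU 2000 Resolution B1.9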
+ + """ + + #Turn all inputs into arrays + tcg1_in = numpy.array(tcg1, dtype=numpy.double, order="C", copy=False, subok=True) + tcg2_in = numpy.array(tcg2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), tcg1_in, tcg2_in) + tt1_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + tt2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [tcg1_in, tcg2_in, tt1_out, tt2_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._tcgtt(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'tcgtt') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(tt1_out.shape) > 0 and tt1_out.shape[0] == 1 + tt1_out = tt1_out.reshape(tt1_out.shape[1:]) + assert len(tt2_out.shape) > 0 and tt2_out.shape[0] == 1 + tt2_out = tt2_out.reshape(tt2_out.shape[1:]) + + return tt1_out, tt2_out +STATUS_CODES['tcgtt'] = {0: 'OK'} + + + +def tdbtcb(tdb1, tdb2): + """ + Wrapper for ERFA function ``eraTdbtcb``. + + Parameters + ---------- + tdb1 : double array + tdb2 : double array + + Returns + ------- + tcb1 : double array + tcb2 : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a T d b t c b + - - - - - - - - - - + + Time scale transformation: Barycentric Dynamical Time, TDB, to + Barycentric Coordinate Time, TCB. + + Given: + tdb1,tdb2 double TDB as a 2-part Julian Date + + Returned: + tcb1,tcb2 double TCB as a 2-part Julian Date + + Returned (function value): + int status: 0 = OK + + Notes: + + 1) tdb1+tdb2 is Julian Date, apportioned in any convenient way + between the two arguments, for example where tdb1 is the Julian + Day Number and tdb2 is the fraction of a day. The returned + tcb1,tcb2 follow suit. + + 2) The 2006 IAU General Assembly introduced a conventional linear + transformation between TDB and TCB. This transformation + compensates for the drift between TCB and terrestrial time TT, + and keeps TDB approximately centered on TT. Because the + relationship between TT and TCB depends on the adopted solar + system ephemeris, the degree of alignment between TDB and TT over + long intervals will vary according to which ephemeris is used. + Former definitions of TDB attempted to avoid this problem by + stipulating that TDB and TT should differ only by periodic + effects. This is a good description of the nature of the + relationship but eluded precise mathematical formulation. The + conventional linear relationship adopted in 2006 sidestepped + these difficulties whilst delivering a TDB that in practice was + consistent with values before that date. + + 3) TDB is essentially the same as Teph, the time argument for the + JPL solar system ephemerides. + + Reference: + + IAU 2006 Resolution B3 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + tdb1_in = numpy.array(tdb1, dtype=numpy.double, order="C", copy=False, subok=True) + tdb2_in = numpy.array(tdb2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), tdb1_in, tdb2_in) + tcb1_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + tcb2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [tdb1_in, tdb2_in, tcb1_out, tcb2_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._tdbtcb(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'tdbtcb') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(tcb1_out.shape) > 0 and tcb1_out.shape[0] == 1 + tcb1_out = tcb1_out.reshape(tcb1_out.shape[1:]) + assert len(tcb2_out.shape) > 0 and tcb2_out.shape[0] == 1 + tcb2_out = tcb2_out.reshape(tcb2_out.shape[1:]) + + return tcb1_out, tcb2_out +STATUS_CODES['tdbtcb'] = {0: 'OK'} + + + +def tdbtt(tdb1, tdb2, dtr): + """ + Wrapper for ERFA function ``eraTdbtt``. + + Parameters + ---------- + tdb1 : double array + tdb2 : double array + dtr : double array + + Returns + ------- + tt1 : double array + tt2 : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a T d b t t + - - - - - - - - - + + Time scale transformation: Barycentric Dynamical Time, TDB, to + Terrestrial Time, TT. + + Given: + tdb1,tdb2 double TDB as a 2-part Julian Date + dtr double TDB-TT in seconds + + Returned: + tt1,tt2 double TT as a 2-part Julian Date + + Returned (function value): + int status: 0 = OK + + Notes: + + 1) tdb1+tdb2 is Julian Date, apportioned in any convenient way + between the two arguments, for example where tdb1 is the Julian + Day Number and tdb2 is the fraction of a day. The returned + tt1,tt2 follow suit. + + 2) The argument dtr represents the quasi-periodic component of the + GR transformation between TT and TCB. It is dependent upon the + adopted solar-system ephemeris, and can be obtained by numerical + integration, by interrogating a precomputed time ephemeris or by + evaluating a model such as that implemented in the ERFA function + eraDtdb. The quantity is dominated by an annual term of 1.7 ms + amplitude. + + 3) TDB is essentially the same as Teph, the time argument for the + JPL solar system ephemerides. + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + IAU 2006 Resolution 3 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + tdb1_in = numpy.array(tdb1, dtype=numpy.double, order="C", copy=False, subok=True) + tdb2_in = numpy.array(tdb2, dtype=numpy.double, order="C", copy=False, subok=True) + dtr_in = numpy.array(dtr, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), tdb1_in, tdb2_in, dtr_in) + tt1_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + tt2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [tdb1_in, tdb2_in, dtr_in, tt1_out, tt2_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._tdbtt(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'tdbtt') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(tt1_out.shape) > 0 and tt1_out.shape[0] == 1 + tt1_out = tt1_out.reshape(tt1_out.shape[1:]) + assert len(tt2_out.shape) > 0 and tt2_out.shape[0] == 1 + tt2_out = tt2_out.reshape(tt2_out.shape[1:]) + + return tt1_out, tt2_out +STATUS_CODES['tdbtt'] = {0: 'OK'} + + + +def tttai(tt1, tt2): + """ + Wrapper for ERFA function ``eraTttai``. + + Parameters + ---------- + tt1 : double array + tt2 : double array + + Returns + ------- + tai1 : double array + tai2 : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a T t t a i + - - - - - - - - - + + Time scale transformation: Terrestrial Time, TT, to International + Atomic Time, TAI. + + Given: + tt1,tt2 double TT as a 2-part Julian Date + + Returned: + tai1,tai2 double TAI as a 2-part Julian Date + + Returned (function value): + int status: 0 = OK + + Note: + + tt1+tt2 is Julian Date, apportioned in any convenient way between + the two arguments, for example where tt1 is the Julian Day Number + and tt2 is the fraction of a day. The returned tai1,tai2 follow + suit. + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Explanatory Supplement to the Astronomical Almanac, + P. Kenneth Seidelmann (ed), University Science Books (1992) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + tt1_in = numpy.array(tt1, dtype=numpy.double, order="C", copy=False, subok=True) + tt2_in = numpy.array(tt2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), tt1_in, tt2_in) + tai1_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + tai2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [tt1_in, tt2_in, tai1_out, tai2_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._tttai(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'tttai') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(tai1_out.shape) > 0 and tai1_out.shape[0] == 1 + tai1_out = tai1_out.reshape(tai1_out.shape[1:]) + assert len(tai2_out.shape) > 0 and tai2_out.shape[0] == 1 + tai2_out = tai2_out.reshape(tai2_out.shape[1:]) + + return tai1_out, tai2_out +STATUS_CODES['tttai'] = {0: 'OK'} + + + +def tttcg(tt1, tt2): + """ + Wrapper for ERFA function ``eraTttcg``. + + Parameters + ---------- + tt1 : double array + tt2 : double array + + Returns + ------- + tcg1 : double array + tcg2 : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a T t t c g + - - - - - - - - - + + Time scale transformation: Terrestrial Time, TT, to Geocentric + Coordinate Time, TCG. + + Given: + tt1,tt2 double TT as a 2-part Julian Date + + Returned: + tcg1,tcg2 double TCG as a 2-part Julian Date + + Returned (function value): + int status: 0 = OK + + Note: + + tt1+tt2 is Julian Date, apportioned in any convenient way between + the two arguments, for example where tt1 is the Julian Day Number + and tt2 is the fraction of a day. The returned tcg1,tcg2 follow + suit. + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + IAU 2000 Resolution B1.9 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + tt1_in = numpy.array(tt1, dtype=numpy.double, order="C", copy=False, subok=True) + tt2_in = numpy.array(tt2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), tt1_in, tt2_in) + tcg1_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + tcg2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [tt1_in, tt2_in, tcg1_out, tcg2_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._tttcg(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'tttcg') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(tcg1_out.shape) > 0 and tcg1_out.shape[0] == 1 + tcg1_out = tcg1_out.reshape(tcg1_out.shape[1:]) + assert len(tcg2_out.shape) > 0 and tcg2_out.shape[0] == 1 + tcg2_out = tcg2_out.reshape(tcg2_out.shape[1:]) + + return tcg1_out, tcg2_out +STATUS_CODES['tttcg'] = {0: 'OK'} + + + +def tttdb(tt1, tt2, dtr): + """ + Wrapper for ERFA function ``eraTttdb``. + + Parameters + ---------- + tt1 : double array + tt2 : double array + dtr : double array + + Returns + ------- + tdb1 : double array + tdb2 : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a T t t d b + - - - - - - - - - + + Time scale transformation: Terrestrial Time, TT, to Barycentric + Dynamical Time, TDB. + + Given: + tt1,tt2 double TT as a 2-part Julian Date + dtr double TDB-TT in seconds + + Returned: + tdb1,tdb2 double TDB as a 2-part Julian Date + + Returned (function value): + int status: 0 = OK + + Notes: + + 1) tt1+tt2 is Julian Date, apportioned in any convenient way between + the two arguments, for example where tt1 is the Julian Day Number + and tt2 is the fraction of a day. The returned tdb1,tdb2 follow + suit. + + 2) The argument dtr represents the quasi-periodic component of the + GR transformation between TT and TCB. It is dependent upon the + adopted solar-system ephemeris, and can be obtained by numerical + integration, by interrogating a precomputed time ephemeris or by + evaluating a model such as that implemented in the ERFA function + eraDtdb. The quantity is dominated by an annual term of 1.7 ms + amplitude. + + 3) TDB is essentially the same as Teph, the time argument for the JPL + solar system ephemerides. + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + IAU 2006 Resolution 3 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + tt1_in = numpy.array(tt1, dtype=numpy.double, order="C", copy=False, subok=True) + tt2_in = numpy.array(tt2, dtype=numpy.double, order="C", copy=False, subok=True) + dtr_in = numpy.array(dtr, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), tt1_in, tt2_in, dtr_in) + tdb1_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + tdb2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [tt1_in, tt2_in, dtr_in, tdb1_out, tdb2_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._tttdb(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'tttdb') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(tdb1_out.shape) > 0 and tdb1_out.shape[0] == 1 + tdb1_out = tdb1_out.reshape(tdb1_out.shape[1:]) + assert len(tdb2_out.shape) > 0 and tdb2_out.shape[0] == 1 + tdb2_out = tdb2_out.reshape(tdb2_out.shape[1:]) + + return tdb1_out, tdb2_out +STATUS_CODES['tttdb'] = {0: 'OK'} + + + +def ttut1(tt1, tt2, dt): + """ + Wrapper for ERFA function ``eraTtut1``. + + Parameters + ---------- + tt1 : double array + tt2 : double array + dt : double array + + Returns + ------- + ut11 : double array + ut12 : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a T t u t 1 + - - - - - - - - - + + Time scale transformation: Terrestrial Time, TT, to Universal Time, + UT1. + + Given: + tt1,tt2 double TT as a 2-part Julian Date + dt double TT-UT1 in seconds + + Returned: + ut11,ut12 double UT1 as a 2-part Julian Date + + Returned (function value): + int status: 0 = OK + + Notes: + + 1) tt1+tt2 is Julian Date, apportioned in any convenient way between + the two arguments, for example where tt1 is the Julian Day Number + and tt2 is the fraction of a day. The returned ut11,ut12 follow + suit. + + 2) The argument dt is classical Delta T. + + Reference: + + Explanatory Supplement to the Astronomical Almanac, + P. Kenneth Seidelmann (ed), University Science Books (1992) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + tt1_in = numpy.array(tt1, dtype=numpy.double, order="C", copy=False, subok=True) + tt2_in = numpy.array(tt2, dtype=numpy.double, order="C", copy=False, subok=True) + dt_in = numpy.array(dt, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), tt1_in, tt2_in, dt_in) + ut11_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + ut12_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [tt1_in, tt2_in, dt_in, ut11_out, ut12_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._ttut1(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'ttut1') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(ut11_out.shape) > 0 and ut11_out.shape[0] == 1 + ut11_out = ut11_out.reshape(ut11_out.shape[1:]) + assert len(ut12_out.shape) > 0 and ut12_out.shape[0] == 1 + ut12_out = ut12_out.reshape(ut12_out.shape[1:]) + + return ut11_out, ut12_out +STATUS_CODES['ttut1'] = {0: 'OK'} + + + +def ut1tai(ut11, ut12, dta): + """ + Wrapper for ERFA function ``eraUt1tai``. + + Parameters + ---------- + ut11 : double array + ut12 : double array + dta : double array + + Returns + ------- + tai1 : double array + tai2 : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a U t 1 t a i + - - - - - - - - - - + + Time scale transformation: Universal Time, UT1, to International + Atomic Time, TAI. + + Given: + ut11,ut12 double UT1 as a 2-part Julian Date + dta double UT1-TAI in seconds + + Returned: + tai1,tai2 double TAI as a 2-part Julian Date + + Returned (function value): + int status: 0 = OK + + Notes: + + 1) ut11+ut12 is Julian Date, apportioned in any convenient way + between the two arguments, for example where ut11 is the Julian + Day Number and ut12 is the fraction of a day. The returned + tai1,tai2 follow suit. + + 2) The argument dta, i.e. UT1-TAI, is an observed quantity, and is + available from IERS tabulations. + + Reference: + + Explanatory Supplement to the Astronomical Almanac, + P. Kenneth Seidelmann (ed), University Science Books (1992) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + ut11_in = numpy.array(ut11, dtype=numpy.double, order="C", copy=False, subok=True) + ut12_in = numpy.array(ut12, dtype=numpy.double, order="C", copy=False, subok=True) + dta_in = numpy.array(dta, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), ut11_in, ut12_in, dta_in) + tai1_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + tai2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [ut11_in, ut12_in, dta_in, tai1_out, tai2_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._ut1tai(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'ut1tai') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(tai1_out.shape) > 0 and tai1_out.shape[0] == 1 + tai1_out = tai1_out.reshape(tai1_out.shape[1:]) + assert len(tai2_out.shape) > 0 and tai2_out.shape[0] == 1 + tai2_out = tai2_out.reshape(tai2_out.shape[1:]) + + return tai1_out, tai2_out +STATUS_CODES['ut1tai'] = {0: 'OK'} + + + +def ut1tt(ut11, ut12, dt): + """ + Wrapper for ERFA function ``eraUt1tt``. + + Parameters + ---------- + ut11 : double array + ut12 : double array + dt : double array + + Returns + ------- + tt1 : double array + tt2 : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a U t 1 t t + - - - - - - - - - + + Time scale transformation: Universal Time, UT1, to Terrestrial + Time, TT. + + Given: + ut11,ut12 double UT1 as a 2-part Julian Date + dt double TT-UT1 in seconds + + Returned: + tt1,tt2 double TT as a 2-part Julian Date + + Returned (function value): + int status: 0 = OK + + Notes: + + 1) ut11+ut12 is Julian Date, apportioned in any convenient way + between the two arguments, for example where ut11 is the Julian + Day Number and ut12 is the fraction of a day. The returned + tt1,tt2 follow suit. + + 2) The argument dt is classical Delta T. + + Reference: + + Explanatory Supplement to the Astronomical Almanac, + P. Kenneth Seidelmann (ed), University Science Books (1992) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + ut11_in = numpy.array(ut11, dtype=numpy.double, order="C", copy=False, subok=True) + ut12_in = numpy.array(ut12, dtype=numpy.double, order="C", copy=False, subok=True) + dt_in = numpy.array(dt, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), ut11_in, ut12_in, dt_in) + tt1_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + tt2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [ut11_in, ut12_in, dt_in, tt1_out, tt2_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._ut1tt(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'ut1tt') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(tt1_out.shape) > 0 and tt1_out.shape[0] == 1 + tt1_out = tt1_out.reshape(tt1_out.shape[1:]) + assert len(tt2_out.shape) > 0 and tt2_out.shape[0] == 1 + tt2_out = tt2_out.reshape(tt2_out.shape[1:]) + + return tt1_out, tt2_out +STATUS_CODES['ut1tt'] = {0: 'OK'} + + + +def ut1utc(ut11, ut12, dut1): + """ + Wrapper for ERFA function ``eraUt1utc``. + + Parameters + ---------- + ut11 : double array + ut12 : double array + dut1 : double array + + Returns + ------- + utc1 : double array + utc2 : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a U t 1 u t c + - - - - - - - - - - + + Time scale transformation: Universal Time, UT1, to Coordinated + Universal Time, UTC. + + Given: + ut11,ut12 double UT1 as a 2-part Julian Date (Note 1) + dut1 double Delta UT1: UT1-UTC in seconds (Note 2) + + Returned: + utc1,utc2 double UTC as a 2-part quasi Julian Date (Notes 3,4) + + Returned (function value): + int status: +1 = dubious year (Note 5) + 0 = OK + -1 = unacceptable date + + Notes: + + 1) ut11+ut12 is Julian Date, apportioned in any convenient way + between the two arguments, for example where ut11 is the Julian + Day Number and ut12 is the fraction of a day. The returned utc1 + and utc2 form an analogous pair, except that a special convention + is used, to deal with the problem of leap seconds - see Note 3. + + 2) Delta UT1 can be obtained from tabulations provided by the + International Earth Rotation and Reference Systems Service. The + value changes abruptly by 1s at a leap second; however, close to + a leap second the algorithm used here is tolerant of the "wrong" + choice of value being made. + + 3) JD cannot unambiguously represent UTC during a leap second unless + special measures are taken. The convention in the present + function is that the returned quasi JD day UTC1+UTC2 represents + UTC days whether the length is 86399, 86400 or 86401 SI seconds. + + 4) The function eraD2dtf can be used to transform the UTC quasi-JD + into calendar date and clock time, including UTC leap second + handling. + + 5) The warning status "dubious year" flags UTCs that predate the + introduction of the time scale or that are too far in the future + to be trusted. See eraDat for further details. 
+ + Called: + eraJd2cal JD to Gregorian calendar + eraDat delta(AT) = TAI-UTC + eraCal2jd Gregorian calendar to JD + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Explanatory Supplement to the Astronomical Almanac, + P. Kenneth Seidelmann (ed), University Science Books (1992) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + ut11_in = numpy.array(ut11, dtype=numpy.double, order="C", copy=False, subok=True) + ut12_in = numpy.array(ut12, dtype=numpy.double, order="C", copy=False, subok=True) + dut1_in = numpy.array(dut1, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), ut11_in, ut12_in, dut1_in) + utc1_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + utc2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [ut11_in, ut12_in, dut1_in, utc1_out, utc2_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._ut1utc(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'ut1utc') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(utc1_out.shape) > 0 and utc1_out.shape[0] == 1 + utc1_out = utc1_out.reshape(utc1_out.shape[1:]) + assert len(utc2_out.shape) > 0 and utc2_out.shape[0] == 1 + utc2_out = utc2_out.reshape(utc2_out.shape[1:]) + + return utc1_out, utc2_out +STATUS_CODES['ut1utc'] = {1: 'dubious year (Note 5)', 0: 'OK', -1: 'unacceptable date'} + + + +def utctai(utc1, utc2): + """ + Wrapper for ERFA function ``eraUtctai``. + + Parameters + ---------- + utc1 : double array + utc2 : double array + + Returns + ------- + tai1 : double array + tai2 : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a U t c t a i + - - - - - - - - - - + + Time scale transformation: Coordinated Universal Time, UTC, to + International Atomic Time, TAI. + + Given: + utc1,utc2 double UTC as a 2-part quasi Julian Date (Notes 1-4) + + Returned: + tai1,tai2 double TAI as a 2-part Julian Date (Note 5) + + Returned (function value): + int status: +1 = dubious year (Note 3) + 0 = OK + -1 = unacceptable date + + Notes: + + 1) utc1+utc2 is quasi Julian Date (see Note 2), apportioned in any + convenient way between the two arguments, for example where utc1 + is the Julian Day Number and utc2 is the fraction of a day. + + 2) JD cannot unambiguously represent UTC during a leap second unless + special measures are taken. The convention in the present + function is that the JD day represents UTC days whether the + length is 86399, 86400 or 86401 SI seconds. In the 1960-1972 era + there were smaller jumps (in either direction) each time the + linear UTC(TAI) expression was changed, and these "mini-leaps" + are also included in the ERFA convention. 
+ + 3) The warning status "dubious year" flags UTCs that predate the + introduction of the time scale or that are too far in the future + to be trusted. See eraDat for further details. + + 4) The function eraDtf2d converts from calendar date and time of day + into 2-part Julian Date, and in the case of UTC implements the + leap-second-ambiguity convention described above. + + 5) The returned TAI1,TAI2 are such that their sum is the TAI Julian + Date. + + Called: + eraJd2cal JD to Gregorian calendar + eraDat delta(AT) = TAI-UTC + eraCal2jd Gregorian calendar to JD + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Explanatory Supplement to the Astronomical Almanac, + P. Kenneth Seidelmann (ed), University Science Books (1992) + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + utc1_in = numpy.array(utc1, dtype=numpy.double, order="C", copy=False, subok=True) + utc2_in = numpy.array(utc2, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), utc1_in, utc2_in) + tai1_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + tai2_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [utc1_in, utc2_in, tai1_out, tai2_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._utctai(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'utctai') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(tai1_out.shape) > 0 and tai1_out.shape[0] == 1 + tai1_out = tai1_out.reshape(tai1_out.shape[1:]) + assert len(tai2_out.shape) > 0 and tai2_out.shape[0] == 1 + tai2_out = tai2_out.reshape(tai2_out.shape[1:]) + + return tai1_out, tai2_out +STATUS_CODES['utctai'] = {1: 'dubious year (Note 3)', 0: 'OK', -1: 'unacceptable date'} + + + +def utcut1(utc1, utc2, dut1): + """ + Wrapper for ERFA function ``eraUtcut1``. + + Parameters + ---------- + utc1 : double array + utc2 : double array + dut1 : double array + + Returns + ------- + ut11 : double array + ut12 : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - - + e r a U t c u t 1 + - - - - - - - - - - + + Time scale transformation: Coordinated Universal Time, UTC, to + Universal Time, UT1. + + Given: + utc1,utc2 double UTC as a 2-part quasi Julian Date (Notes 1-4) + dut1 double Delta UT1 = UT1-UTC in seconds (Note 5) + + Returned: + ut11,ut12 double UT1 as a 2-part Julian Date (Note 6) + + Returned (function value): + int status: +1 = dubious year (Note 3) + 0 = OK + -1 = unacceptable date + + Notes: + + 1) utc1+utc2 is quasi Julian Date (see Note 2), apportioned in any + convenient way between the two arguments, for example where utc1 + is the Julian Day Number and utc2 is the fraction of a day. + + 2) JD cannot unambiguously represent UTC during a leap second unless + special measures are taken. 
The convention in the present + function is that the JD day represents UTC days whether the + length is 86399, 86400 or 86401 SI seconds. + + 3) The warning status "dubious year" flags UTCs that predate the + introduction of the time scale or that are too far in the future + to be trusted. See eraDat for further details. + + 4) The function eraDtf2d converts from calendar date and time of + day into 2-part Julian Date, and in the case of UTC implements + the leap-second-ambiguity convention described above. + + 5) Delta UT1 can be obtained from tabulations provided by the + International Earth Rotation and Reference Systems Service. + It is the caller's responsibility to supply a dut1 argument + containing the UT1-UTC value that matches the given UTC. + + 6) The returned ut11,ut12 are such that their sum is the UT1 Julian + Date. + + References: + + McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003), + IERS Technical Note No. 32, BKG (2004) + + Explanatory Supplement to the Astronomical Almanac, + P. Kenneth Seidelmann (ed), University Science Books (1992) + + Called: + eraJd2cal JD to Gregorian calendar + eraDat delta(AT) = TAI-UTC + eraUtctai UTC to TAI + eraTaiut1 TAI to UT1 + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + utc1_in = numpy.array(utc1, dtype=numpy.double, order="C", copy=False, subok=True) + utc2_in = numpy.array(utc2, dtype=numpy.double, order="C", copy=False, subok=True) + dut1_in = numpy.array(dut1, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), utc1_in, utc2_in, dut1_in) + ut11_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + ut12_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [utc1_in, utc2_in, dut1_in, ut11_out, ut12_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._utcut1(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'utcut1') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(ut11_out.shape) > 0 and ut11_out.shape[0] == 1 + ut11_out = ut11_out.reshape(ut11_out.shape[1:]) + assert len(ut12_out.shape) > 0 and ut12_out.shape[0] == 1 + ut12_out = ut12_out.reshape(ut12_out.shape[1:]) + + return ut11_out, ut12_out +STATUS_CODES['utcut1'] = {1: 'dubious year (Note 3)', 0: 'OK', -1: 'unacceptable date'} + + + +def a2af(ndp, angle): + """ + Wrapper for ERFA function ``eraA2af``. + + Parameters + ---------- + ndp : int array + angle : double array + + Returns + ------- + sign : char array + idmsf : int array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a A 2 a f + - - - - - - - - + + Decompose radians into degrees, arcminutes, arcseconds, fraction. 
+ + Given: + ndp int resolution (Note 1) + angle double angle in radians + + Returned: + sign char '+' or '-' + idmsf int[4] degrees, arcminutes, arcseconds, fraction + + Called: + eraD2tf decompose days to hms + + Notes: + + 1) The argument ndp is interpreted as follows: + + ndp resolution + : ...0000 00 00 + -7 1000 00 00 + -6 100 00 00 + -5 10 00 00 + -4 1 00 00 + -3 0 10 00 + -2 0 01 00 + -1 0 00 10 + 0 0 00 01 + 1 0 00 00.1 + 2 0 00 00.01 + 3 0 00 00.001 + : 0 00 00.000... + + 2) The largest positive useful value for ndp is determined by the + size of angle, the format of doubles on the target platform, and + the risk of overflowing idmsf[3]. On a typical platform, for + angle up to 2pi, the available floating-point precision might + correspond to ndp=12. However, the practical limit is typically + ndp=9, set by the capacity of a 32-bit int, or ndp=4 if int is + only 16 bits. + + 3) The absolute value of angle may exceed 2pi. In cases where it + does not, it is up to the caller to test for and handle the + case where angle is very nearly 2pi and rounds up to 360 degrees, + by testing for idmsf[0]=360 and setting idmsf[0-3] to zero. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + ndp_in = numpy.array(ndp, dtype=numpy.intc, order="C", copy=False, subok=True) + angle_in = numpy.array(angle, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), ndp_in, angle_in) + sign_out = numpy.empty(broadcast.shape + (), dtype=numpy.dtype('S1')) + idmsf_out = numpy.empty(broadcast.shape + (4,), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [ndp_in, angle_in, sign_out, idmsf_out[...,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._a2af(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(sign_out.shape) > 0 and sign_out.shape[0] == 1 + sign_out = sign_out.reshape(sign_out.shape[1:]) + assert len(idmsf_out.shape) > 0 and idmsf_out.shape[0] == 1 + idmsf_out = idmsf_out.reshape(idmsf_out.shape[1:]) + + return sign_out, idmsf_out + + +def a2tf(ndp, angle): + """ + Wrapper for ERFA function ``eraA2tf``. + + Parameters + ---------- + ndp : int array + angle : double array + + Returns + ------- + sign : char array + ihmsf : int array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a A 2 t f + - - - - - - - - + + Decompose radians into hours, minutes, seconds, fraction. + + Given: + ndp int resolution (Note 1) + angle double angle in radians + + Returned: + sign char '+' or '-' + ihmsf int[4] hours, minutes, seconds, fraction + + Called: + eraD2tf decompose days to hms + + Notes: + + 1) The argument ndp is interpreted as follows: + + ndp resolution + : ...0000 00 00 + -7 1000 00 00 + -6 100 00 00 + -5 10 00 00 + -4 1 00 00 + -3 0 10 00 + -2 0 01 00 + -1 0 00 10 + 0 0 00 01 + 1 0 00 00.1 + 2 0 00 00.01 + 3 0 00 00.001 + : 0 00 00.000... 
+ + 2) The largest positive useful value for ndp is determined by the + size of angle, the format of doubles on the target platform, and + the risk of overflowing ihmsf[3]. On a typical platform, for + angle up to 2pi, the available floating-point precision might + correspond to ndp=12. However, the practical limit is typically + ndp=9, set by the capacity of a 32-bit int, or ndp=4 if int is + only 16 bits. + + 3) The absolute value of angle may exceed 2pi. In cases where it + does not, it is up to the caller to test for and handle the + case where angle is very nearly 2pi and rounds up to 24 hours, + by testing for ihmsf[0]=24 and setting ihmsf[0-3] to zero. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + ndp_in = numpy.array(ndp, dtype=numpy.intc, order="C", copy=False, subok=True) + angle_in = numpy.array(angle, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), ndp_in, angle_in) + sign_out = numpy.empty(broadcast.shape + (), dtype=numpy.dtype('S1')) + ihmsf_out = numpy.empty(broadcast.shape + (4,), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [ndp_in, angle_in, sign_out, ihmsf_out[...,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._a2tf(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(sign_out.shape) > 0 and sign_out.shape[0] == 1 + sign_out = sign_out.reshape(sign_out.shape[1:]) + assert len(ihmsf_out.shape) > 0 and ihmsf_out.shape[0] == 1 + ihmsf_out = ihmsf_out.reshape(ihmsf_out.shape[1:]) + + return sign_out, ihmsf_out + + +def af2a(s, ideg, iamin, asec): + """ + Wrapper for ERFA function ``eraAf2a``. + + Parameters + ---------- + s : char array + ideg : int array + iamin : int array + asec : double array + + Returns + ------- + rad : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a A f 2 a + - - - - - - - - + + Convert degrees, arcminutes, arcseconds to radians. + + Given: + s char sign: '-' = negative, otherwise positive + ideg int degrees + iamin int arcminutes + asec double arcseconds + + Returned: + rad double angle in radians + + Returned (function value): + int status: 0 = OK + 1 = ideg outside range 0-359 + 2 = iamin outside range 0-59 + 3 = asec outside range 0-59.999... + + Notes: + + 1) The result is computed even if any of the range checks fail. + + 2) Negative ideg, iamin and/or asec produce a warning status, but + the absolute value is used in the conversion. + + 3) If there are multiple errors, the status value reflects only the + first, the smallest taking precedence. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
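+
+    Example (an illustrative sketch added in editing, not part of the
+    SOFA-derived documentation above; the input values are arbitrary):
+
+       rad = af2a('-', 45, 13, 27.2)
+       # rad == -(45 + 13/60 + 27.2/3600) * pi/180, about -0.78931 rad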
+ + """ + + #Turn all inputs into arrays + s_in = numpy.array(s, dtype=numpy.dtype('S1'), order="C", copy=False, subok=True) + ideg_in = numpy.array(ideg, dtype=numpy.intc, order="C", copy=False, subok=True) + iamin_in = numpy.array(iamin, dtype=numpy.intc, order="C", copy=False, subok=True) + asec_in = numpy.array(asec, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), s_in, ideg_in, iamin_in, asec_in) + rad_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [s_in, ideg_in, iamin_in, asec_in, rad_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._af2a(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'af2a') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rad_out.shape) > 0 and rad_out.shape[0] == 1 + rad_out = rad_out.reshape(rad_out.shape[1:]) + + return rad_out +STATUS_CODES['af2a'] = {0: 'OK', 1: 'ideg outside range 0-359', 2: 'iamin outside range 0-59', 3: 'asec outside range 0-59.999...'} + + + +def anp(a): + """ + Wrapper for ERFA function ``eraAnp``. + + Parameters + ---------- + a : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - + e r a A n p + - - - - - - - + + Normalize angle into the range 0 <= a < 2pi. + + Given: + a double angle (radians) + + Returned (function value): + double angle in range 0-2pi + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + a_in = numpy.array(a, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), a_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [a_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._anp(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def anpm(a): + """ + Wrapper for ERFA function ``eraAnpm``. + + Parameters + ---------- + a : double array + + Returns + ------- + c_retval : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a A n p m + - - - - - - - - + + Normalize angle into the range -pi <= a < +pi. 
+ + Given: + a double angle (radians) + + Returned (function value): + double angle in range +/-pi + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + a_in = numpy.array(a, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), a_in) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [a_in, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._anpm(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_retval_out.shape) > 0 and c_retval_out.shape[0] == 1 + c_retval_out = c_retval_out.reshape(c_retval_out.shape[1:]) + + return c_retval_out + + +def d2tf(ndp, days): + """ + Wrapper for ERFA function ``eraD2tf``. + + Parameters + ---------- + ndp : int array + days : double array + + Returns + ------- + sign : char array + ihmsf : int array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a D 2 t f + - - - - - - - - + + Decompose days to hours, minutes, seconds, fraction. + + Given: + ndp int resolution (Note 1) + days double interval in days + + Returned: + sign char '+' or '-' + ihmsf int[4] hours, minutes, seconds, fraction + + Notes: + + 1) The argument ndp is interpreted as follows: + + ndp resolution + : ...0000 00 00 + -7 1000 00 00 + -6 100 00 00 + -5 10 00 00 + -4 1 00 00 + -3 0 10 00 + -2 0 01 00 + -1 0 00 10 + 0 0 00 01 + 1 0 00 00.1 + 2 0 00 00.01 + 3 0 00 00.001 + : 0 00 00.000... + + 2) The largest positive useful value for ndp is determined by the + size of days, the format of double on the target platform, and + the risk of overflowing ihmsf[3]. On a typical platform, for + days up to 1.0, the available floating-point precision might + correspond to ndp=12. However, the practical limit is typically + ndp=9, set by the capacity of a 32-bit int, or ndp=4 if int is + only 16 bits. + + 3) The absolute value of days may exceed 1.0. In cases where it + does not, it is up to the caller to test for and handle the + case where days is very nearly 1.0 and rounds up to 24 hours, + by testing for ihmsf[0]=24 and setting ihmsf[0-3] to zero. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
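+
+    For illustration (not part of the ERFA documentation; the expected
+    values match this package's own test suite)::
+
+        from astropy._erfa import core as erfa
+        sign, ihmsf = erfa.d2tf(1, -1.5)
+        # sign is b'-' and ihmsf is [36, 0, 0, 0]: -1.5 days is
+        # 36 hours, expressed here to 0.1 second resolution (ndp=1)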
+ + """ + + #Turn all inputs into arrays + ndp_in = numpy.array(ndp, dtype=numpy.intc, order="C", copy=False, subok=True) + days_in = numpy.array(days, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), ndp_in, days_in) + sign_out = numpy.empty(broadcast.shape + (), dtype=numpy.dtype('S1')) + ihmsf_out = numpy.empty(broadcast.shape + (4,), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [ndp_in, days_in, sign_out, ihmsf_out[...,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._d2tf(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(sign_out.shape) > 0 and sign_out.shape[0] == 1 + sign_out = sign_out.reshape(sign_out.shape[1:]) + assert len(ihmsf_out.shape) > 0 and ihmsf_out.shape[0] == 1 + ihmsf_out = ihmsf_out.reshape(ihmsf_out.shape[1:]) + + return sign_out, ihmsf_out + + +def tf2a(s, ihour, imin, sec): + """ + Wrapper for ERFA function ``eraTf2a``. + + Parameters + ---------- + s : char array + ihour : int array + imin : int array + sec : double array + + Returns + ------- + rad : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a T f 2 a + - - - - - - - - + + Convert hours, minutes, seconds to radians. + + Given: + s char sign: '-' = negative, otherwise positive + ihour int hours + imin int minutes + sec double seconds + + Returned: + rad double angle in radians + + Returned (function value): + int status: 0 = OK + 1 = ihour outside range 0-23 + 2 = imin outside range 0-59 + 3 = sec outside range 0-59.999... + + Notes: + + 1) The result is computed even if any of the range checks fail. + + 2) Negative ihour, imin and/or sec produce a warning status, but + the absolute value is used in the conversion. + + 3) If there are multiple errors, the status value reflects only the + first, the smallest taking precedence. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + s_in = numpy.array(s, dtype=numpy.dtype('S1'), order="C", copy=False, subok=True) + ihour_in = numpy.array(ihour, dtype=numpy.intc, order="C", copy=False, subok=True) + imin_in = numpy.array(imin, dtype=numpy.intc, order="C", copy=False, subok=True) + sec_in = numpy.array(sec, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), s_in, ihour_in, imin_in, sec_in) + rad_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [s_in, ihour_in, imin_in, sec_in, rad_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._tf2a(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'tf2a') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rad_out.shape) > 0 and rad_out.shape[0] == 1 + rad_out = rad_out.reshape(rad_out.shape[1:]) + + return rad_out +STATUS_CODES['tf2a'] = {0: 'OK', 1: 'ihour outside range 0-23', 2: 'imin outside range 0-59', 3: 'sec outside range 0-59.999...'} + + + +def tf2d(s, ihour, imin, sec): + """ + Wrapper for ERFA function ``eraTf2d``. + + Parameters + ---------- + s : char array + ihour : int array + imin : int array + sec : double array + + Returns + ------- + days : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a T f 2 d + - - - - - - - - + + Convert hours, minutes, seconds to days. + + Given: + s char sign: '-' = negative, otherwise positive + ihour int hours + imin int minutes + sec double seconds + + Returned: + days double interval in days + + Returned (function value): + int status: 0 = OK + 1 = ihour outside range 0-23 + 2 = imin outside range 0-59 + 3 = sec outside range 0-59.999... + + Notes: + + 1) The result is computed even if any of the range checks fail. + + 2) Negative ihour, imin and/or sec produce a warning status, but + the absolute value is used in the conversion. + + 3) If there are multiple errors, the status value reflects only the + first, the smallest taking precedence. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + s_in = numpy.array(s, dtype=numpy.dtype('S1'), order="C", copy=False, subok=True) + ihour_in = numpy.array(ihour, dtype=numpy.intc, order="C", copy=False, subok=True) + imin_in = numpy.array(imin, dtype=numpy.intc, order="C", copy=False, subok=True) + sec_in = numpy.array(sec, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), s_in, ihour_in, imin_in, sec_in) + days_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + c_retval_out = numpy.empty(broadcast.shape + (), dtype=numpy.intc) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [s_in, ihour_in, imin_in, sec_in, days_out, c_retval_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*4 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._tf2d(it) + + if not stat_ok: + check_errwarn(c_retval_out, 'tf2d') + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(days_out.shape) > 0 and days_out.shape[0] == 1 + days_out = days_out.reshape(days_out.shape[1:]) + + return days_out +STATUS_CODES['tf2d'] = {0: 'OK', 1: 'ihour outside range 0-23', 2: 'imin outside range 0-59', 3: 'sec outside range 0-59.999...'} + + + +def rxp(r, p): + """ + Wrapper for ERFA function ``eraRxp``. + + Parameters + ---------- + r : double array + p : double array + + Returns + ------- + rp : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - + e r a R x p + - - - - - - - + + Multiply a p-vector by an r-matrix. + + Given: + r double[3][3] r-matrix + p double[3] p-vector + + Returned: + rp double[3] r * p + + Note: + It is permissible for p and rp to be the same array. + + Called: + eraCp copy p-vector + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + r_in = numpy.array(r, dtype=numpy.double, order="C", copy=False, subok=True) + p_in = numpy.array(p, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(r_in, (3, 3), "r") + check_trailing_shape(p_in, (3,), "p") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), r_in[...,0,0], p_in[...,0]) + rp_out = numpy.empty(broadcast.shape + (3,), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [r_in[...,0,0], p_in[...,0], rp_out[...,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._rxp(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rp_out.shape) > 0 and rp_out.shape[0] == 1 + rp_out = rp_out.reshape(rp_out.shape[1:]) + + return rp_out + + +def rxpv(r, pv): + """ + Wrapper for ERFA function ``eraRxpv``. 
+ + Parameters + ---------- + r : double array + pv : double array + + Returns + ------- + rpv : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a R x p v + - - - - - - - - + + Multiply a pv-vector by an r-matrix. + + Given: + r double[3][3] r-matrix + pv double[2][3] pv-vector + + Returned: + rpv double[2][3] r * pv + + Note: + It is permissible for pv and rpv to be the same array. + + Called: + eraRxp product of r-matrix and p-vector + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + r_in = numpy.array(r, dtype=numpy.double, order="C", copy=False, subok=True) + pv_in = numpy.array(pv, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(r_in, (3, 3), "r") + check_trailing_shape(pv_in, (2, 3), "pv") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), r_in[...,0,0], pv_in[...,0,0]) + rpv_out = numpy.empty(broadcast.shape + (2, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [r_in[...,0,0], pv_in[...,0,0], rpv_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._rxpv(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(rpv_out.shape) > 0 and rpv_out.shape[0] == 1 + rpv_out = rpv_out.reshape(rpv_out.shape[1:]) + + return rpv_out + + +def trxp(r, p): + """ + Wrapper for ERFA function ``eraTrxp``. + + Parameters + ---------- + r : double array + p : double array + + Returns + ------- + trp : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a T r x p + - - - - - - - - + + Multiply a p-vector by the transpose of an r-matrix. + + Given: + r double[3][3] r-matrix + p double[3] p-vector + + Returned: + trp double[3] r * p + + Note: + It is permissible for p and trp to be the same array. + + Called: + eraTr transpose r-matrix + eraRxp product of r-matrix and p-vector + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
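+
+    For illustration (a hypothetical identity-matrix example, not part of
+    the ERFA documentation)::
+
+        import numpy as np
+        from astropy._erfa import core as erfa
+        trp = erfa.trxp(np.eye(3), [1.0, 2.0, 3.0])
+        # the transpose of the identity matrix is the identity,
+        # so trp is [1.0, 2.0, 3.0]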
+ + """ + + #Turn all inputs into arrays + r_in = numpy.array(r, dtype=numpy.double, order="C", copy=False, subok=True) + p_in = numpy.array(p, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(r_in, (3, 3), "r") + check_trailing_shape(p_in, (3,), "p") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), r_in[...,0,0], p_in[...,0]) + trp_out = numpy.empty(broadcast.shape + (3,), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [r_in[...,0,0], p_in[...,0], trp_out[...,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._trxp(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(trp_out.shape) > 0 and trp_out.shape[0] == 1 + trp_out = trp_out.reshape(trp_out.shape[1:]) + + return trp_out + + +def trxpv(r, pv): + """ + Wrapper for ERFA function ``eraTrxpv``. + + Parameters + ---------- + r : double array + pv : double array + + Returns + ------- + trpv : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - - + e r a T r x p v + - - - - - - - - - + + Multiply a pv-vector by the transpose of an r-matrix. + + Given: + r double[3][3] r-matrix + pv double[2][3] pv-vector + + Returned: + trpv double[2][3] r * pv + + Note: + It is permissible for pv and trpv to be the same array. + + Called: + eraTr transpose r-matrix + eraRxpv product of r-matrix and pv-vector + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + r_in = numpy.array(r, dtype=numpy.double, order="C", copy=False, subok=True) + pv_in = numpy.array(pv, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(r_in, (3, 3), "r") + check_trailing_shape(pv_in, (2, 3), "pv") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), r_in[...,0,0], pv_in[...,0,0]) + trpv_out = numpy.empty(broadcast.shape + (2, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [r_in[...,0,0], pv_in[...,0,0], trpv_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._trxpv(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(trpv_out.shape) > 0 and trpv_out.shape[0] == 1 + trpv_out = trpv_out.reshape(trpv_out.shape[1:]) + + return trpv_out + + +def c2s(p): + """ + Wrapper for ERFA function ``eraC2s``. + + Parameters + ---------- + p : double array + + Returns + ------- + theta : double array + phi : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - + e r a C 2 s + - - - - - - - + + P-vector to spherical coordinates. 
+ + Given: + p double[3] p-vector + + Returned: + theta double longitude angle (radians) + phi double latitude angle (radians) + + Notes: + + 1) The vector p can have any magnitude; only its direction is used. + + 2) If p is null, zero theta and phi are returned. + + 3) At either pole, zero theta is returned. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + p_in = numpy.array(p, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(p_in, (3,), "p") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), p_in[...,0]) + theta_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + phi_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [p_in[...,0], theta_out, phi_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*2 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._c2s(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(theta_out.shape) > 0 and theta_out.shape[0] == 1 + theta_out = theta_out.reshape(theta_out.shape[1:]) + assert len(phi_out.shape) > 0 and phi_out.shape[0] == 1 + phi_out = phi_out.reshape(phi_out.shape[1:]) + + return theta_out, phi_out + + +def p2s(p): + """ + Wrapper for ERFA function ``eraP2s``. + + Parameters + ---------- + p : double array + + Returns + ------- + theta : double array + phi : double array + r : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - + e r a P 2 s + - - - - - - - + + P-vector to spherical polar coordinates. + + Given: + p double[3] p-vector + + Returned: + theta double longitude angle (radians) + phi double latitude angle (radians) + r double radial distance + + Notes: + + 1) If P is null, zero theta, phi and r are returned. + + 2) At either pole, zero theta is returned. + + Called: + eraC2s p-vector to spherical + eraPm modulus of p-vector + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
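+
+    For illustration (not part of the ERFA documentation; the expected
+    values match this package's own test suite)::
+
+        import numpy as np
+        from astropy._erfa import core as erfa
+        theta, phi, r = erfa.p2s([0.0, np.sqrt(2.0), np.sqrt(2.0)])
+        # theta is pi/2, phi is pi/4 and r is 2.0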
+ + """ + + #Turn all inputs into arrays + p_in = numpy.array(p, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(p_in, (3,), "p") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), p_in[...,0]) + theta_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + phi_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + r_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [p_in[...,0], theta_out, phi_out, r_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*3 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._p2s(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(theta_out.shape) > 0 and theta_out.shape[0] == 1 + theta_out = theta_out.reshape(theta_out.shape[1:]) + assert len(phi_out.shape) > 0 and phi_out.shape[0] == 1 + phi_out = phi_out.reshape(phi_out.shape[1:]) + assert len(r_out.shape) > 0 and r_out.shape[0] == 1 + r_out = r_out.reshape(r_out.shape[1:]) + + return theta_out, phi_out, r_out + + +def pv2s(pv): + """ + Wrapper for ERFA function ``eraPv2s``. + + Parameters + ---------- + pv : double array + + Returns + ------- + theta : double array + phi : double array + r : double array + td : double array + pd : double array + rd : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a P v 2 s + - - - - - - - - + + Convert position/velocity from Cartesian to spherical coordinates. + + Given: + pv double[2][3] pv-vector + + Returned: + theta double longitude angle (radians) + phi double latitude angle (radians) + r double radial distance + td double rate of change of theta + pd double rate of change of phi + rd double rate of change of r + + Notes: + + 1) If the position part of pv is null, theta, phi, td and pd + are indeterminate. This is handled by extrapolating the + position through unit time by using the velocity part of + pv. This moves the origin without changing the direction + of the velocity component. If the position and velocity + components of pv are both null, zeroes are returned for all + six results. + + 2) If the position is a pole, theta, td and pd are indeterminate. + In such cases zeroes are returned for all three. + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + pv_in = numpy.array(pv, dtype=numpy.double, order="C", copy=False, subok=True) + check_trailing_shape(pv_in, (2, 3), "pv") + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), pv_in[...,0,0]) + theta_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + phi_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + r_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + td_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + pd_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + rd_out = numpy.empty(broadcast.shape + (), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [pv_in[...,0,0], theta_out, phi_out, r_out, td_out, pd_out, rd_out] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*1 + [['readwrite']]*6 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._pv2s(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(theta_out.shape) > 0 and theta_out.shape[0] == 1 + theta_out = theta_out.reshape(theta_out.shape[1:]) + assert len(phi_out.shape) > 0 and phi_out.shape[0] == 1 + phi_out = phi_out.reshape(phi_out.shape[1:]) + assert len(r_out.shape) > 0 and r_out.shape[0] == 1 + r_out = r_out.reshape(r_out.shape[1:]) + assert len(td_out.shape) > 0 and td_out.shape[0] == 1 + td_out = td_out.reshape(td_out.shape[1:]) + assert len(pd_out.shape) > 0 and pd_out.shape[0] == 1 + pd_out = pd_out.reshape(pd_out.shape[1:]) + assert len(rd_out.shape) > 0 and rd_out.shape[0] == 1 + rd_out = rd_out.reshape(rd_out.shape[1:]) + + return theta_out, phi_out, r_out, td_out, pd_out, rd_out + + +def s2c(theta, phi): + """ + Wrapper for ERFA function ``eraS2c``. + + Parameters + ---------- + theta : double array + phi : double array + + Returns + ------- + c : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - + e r a S 2 c + - - - - - - - + + Convert spherical coordinates to Cartesian. + + Given: + theta double longitude angle (radians) + phi double latitude angle (radians) + + Returned: + c double[3] direction cosines + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. 
+ + """ + + #Turn all inputs into arrays + theta_in = numpy.array(theta, dtype=numpy.double, order="C", copy=False, subok=True) + phi_in = numpy.array(phi, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), theta_in, phi_in) + c_out = numpy.empty(broadcast.shape + (3,), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [theta_in, phi_in, c_out[...,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*2 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._s2c(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(c_out.shape) > 0 and c_out.shape[0] == 1 + c_out = c_out.reshape(c_out.shape[1:]) + + return c_out + + +def s2p(theta, phi, r): + """ + Wrapper for ERFA function ``eraS2p``. + + Parameters + ---------- + theta : double array + phi : double array + r : double array + + Returns + ------- + p : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - + e r a S 2 p + - - - - - - - + + Convert spherical polar coordinates to p-vector. + + Given: + theta double longitude angle (radians) + phi double latitude angle (radians) + r double radial distance + + Returned: + p double[3] Cartesian coordinates + + Called: + eraS2c spherical coordinates to unit vector + eraSxp multiply p-vector by scalar + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + theta_in = numpy.array(theta, dtype=numpy.double, order="C", copy=False, subok=True) + phi_in = numpy.array(phi, dtype=numpy.double, order="C", copy=False, subok=True) + r_in = numpy.array(r, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), theta_in, phi_in, r_in) + p_out = numpy.empty(broadcast.shape + (3,), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [theta_in, phi_in, r_in, p_out[...,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*3 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._s2p(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(p_out.shape) > 0 and p_out.shape[0] == 1 + p_out = p_out.reshape(p_out.shape[1:]) + + return p_out + + +def s2pv(theta, phi, r, td, pd, rd): + """ + Wrapper for ERFA function ``eraS2pv``. + + Parameters + ---------- + theta : double array + phi : double array + r : double array + td : double array + pd : double array + rd : double array + + Returns + ------- + pv : double array + + Notes + ----- + The ERFA documentation is below. + + - - - - - - - - + e r a S 2 p v + - - - - - - - - + + Convert position/velocity from spherical to Cartesian coordinates. 
+ + Given: + theta double longitude angle (radians) + phi double latitude angle (radians) + r double radial distance + td double rate of change of theta + pd double rate of change of phi + rd double rate of change of r + + Returned: + pv double[2][3] pv-vector + + Copyright (C) 2013-2017, NumFOCUS Foundation. + Derived, with permission, from the SOFA library. See notes at end of file. + + """ + + #Turn all inputs into arrays + theta_in = numpy.array(theta, dtype=numpy.double, order="C", copy=False, subok=True) + phi_in = numpy.array(phi, dtype=numpy.double, order="C", copy=False, subok=True) + r_in = numpy.array(r, dtype=numpy.double, order="C", copy=False, subok=True) + td_in = numpy.array(td, dtype=numpy.double, order="C", copy=False, subok=True) + pd_in = numpy.array(pd, dtype=numpy.double, order="C", copy=False, subok=True) + rd_in = numpy.array(rd, dtype=numpy.double, order="C", copy=False, subok=True) + make_outputs_scalar = False + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), theta_in, phi_in, r_in, td_in, pd_in, rd_in) + pv_out = numpy.empty(broadcast.shape + (2, 3), dtype=numpy.double) + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [theta_in, phi_in, r_in, td_in, pd_in, rd_in, pv_out[...,0,0]] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*6 + [['readwrite']]*1 + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._s2pv(it) + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + assert len(pv_out.shape) > 0 and pv_out.shape[0] == 1 + pv_out = pv_out.reshape(pv_out.shape[1:]) + + return pv_out + + + + +# TODO: delete the functions below when they can get auto-generated +# (current machinery doesn't support returning strings or non-status-codes) +def version(): + """ + Returns the package version + as defined in configure.ac + in string format + """ + return "1.4.0" + +def version_major(): + """ + Returns the package major version + as defined in configure.ac + as integer + """ + return 1 + +def version_minor(): + """ + Returns the package minor version + as defined in configure.ac + as integer + """ + return 4 + +def version_micro(): + """ + Returns the package micro version + as defined in configure.ac + as integer + """ + return 0 + +def sofa_version(): + """ + Returns the corresponding SOFA version + as defined in configure.ac + in string format + """ + return "20170420" \ No newline at end of file diff --git a/astropy/_erfa/core.py.templ b/astropy/_erfa/core.py.templ new file mode 100644 index 0000000..bdc5ccf --- /dev/null +++ b/astropy/_erfa/core.py.templ @@ -0,0 +1,285 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +# "core.py" is auto-generated by erfa_generator.py from the template +# "core.py.templ". Do *not* edit "core.py" directly, instead edit +# "core.py.templ" and run erfa_generator.py from the source directory to +# update it. + +""" +This module uses the Python/C API to wrap the ERFA library in +numpy-vectorized equivalents. + +..warning:: + This is currently *not* part of the public Astropy API, and may change in + the future. 
+ + +The key idea is that any function can be called with inputs that are arrays, +and the wrappers will automatically vectorize and call the ERFA functions for +each item using broadcasting rules for numpy. So the return values are always +numpy arrays of some sort. + +For ERFA functions that take/return vectors or matrices, the vector/matrix +dimension(s) are always the *last* dimension(s). For example, if you +want to give ten matrices (i.e., the ERFA input type is double[3][3]), +you would pass in a (10, 3, 3) numpy array. If the output of the ERFA +function is scalar, you'll get back a length-10 1D array. + +Note that the C part of these functions are implemented in a separate +module (compiled as ``_core``), derived from the ``core.c`` file. +Splitting the wrappers into separate pure-python and C portions +dramatically reduces compilation time without notably impacting +performance. (See issue [#3063] on the github repository for more +about this.) +""" +from __future__ import absolute_import, division, print_function + +import warnings + +from ..utils.exceptions import AstropyUserWarning + +import numpy +from . import _core + +# TODO: remove the above variable and the code using it and make_outputs_scalar +# when numpy < 1.8 is no longer supported + +__all__ = ['ErfaError', 'ErfaWarning', + {{ funcs|map(attribute='pyname')|surround("'","'")|join(", ") }}, + {{ constants|map(attribute='name')|surround("'","'")|join(", ") }}, + # TODO: delete the functions below when they can get auto-generated + 'version', 'version_major', 'version_minor', 'version_micro', 'sofa_version', + 'dt_eraASTROM', 'dt_eraLDBODY'] + + +# <---------------------------------Error-handling----------------------------> + +class ErfaError(ValueError): + """ + A class for errors triggered by ERFA functions (status codes < 0) + """ + + +class ErfaWarning(AstropyUserWarning): + """ + A class for warnings triggered by ERFA functions (status codes > 0) + """ + + +STATUS_CODES = {} # populated below before each function that returns an int + +# This is a hard-coded list of status codes that need to be remapped, +# such as to turn errors into warnings. +STATUS_CODES_REMAP = { + 'cal2jd': {-3: 3} +} + + +def check_errwarn(statcodes, func_name): + # Remap any errors into warnings in the STATUS_CODES_REMAP dict. + if func_name in STATUS_CODES_REMAP: + for before, after in STATUS_CODES_REMAP[func_name].items(): + statcodes[statcodes == before] = after + STATUS_CODES[func_name][after] = STATUS_CODES[func_name][before] + + if numpy.any(statcodes<0): + # errors present - only report the errors. 
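+        # (a 0-d status array has an empty shape and skips the filtering
+        # below; the numpy.any check above already guarantees its single
+        # value is an error code)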
+ if statcodes.shape: + statcodes = statcodes[statcodes<0] + + errcodes = numpy.unique(statcodes) + + errcounts = dict([(e, numpy.sum(statcodes==e)) for e in errcodes]) + + elsemsg = STATUS_CODES[func_name].get('else', None) + if elsemsg is None: + errmsgs = dict([(e, STATUS_CODES[func_name].get(e, 'Return code ' + str(e))) for e in errcodes]) + else: + errmsgs = dict([(e, STATUS_CODES[func_name].get(e, elsemsg)) for e in errcodes]) + + emsg = ', '.join(['{0} of "{1}"'.format(errcounts[e], errmsgs[e]) for e in errcodes]) + raise ErfaError('ERFA function "' + func_name + '" yielded ' + emsg) + + elif numpy.any(statcodes>0): + #only warnings present + if statcodes.shape: + statcodes = statcodes[statcodes>0] + + warncodes = numpy.unique(statcodes) + + warncounts = dict([(w, numpy.sum(statcodes==w)) for w in warncodes]) + + elsemsg = STATUS_CODES[func_name].get('else', None) + if elsemsg is None: + warnmsgs = dict([(w, STATUS_CODES[func_name].get(w, 'Return code ' + str(w))) for w in warncodes]) + else: + warnmsgs = dict([(w, STATUS_CODES[func_name].get(w, elsemsg)) for w in warncodes]) + + wmsg = ', '.join(['{0} of "{1}"'.format(warncounts[w], warnmsgs[w]) for w in warncodes]) + warnings.warn('ERFA function "' + func_name + '" yielded ' + wmsg, ErfaWarning) + + +# <-------------------------trailing shape verification-----------------------> + +def check_trailing_shape(arr, shape, name): + try: + if arr.shape[-len(shape):] != shape: + raise Exception() + except: + raise ValueError("{0} must be of trailing dimensions {1}".format(name, shape)) + +# <--------------------------Actual ERFA-wrapping code------------------------> + +dt_eraASTROM = numpy.dtype([('pmt','d'), + ('eb','d',(3,)), + ('eh','d',(3,)), + ('em','d'), + ('v','d',(3,)), + ('bm1','d'), + ('bpn','d',(3,3)), + ('along','d'), + ('phi','d'), + ('xpl','d'), + ('ypl','d'), + ('sphi','d'), + ('cphi','d'), + ('diurab','d'), + ('eral','d'), + ('refa','d'), + ('refb','d')], align=True) + +dt_eraLDBODY = numpy.dtype([('bm','d'), + ('dl','d'), + ('pv','d',(2,3))], align=True) + + +{% for constant in constants %} +{{ constant.name }} = {{ constant.value }} +"""{{ constant.doc|join(' ') }}""" +{%- endfor %} + +{% for func in funcs %} +def {{ func.pyname }}({{ func.args_by_inout('in|inout')|map(attribute='name')|join(', ') }}): + """ + Wrapper for ERFA function ``{{ func.name }}``. + + Parameters + ---------- + {%- for arg in func.args_by_inout('in|inout') %} + {{ arg.name }} : {{ arg.ctype }} array + {%- endfor %} + + Returns + ------- + {%- for arg in func.args_by_inout('inout|out|ret') %} + {{ arg.name }} : {{ arg.ctype }} array + {%- endfor %} + + Notes + ----- + The ERFA documentation is below. 
+ +{{ func.doc }} + """ + + #Turn all inputs into arrays + {%- for arg in func.args_by_inout('in|inout') %} + {{ arg.name }}_in = numpy.array({{ arg.name }}, dtype={{ arg.dtype }}, order="C", copy=False, subok=True) + {%- endfor %} + {%- for arg in func.args_by_inout('in|inout') %} + {%- if arg.ndim > 0 %} + check_trailing_shape({{ arg.name }}_in, {{ arg.shape }}, "{{arg.name}}") + {%- endif %} + {%- endfor %} + + {%- if func.args_by_inout('in|inout') %} + make_outputs_scalar = False + {%- endif %} + + #Create the output array, based on the broadcasted shape, adding the generated dimensions if needed + broadcast = numpy.broadcast(numpy.int32(0.0), numpy.int32(0.0), {{ func.args_by_inout('in|inout')|map(attribute='name_in_broadcast')|join(', ') }}) + {%- for arg in func.args_by_inout('inout|out|ret|stat') %} + {{ arg.name }}_out = numpy.empty(broadcast.shape + {{ arg.shape }}, dtype={{ arg.dtype }}) + {%- endfor %} + {%- for arg in func.args_by_inout('inout') %} + numpy.copyto({{ arg.name }}_out, {{ arg.name }}_in) + {%- endfor %} + + #Create the iterator, broadcasting on all but the consumed dimensions + arrs = [{{ (func.args_by_inout('in')|map(attribute='name_in_broadcast')|list + func.args_by_inout('inout|out|ret|stat')|map(attribute='name_out_broadcast')|list)|join(', ') }}] + op_axes = [[-1]*(broadcast.nd-arr.ndim) + list(range(arr.ndim)) for arr in arrs] + op_flags = [['readonly']]*{{ func.args_by_inout('in')|count }} + [['readwrite']]*{{ func.args_by_inout('inout|out|ret|stat')|count }} + it = numpy.nditer(arrs, op_axes=op_axes, op_flags=op_flags) + + #Iterate + stat_ok = _core._{{ func.pyname }}(it) + + {%- for arg in func.args_by_inout('stat') %} + + if not stat_ok: + check_errwarn({{ arg.name }}_out, '{{ func.pyname }}') + {%- endfor %} + + {%- if func.args_by_inout('in|inout') %} + #need to convert the outputs back to scalars if all the inputs were scalars but we made them 1d + if make_outputs_scalar: + {%- for arg in func.args_by_inout('inout|out|ret') %} + assert len({{ arg.name }}_out.shape) > 0 and {{ arg.name }}_out.shape[0] == 1 + {{ arg.name }}_out = {{ arg.name }}_out.reshape({{ arg.name }}_out.shape[1:]) + {%- else %} + pass + {%- endfor %} + {%- endif %} + + return {{ func.args_by_inout('inout|out|ret')|map(attribute='name')|postfix('_out')|join(', ') }} + +{%- for stat in func.args_by_inout('stat') %} +{%- if stat.doc_info.statuscodes %} +STATUS_CODES['{{ func.pyname }}'] = {{ stat.doc_info.statuscodes|string }} +{% endif %} +{%- endfor %} + +{% endfor %} + + +# TODO: delete the functions below when they can get auto-generated +# (current machinery doesn't support returning strings or non-status-codes) +def version(): + """ + Returns the package version + as defined in configure.ac + in string format + """ + return "1.4.0" + +def version_major(): + """ + Returns the package major version + as defined in configure.ac + as integer + """ + return 1 + +def version_minor(): + """ + Returns the package minor version + as defined in configure.ac + as integer + """ + return 4 + +def version_micro(): + """ + Returns the package micro version + as defined in configure.ac + as integer + """ + return 0 + +def sofa_version(): + """ + Returns the corresponding SOFA version + as defined in configure.ac + in string format + """ + return "20170420" diff --git a/astropy/_erfa/erfa_generator.py b/astropy/_erfa/erfa_generator.py new file mode 100644 index 0000000..20e5ab4 --- /dev/null +++ b/astropy/_erfa/erfa_generator.py @@ -0,0 +1,563 @@ +# Licensed under a 3-clause BSD style license - 
see LICENSE.rst +""" +This module's main purpose is to act as a script to create new versions +of erfa.c when ERFA is updated (or this generator is enhanced). + +`Jinja2 `_ must be installed for this +module/script to function. + +Note that this does *not* currently automate the process of creating structs +or dtypes for those structs. They should be added manually in the template file. +""" +from __future__ import absolute_import, division, print_function +# note that we do *not* use unicode_literals here, because that makes the +# generated code's strings have u'' in them on py 2.x + +import re +import os.path +from collections import OrderedDict + + +ctype_to_dtype = {'double': "numpy.double", + 'int': "numpy.intc", + 'eraASTROM': "dt_eraASTROM", + 'eraLDBODY': "dt_eraLDBODY", + 'char': "numpy.dtype('S1')", + 'const char': "numpy.dtype('S16')", + } + + +NDIMS_REX = re.compile(re.escape("numpy.dtype([('fi0', '.*', <(.*)>)])").replace(r'\.\*', '.*').replace(r'\<', '(').replace(r'\>', ')')) + + +class FunctionDoc(object): + + def __init__(self, doc): + self.doc = doc.replace("**", " ").replace("/*\n", "").replace("*/", "") + self.__input = None + self.__output = None + self.__ret_info = None + + @property + def input(self): + if self.__input is None: + self.__input = [] + result = re.search("Given([^\n]*):\n(.+?) \n", self.doc, re.DOTALL) + if result is not None: + __input = result.group(2) + for i in __input.split("\n"): + arg_doc = ArgumentDoc(i) + if arg_doc.name is not None: + self.__input.append(arg_doc) + result = re.search("Given and returned([^\n]*):\n(.+?) \n", self.doc, re.DOTALL) + if result is not None: + __input = result.group(2) + for i in __input.split("\n"): + arg_doc = ArgumentDoc(i) + if arg_doc.name is not None: + self.__input.append(arg_doc) + return self.__input + + @property + def output(self): + if self.__output is None: + self.__output = [] + result = re.search("Returned([^\n]*):\n(.+?) \n", self.doc, re.DOTALL) + if result is not None: + __output = result.group(2) + for i in __output.split("\n"): + arg_doc = ArgumentDoc(i) + if arg_doc.name is not None: + self.__output.append(arg_doc) + result = re.search("Given and returned([^\n]*):\n(.+?) \n", self.doc, re.DOTALL) + if result is not None: + __output = result.group(2) + for i in __output.split("\n"): + arg_doc = ArgumentDoc(i) + if arg_doc.name is not None: + self.__output.append(arg_doc) + return self.__output + + @property + def ret_info(self): + if self.__ret_info is None: + ret_info = [] + result = re.search("Returned \\(function value\\)([^\n]*):\n(.+?) 
\n", self.doc, re.DOTALL) + if result is not None: + ret_info.append(ReturnDoc(result.group(2))) + + if len(ret_info) == 0: + self.__ret_info = '' + elif len(ret_info) == 1: + self.__ret_info = ret_info[0] + else: + raise ValueError("Multiple C return sections found in this doc:\n" + self.doc) + + return self.__ret_info + + def __repr__(self): + return self.doc.replace(" \n", "\n") + + +class ArgumentDoc(object): + + def __init__(self, doc): + match = re.search("^ +([^ ]+)[ ]+([^ ]+)[ ]+(.+)", doc) + if match is not None: + self.name = match.group(1) + self.type = match.group(2) + self.doc = match.group(3) + else: + self.name = None + self.type = None + self.doc = None + + def __repr__(self): + return " {0:15} {1:15} {2}".format(self.name, self.type, self.doc) + + +class Argument(object): + + def __init__(self, definition, doc): + self.doc = doc + self.__inout_state = None + self.ctype, ptr_name_arr = definition.strip().rsplit(" ", 1) + if "*" == ptr_name_arr[0]: + self.is_ptr = True + name_arr = ptr_name_arr[1:] + else: + self.is_ptr = False + name_arr = ptr_name_arr + if "[]" in ptr_name_arr: + self.is_ptr = True + name_arr = name_arr[:-2] + if "[" in name_arr: + self.name, arr = name_arr.split("[", 1) + self.shape = tuple([int(size) for size in arr[:-1].split("][")]) + else: + self.name = name_arr + self.shape = () + + @property + def inout_state(self): + if self.__inout_state is None: + self.__inout_state = '' + for i in self.doc.input: + if self.name in i.name.split(','): + self.__inout_state = 'in' + for o in self.doc.output: + if self.name in o.name.split(','): + if self.__inout_state == 'in': + self.__inout_state = 'inout' + else: + self.__inout_state = 'out' + return self.__inout_state + + @property + def ctype_ptr(self): + if (self.is_ptr) | (len(self.shape) > 0): + return self.ctype+" *" + else: + return self.ctype + + @property + def name_in_broadcast(self): + if len(self.shape) > 0: + return "{0}_in[...{1}]".format(self.name, ",0"*len(self.shape)) + else: + return "{0}_in".format(self.name) + + @property + def name_out_broadcast(self): + if len(self.shape) > 0: + return "{0}_out[...{1}]".format(self.name, ",0"*len(self.shape)) + else: + return "{0}_out".format(self.name) + + @property + def dtype(self): + return ctype_to_dtype[self.ctype] + + @property + def ndim(self): + return len(self.shape) + + @property + def cshape(self): + return ''.join(['[{0}]'.format(s) for s in self.shape]) + + @property + def name_for_call(self): + if self.is_ptr: + return '_'+self.name + else: + return '*_'+self.name + + def __repr__(self): + return "Argument('{0}', name='{1}', ctype='{2}', inout_state='{3}')".format(self.definition, self.name, self.ctype, self.inout_state) + + +class ReturnDoc(object): + + def __init__(self, doc): + self.doc = doc + + self.infoline = doc.split('\n')[0].strip() + self.type = self.infoline.split()[0] + self.descr = self.infoline.split()[1] + + if self.descr.startswith('status'): + self.statuscodes = statuscodes = {} + + code = None + for line in doc[doc.index(':')+1:].split('\n'): + ls = line.strip() + if ls != '': + if ' = ' in ls: + code, msg = ls.split(' = ') + if code != 'else': + code = int(code) + statuscodes[code] = msg + elif code is not None: + statuscodes[code] += ls + else: + self.statuscodes = None + + def __repr__(self): + return "Return value, type={0:15}, {1}, {2}".format(self.type, self.descr, self.doc) + + +class Return(object): + + def __init__(self, ctype, doc): + self.name = 'c_retval' + self.name_out_broadcast = self.name+"_out" + 
self.inout_state = 'stat' if ctype == 'int' else 'ret' + self.ctype = ctype + self.ctype_ptr = ctype + self.shape = () + self.doc = doc + + def __repr__(self): + return "Return(name='{0}', ctype='{1}', inout_state='{2}')".format(self.name, self.ctype, self.inout_state) + + @property + def dtype(self): + return ctype_to_dtype[self.ctype] + + @property + def nd_dtype(self): + """ + This if the return type has a multi-dimensional output, like + double[3][3] + """ + return "'fi0'" in self.dtype + + @property + def doc_info(self): + return self.doc.ret_info + + +class Function(object): + """ + A class representing a C function. + + Parameters + ---------- + name : str + The name of the function + source_path : str + Either a directory, which means look for the function in a + stand-alone file (like for the standard ERFA distribution), or a + file, which means look for the function in that file (as for the + astropy-packaged single-file erfa.c). + match_line : str, optional + If given, searching of the source file will skip until it finds + a line matching this string, and start from there. + """ + + def __init__(self, name, source_path, match_line=None): + self.name = name + self.pyname = name.split('era')[-1].lower() + self.filename = self.pyname+".c" + if os.path.isdir(source_path): + self.filepath = os.path.join(os.path.normpath(source_path), self.filename) + else: + self.filepath = source_path + + with open(self.filepath) as f: + if match_line: + line = f.readline() + while line != '': + if line.startswith(match_line): + filecontents = '\n' + line + f.read() + break + line = f.readline() + else: + msg = ('Could not find the match_line "{0}" in ' + 'the source file "{1}"') + raise ValueError(msg.format(match_line, self.filepath)) + else: + filecontents = f.read() + + pattern = r"\n([^\n]+{0} ?\([^)]+\)).+?(/\*.+?\*/)".format(name) + p = re.compile(pattern, flags=re.DOTALL | re.MULTILINE) + + search = p.search(filecontents) + self.cfunc = " ".join(search.group(1).split()) + self.doc = FunctionDoc(search.group(2)) + + self.args = [] + for arg in re.search(r"\(([^)]+)\)", self.cfunc).group(1).split(', '): + self.args.append(Argument(arg, self.doc)) + self.ret = re.search("^(.*){0}".format(name), self.cfunc).group(1).strip() + if self.ret != 'void': + self.args.append(Return(self.ret, self.doc)) + + def args_by_inout(self, inout_filter, prop=None, join=None): + """ + Gives all of the arguments and/or returned values, depending on whether + they are inputs, outputs, etc. + + The value for `inout_filter` should be a string containing anything + that arguments' `inout_state` attribute produces. Currently, that can be: + + * "in" : input + * "out" : output + * "inout" : something that's could be input or output (e.g. a struct) + * "ret" : the return value of the C function + * "stat" : the return value of the C function if it is a status code + + It can also be a "|"-separated string giving inout states to OR + together. 
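+
+        For example (mirroring how the templates themselves call this
+        method), ``args_by_inout('inout|out|ret')`` gives every argument
+        that ends up in the generated wrapper's return statement.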
+ """ + result = [] + for arg in self.args: + if arg.inout_state in inout_filter.split('|'): + if prop is None: + result.append(arg) + else: + result.append(getattr(arg, prop)) + if join is not None: + return join.join(result) + else: + return result + + def __repr__(self): + return "Function(name='{0}', pyname='{1}', filename='{2}', filepath='{3}')".format(self.name, self.pyname, self.filename, self.filepath) + + +class Constant(object): + + def __init__(self, name, value, doc): + self.name = name.replace("ERFA_", "") + self.value = value.replace("ERFA_", "") + self.doc = doc + + +class ExtraFunction(Function): + """ + An "extra" function - e.g. one not following the SOFA/ERFA standard format. + + Parameters + ---------- + cname : str + The name of the function in C + prototype : str + The prototype for the function (usually derived from the header) + pathfordoc : str + The path to a file that contains the prototype, with the documentation + as a multiline string *before* it. + """ + + def __init__(self, cname, prototype, pathfordoc): + self.name = cname + self.pyname = cname.split('era')[-1].lower() + self.filepath, self.filename = os.path.split(pathfordoc) + + self.prototype = prototype.strip() + if prototype.endswith('{') or prototype.endswith(';'): + self.prototype = prototype[:-1].strip() + + incomment = False + lastcomment = None + with open(pathfordoc, 'r') as f: + for l in f: + if incomment: + if l.lstrip().startswith('*/'): + incomment = False + lastcomment = ''.join(lastcomment) + else: + if l.startswith('**'): + l = l[2:] + lastcomment.append(l) + else: + if l.lstrip().startswith('/*'): + incomment = True + lastcomment = [] + if l.startswith(self.prototype): + self.doc = lastcomment + break + else: + raise ValueError('Did not find prototype {} in file ' + '{}'.format(self.prototype, pathfordoc)) + + self.args = [] + argset = re.search(r"{0}\(([^)]+)?\)".format(self.name), + self.prototype).group(1) + if argset is not None: + for arg in argset.split(', '): + self.args.append(Argument(arg, self.doc)) + self.ret = re.match("^(.*){0}".format(self.name), + self.prototype).group(1).strip() + if self.ret != 'void': + self.args.append(Return(self.ret, self.doc)) + + def __repr__(self): + r = super(ExtraFunction, self).__repr__() + if r.startswith('Function'): + r = 'Extra' + r + return r + + +def main(srcdir, outfn, templateloc, verbose=True): + from jinja2 import Environment, FileSystemLoader + + if verbose: + print_ = lambda *args, **kwargs: print(*args, **kwargs) + else: + print_ = lambda *args, **kwargs: None + + # Prepare the jinja2 templating environment + env = Environment(loader=FileSystemLoader(templateloc)) + + def prefix(a_list, pre): + return [pre+'{0}'.format(an_element) for an_element in a_list] + + def postfix(a_list, post): + return ['{0}'.format(an_element)+post for an_element in a_list] + + def surround(a_list, pre, post): + return [pre+'{0}'.format(an_element)+post for an_element in a_list] + env.filters['prefix'] = prefix + env.filters['postfix'] = postfix + env.filters['surround'] = surround + + erfa_c_in = env.get_template('core.c.templ') + erfa_py_in = env.get_template('core.py.templ') + + # Extract all the ERFA function names from erfa.h + if os.path.isdir(srcdir): + erfahfn = os.path.join(srcdir, 'erfa.h') + multifilserc = True + else: + erfahfn = os.path.join(os.path.split(srcdir)[0], 'erfa.h') + multifilserc = False + + with open(erfahfn, "r") as f: + erfa_h = f.read() + + funcs = OrderedDict() + section_subsection_functions = re.findall(r'/\* (\w*)/(\w*) 
\*/\n(.*?)\n\n', + erfa_h, flags=re.DOTALL | re.MULTILINE) + for section, subsection, functions in section_subsection_functions: + print_("{0}.{1}".format(section, subsection)) + if ((section == "Astronomy") or (subsection == "AngleOps") + or (subsection == "SphericalCartesian") + or (subsection == "MatrixVectorProducts")): + func_names = re.findall(r' (\w+)\(.*?\);', functions, flags=re.DOTALL) + for name in func_names: + print_("{0}.{1}.{2}...".format(section, subsection, name)) + if multifilserc: + # easy because it just looks in the file itself + funcs[name] = Function(name, srcdir) + else: + # Have to tell it to look for a declaration matching + # the start of the header declaration, otherwise it + # might find a *call* of the function instead of the + # definition + for line in functions.split(r'\n'): + if name in line: + # [:-1] is to remove trailing semicolon, and + # splitting on '(' is because the header and + # C files don't necessarily have to match + # argument names and line-breaking or + # whitespace + match_line = line[:-1].split('(')[0] + funcs[name] = Function(name, srcdir, match_line) + break + else: + raise ValueError("A name for a C file wasn't " + "found in the string that " + "spawned it. This should be " + "impossible!") + + funcs = list(funcs.values()) + + # Extract all the ERFA constants from erfam.h + erfamhfn = os.path.join(srcdir, 'erfam.h') + with open(erfamhfn, 'r') as f: + erfa_m_h = f.read() + constants = [] + for chunk in erfa_m_h.split("\n\n"): + result = re.findall(r"#define (ERFA_\w+?) (.+?)$", chunk, flags=re.DOTALL | re.MULTILINE) + if result: + doc = re.findall(r"/\* (.+?) \*/\n", chunk, flags=re.DOTALL) + for (name, value) in result: + constants.append(Constant(name, value, doc)) + + # TODO: re-enable this when const char* return values and non-status code integer rets are possible + # #Add in any "extra" functions from erfaextra.h + # erfaextrahfn = os.path.join(srcdir, 'erfaextra.h') + # with open(erfaextrahfn, 'r') as f: + # for l in f: + # ls = l.strip() + # match = re.match('.* (era.*)\(', ls) + # if match: + # print_("Extra: {0} ...".format(match.group(1))) + # funcs.append(ExtraFunction(match.group(1), ls, erfaextrahfn)) + + print_("Rendering template") + erfa_c = erfa_c_in.render(funcs=funcs) + erfa_py = erfa_py_in.render(funcs=funcs, constants=constants) + + if outfn is not None: + outfn_c = os.path.splitext(outfn)[0] + ".c" + print_("Saving to", outfn, 'and', outfn_c) + with open(outfn, "w") as f: + f.write(erfa_py) + with open(outfn_c, "w") as f: + f.write(erfa_c) + + print_("Done!") + + return erfa_c, erfa_py, funcs + + +DEFAULT_ERFA_LOC = os.path.join(os.path.split(__file__)[0], + '../../cextern/erfa') +DEFAULT_TEMPLATE_LOC = os.path.split(__file__)[0] + +if __name__ == '__main__': + from argparse import ArgumentParser + + ap = ArgumentParser() + ap.add_argument('srcdir', default=DEFAULT_ERFA_LOC, nargs='?', + help='Directory where the ERFA c and header files ' + 'can be found or to a single erfa.c file ' + '(which must be in the same directory as ' + 'erfa.h). Defaults to the builtin astropy ' + 'erfa: "{0}"'.format(DEFAULT_ERFA_LOC)) + ap.add_argument('-o', '--output', default='core.py', + help='The output filename. 
This is the name for only the ' + 'pure-python output, the C part will have the ' + 'same name but with a ".c" extension.') + ap.add_argument('-t', '--template-loc', + default=DEFAULT_TEMPLATE_LOC, + help='the location where the "core.c.templ" ' + 'template can be found.') + ap.add_argument('-q', '--quiet', action='store_false', dest='verbose', + help='Suppress output normally printed to stdout.') + + args = ap.parse_args() + main(args.srcdir, args.output, args.template_loc) diff --git a/astropy/_erfa/setup_package.py b/astropy/_erfa/setup_package.py new file mode 100644 index 0000000..d609d16 --- /dev/null +++ b/astropy/_erfa/setup_package.py @@ -0,0 +1,118 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import absolute_import + +import os +import glob + +from distutils import log +from distutils.extension import Extension + +from astropy_helpers import setup_helpers +from astropy_helpers.version_helpers import get_pkg_version_module + +ERFAPKGDIR = os.path.relpath(os.path.dirname(__file__)) + +ERFA_SRC = os.path.abspath(os.path.join(ERFAPKGDIR, '..', '..', 'cextern', 'erfa')) + +SRC_FILES = glob.glob(os.path.join(ERFA_SRC, '*')) +SRC_FILES += [os.path.join(ERFAPKGDIR, filename) + for filename in ['core.py.templ', 'core.c.templ', 'erfa_generator.py']] + +GEN_FILES = [os.path.join(ERFAPKGDIR, 'core.py'), os.path.join(ERFAPKGDIR, 'core.c')] + + +def pre_build_py_hook(cmd_obj): + preprocess_source() + + +def pre_build_ext_hook(cmd_obj): + preprocess_source() + + +def pre_sdist_hook(cmd_obj): + preprocess_source() + + +def preprocess_source(): + # Generating the ERFA wrappers should only be done if needed. This also + # ensures that it is not done for any release tarball since those will + # include core.py and core.c. + if all(os.path.exists(filename) for filename in GEN_FILES): + + # Determine modification times + erfa_mtime = max(os.path.getmtime(filename) for filename in SRC_FILES) + gen_mtime = min(os.path.getmtime(filename) for filename in GEN_FILES) + + version = get_pkg_version_module('astropy') + + if gen_mtime > erfa_mtime: + # If generated source is recent enough, don't update + return + elif version.release: + # or, if we're on a release, issue a warning, but go ahead and use + # the wrappers anyway + log.warn('WARNING: The autogenerated wrappers in astropy._erfa ' + 'seem to be older than the source templates used to ' + 'create them. Because this is a release version we will ' + 'use them anyway, but this might be a sign of some sort ' + 'of version mismatch or other tampering. 
Or it might just ' + 'mean you moved some files around or otherwise ' + 'accidentally changed timestamps.') + return + # otherwise rebuild the autogenerated files + + # If jinja2 isn't present, then print a warning and use existing files + try: + import jinja2 # pylint: disable=W0611 + except ImportError: + log.warn("WARNING: jinja2 could not be imported, so the existing " + "ERFA core.py and core.c files will be used") + return + + name = 'erfa_generator' + filename = os.path.join(ERFAPKGDIR, 'erfa_generator.py') + + try: + from importlib import machinery as import_machinery + loader = import_machinery.SourceFileLoader(name, filename) + gen = loader.load_module() + except ImportError: + import imp + gen = imp.load_source(name, filename) + + gen.main(gen.DEFAULT_ERFA_LOC, + os.path.join(ERFAPKGDIR, 'core.py'), + gen.DEFAULT_TEMPLATE_LOC, + verbose=False) + + +def get_extensions(): + sources = [os.path.join(ERFAPKGDIR, "core.c")] + include_dirs = ['numpy'] + libraries = [] + + if setup_helpers.use_system_library('erfa'): + libraries.append('erfa') + else: + # get all of the .c files in the cextern/erfa directory + erfafns = os.listdir(ERFA_SRC) + sources.extend(['cextern/erfa/'+fn for fn in erfafns if fn.endswith('.c')]) + + include_dirs.append('cextern/erfa') + + erfa_ext = Extension( + name="astropy._erfa._core", + sources=sources, + include_dirs=include_dirs, + libraries=libraries, + language="c",) + + return [erfa_ext] + + +def get_external_libraries(): + return ['erfa'] + + +def requires_2to3(): + return False diff --git a/astropy/_erfa/tests/__init__.py b/astropy/_erfa/tests/__init__.py new file mode 100644 index 0000000..9dce85d --- /dev/null +++ b/astropy/_erfa/tests/__init__.py @@ -0,0 +1 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst diff --git a/astropy/_erfa/tests/test_erfa.py b/astropy/_erfa/tests/test_erfa.py new file mode 100644 index 0000000..493d54a --- /dev/null +++ b/astropy/_erfa/tests/test_erfa.py @@ -0,0 +1,233 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +import numpy as np +from .. 
import core as erfa +from ...tests.helper import catch_warnings + + +def test_erfa_wrapper(): + """ + Runs a set of tests that mostly make sure vectorization is + working as expected + """ + + jd = np.linspace(2456855.5, 2456855.5+1.0/24.0/60.0, 60*2+1) + ra = np.linspace(0.0, np.pi*2.0, 5) + dec = np.linspace(-np.pi/2.0, np.pi/2.0, 4) + + aob, zob, hob, dob, rob, eo = erfa.atco13(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, jd, 0.0, 0.0, 0.0, np.pi/4.0, 0.0, 0.0, 0.0, 1014.0, 0.0, 0.0, 0.5) + assert aob.shape == (121,) + + aob, zob, hob, dob, rob, eo = erfa.atco13(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, jd[0], 0.0, 0.0, 0.0, np.pi/4.0, 0.0, 0.0, 0.0, 1014.0, 0.0, 0.0, 0.5) + assert aob.shape == () + + aob, zob, hob, dob, rob, eo = erfa.atco13(ra[:, None, None], dec[None, :, None], 0.0, 0.0, 0.0, 0.0, jd[None, None, :], 0.0, 0.0, 0.0, np.pi/4.0, 0.0, 0.0, 0.0, 1014.0, 0.0, 0.0, 0.5) + (aob.shape) == (5, 4, 121) + + iy, im, id, ihmsf = erfa.d2dtf("UTC", 3, jd, 0.0) + assert iy.shape == (121,) + assert ihmsf.shape == (121, 4) + assert ihmsf.dtype == np.dtype('i4') + + iy, im, id, ihmsf = erfa.d2dtf("UTC", 3, jd[0], 0.0) + assert iy.shape == () + assert ihmsf.shape == (4,) + assert ihmsf.dtype == np.dtype('i4') + + +def test_angle_ops(): + + sign, idmsf = erfa.a2af(6, -np.pi) + assert sign == b'-' + assert (idmsf == [180, 0, 0, 0]).all() + + sign, ihmsf = erfa.a2tf(6, np.pi) + assert sign == b'+' + assert (ihmsf == [12, 0, 0, 0]).all() + + rad = erfa.af2a('-', 180, 0, 0.0) + np.testing.assert_allclose(rad, -np.pi) + + rad = erfa.tf2a('+', 12, 0, 0.0) + np.testing.assert_allclose(rad, np.pi) + + rad = erfa.anp(3.*np.pi) + np.testing.assert_allclose(rad, np.pi) + + rad = erfa.anpm(3.*np.pi) + np.testing.assert_allclose(rad, -np.pi) + + sign, ihmsf = erfa.d2tf(1, -1.5) + assert sign == b'-' + assert (ihmsf == [36, 0, 0, 0]).all() + + days = erfa.tf2d('+', 3, 0, 0.0) + np.testing.assert_allclose(days, 0.125) + + +def test_spherical_cartesian(): + + theta, phi = erfa.c2s([0.0, np.sqrt(2.0), np.sqrt(2.0)]) + np.testing.assert_allclose(theta, np.pi/2.0) + np.testing.assert_allclose(phi, np.pi/4.0) + + theta, phi, r = erfa.p2s([0.0, np.sqrt(2.0), np.sqrt(2.0)]) + np.testing.assert_allclose(theta, np.pi/2.0) + np.testing.assert_allclose(phi, np.pi/4.0) + np.testing.assert_allclose(r, 2.0) + + theta, phi, r, td, pd, rd = erfa.pv2s([[0.0, np.sqrt(2.0), np.sqrt(2.0)], [1.0, 0.0, 0.0]]) + np.testing.assert_allclose(theta, np.pi/2.0) + np.testing.assert_allclose(phi, np.pi/4.0) + np.testing.assert_allclose(r, 2.0) + np.testing.assert_allclose(td, -np.sqrt(2.0)/2.0) + np.testing.assert_allclose(pd, 0.0) + np.testing.assert_allclose(rd, 0.0) + + c = erfa.s2c(np.pi/2.0, np.pi/4.0) + np.testing.assert_allclose(c, [0.0, np.sqrt(2.0)/2.0, np.sqrt(2.0)/2.0], atol=1e-14) + + c = erfa.s2p(np.pi/2.0, np.pi/4.0, 1.0) + np.testing.assert_allclose(c, [0.0, np.sqrt(2.0)/2.0, np.sqrt(2.0)/2.0], atol=1e-14) + + pv = erfa.s2pv(np.pi/2.0, np.pi/4.0, 2.0, np.sqrt(2.0)/2.0, 0.0, 0.0) + np.testing.assert_allclose(pv, [[0.0, np.sqrt(2.0), np.sqrt(2.0)], [-1.0, 0.0, 0.0]], atol=1e-14) + + +def test_errwarn_reporting(): + """ + Test that the ERFA error reporting mechanism works as it should + """ + + # no warning + erfa.dat(1990, 1, 1, 0.5) + + # check warning is raised for a scalar + with catch_warnings() as w: + erfa.dat(100, 1, 1, 0.5) + assert len(w) == 1 + assert w[0].category == erfa.ErfaWarning + assert '1 of "dubious year (Note 1)"' in str(w[0].message) + + # and that the count is right for a vector. 
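+    # (A sketch of the mechanism: the generated wrappers collect the ERFA
+    #  status code for each element of the broadcast result and issue one
+    #  summary ErfaWarning that counts every distinct status, which is
+    #  what the assertions on the message text below rely on.)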
+ with catch_warnings() as w: + erfa.dat([100, 200, 1990], 1, 1, 0.5) + assert len(w) == 1 + assert w[0].category == erfa.ErfaWarning + assert '2 of "dubious year (Note 1)"' in str(w[0].message) + + try: + erfa.dat(1990, [1, 34, 2], [1, 1, 43], 0.5) + except erfa.ErfaError as e: + if '1 of "bad day (Note 3)", 1 of "bad month"' not in e.args[0]: + assert False, 'Raised the correct type of error, but wrong message: ' + e.args[0] + + try: + erfa.dat(200, [1, 34, 2], [1, 1, 43], 0.5) + except erfa.ErfaError as e: + if 'warning' in e.args[0]: + assert False, 'Raised the correct type of error, but there were warnings mixed in: ' + e.args[0] + + +def test_vector_inouts(): + """ + Tests that ERFA functions working with vectors are correctly consumed and spit out + """ + + # values are from test_erfa.c t_ab function + pnat = [-0.76321968546737951, + -0.60869453983060384, + -0.21676408580639883] + v = [2.1044018893653786e-5, + -8.9108923304429319e-5, + -3.8633714797716569e-5] + s = 0.99980921395708788 + bm1 = 0.99999999506209258 + + expected = [-0.7631631094219556269, + -0.6087553082505590832, + -0.2167926269368471279] + + res = erfa.ab(pnat, v, s, bm1) + assert res.shape == (3,) + + np.testing.assert_allclose(res, expected) + + res2 = erfa.ab([pnat]*4, v, s, bm1) + assert res2.shape == (4, 3) + np.testing.assert_allclose(res2, [expected]*4) + + # here we stride an array and also do it Fortran-order to make sure + # it all still works correctly with non-contig arrays + pnata = np.array(pnat) + arrin = np.array([pnata, pnata/2, pnata/3, pnata/4, pnata/5]*4, order='F') + res3 = erfa.ab(arrin[::5], v, s, bm1) + assert res3.shape == (4, 3) + np.testing.assert_allclose(res3, [expected]*4) + + +def test_matrix_in(): + jd1 = 2456165.5 + jd2 = 0.401182685 + + pvmat = np.empty((2, 3)) + pvmat[0][0] = -6241497.16 + pvmat[0][1] = 401346.896 + pvmat[0][2] = -1251136.04 + pvmat[1][0] = -29.264597 + pvmat[1][1] = -455.021831 + pvmat[1][2] = 0.0266151194 + + astrom = erfa.apcs13(jd1, jd2, pvmat) + assert astrom.shape == () + + # values from t_erfa_c + np.testing.assert_allclose(astrom['pmt'], 12.65133794027378508) + np.testing.assert_allclose(astrom['em'], 1.010428384373318379) + np.testing.assert_allclose(astrom['eb'], [.9012691529023298391, + -.4173999812023068781, + -.1809906511146821008]) + np.testing.assert_allclose(astrom['bpn'], np.eye(3)) + + # first make sure it *fails* if we mess with the input orders + pvmatbad = np.roll(pvmat.ravel(), 1).reshape((2, 3)) + astrombad = erfa.apcs13(jd1, jd2, pvmatbad) + assert not np.allclose(astrombad['em'], 1.010428384373318379) + + pvmatarr = np.array([pvmat]*3) + astrom2 = erfa.apcs13(jd1, jd2, pvmatarr) + assert astrom2.shape == (3,) + np.testing.assert_allclose(astrom2['em'], 1.010428384373318379) + + # try striding of the input array to make non-contiguous + pvmatarr = np.array([pvmat]*9)[::3] + astrom3 = erfa.apcs13(jd1, jd2, pvmatarr) + assert astrom3.shape == (3,) + np.testing.assert_allclose(astrom3['em'], 1.010428384373318379) + + # try fortran-order + pvmatarr = np.array([pvmat]*3, order='F') + astrom4 = erfa.apcs13(jd1, jd2, pvmatarr) + assert astrom4.shape == (3,) + np.testing.assert_allclose(astrom4['em'], 1.010428384373318379) + + +def test_structs(): + """ + Checks producing and consuming of ERFA c structs + """ + + am, eo = erfa.apci13(2456165.5, [0.401182685, 1]) + assert am.shape == (2, ) + assert am.dtype == erfa.dt_eraASTROM + assert eo.shape == (2, ) + + # a few spotchecks from test_erfa.c + np.testing.assert_allclose(am[0]['pmt'], 
12.65133794027378508) + np.testing.assert_allclose(am[0]['v'], [0.4289638897157027528e-4, + 0.8115034002544663526e-4, + 0.3517555122593144633e-4]) + + ri, di = erfa.atciqz(2.71, 0.174, am[0]) + np.testing.assert_allclose(ri, 2.709994899247599271) + np.testing.assert_allclose(di, 0.1728740720983623469) diff --git a/astropy/analytic_functions/__init__.py b/astropy/analytic_functions/__init__.py new file mode 100644 index 0000000..439fcd0 --- /dev/null +++ b/astropy/analytic_functions/__init__.py @@ -0,0 +1,10 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +"""This package contains analytic functions useful for astronomy. + +In future versions of ``astropy``, many of these might be +accessible as `~astropy.modeling.core.Model`. + +""" + +# Shortcuts for most commonly used blackbody functions +from .blackbody import blackbody_nu, blackbody_lambda diff --git a/astropy/analytic_functions/blackbody.py b/astropy/analytic_functions/blackbody.py new file mode 100644 index 0000000..cdfdb39 --- /dev/null +++ b/astropy/analytic_functions/blackbody.py @@ -0,0 +1,79 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +"""Functions related to blackbody radiation.""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +# LOCAL +from ..modeling import blackbody as _bb +from ..utils.decorators import deprecated + + +__all__ = ['blackbody_nu', 'blackbody_lambda'] + +# Units +FNU = _bb.FNU +FLAM = _bb.FLAM + + +@deprecated('2.0', alternative='astropy.modeling.blackbody.blackbody_nu') +def blackbody_nu(in_x, temperature): + """Calculate blackbody flux per steradian, :math:`B_{\\nu}(T)`. + + .. note:: + + Use `numpy.errstate` to suppress Numpy warnings, if desired. + + .. warning:: + + Output values might contain ``nan`` and ``inf``. + + Parameters + ---------- + in_x : number, array-like, or `~astropy.units.Quantity` + Frequency, wavelength, or wave number. + If not a Quantity, it is assumed to be in Hz. + + temperature : number, array-like, or `~astropy.units.Quantity` + Blackbody temperature. + If not a Quantity, it is assumed to be in Kelvin. + + Returns + ------- + flux : `~astropy.units.Quantity` + Blackbody monochromatic flux in + :math:`erg \\; cm^{-2} s^{-1} Hz^{-1} sr^{-1}`. + + Raises + ------ + ValueError + Invalid temperature. + + ZeroDivisionError + Wavelength is zero (when converting to frequency). + + """ + return _bb.blackbody_nu(in_x, temperature) + + +@deprecated('2.0', alternative='astropy.modeling.blackbody.blackbody_lambda') +def blackbody_lambda(in_x, temperature): + """Like :func:`blackbody_nu` but for :math:`B_{\\lambda}(T)`. + + Parameters + ---------- + in_x : number, array-like, or `~astropy.units.Quantity` + Frequency, wavelength, or wave number. + If not a Quantity, it is assumed to be in Angstrom. + + temperature : number, array-like, or `~astropy.units.Quantity` + Blackbody temperature. + If not a Quantity, it is assumed to be in Kelvin. + + Returns + ------- + flux : `~astropy.units.Quantity` + Blackbody monochromatic flux in + :math:`erg \\; cm^{-2} s^{-1} \\mathring{A}^{-1} sr^{-1}`. 
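+
+    Examples
+    --------
+    A minimal usage sketch (no output shown; the call also issues an
+    `~astropy.utils.exceptions.AstropyDeprecationWarning`, as exercised in
+    the tests below)::
+
+        >>> import astropy.units as u
+        >>> flux = blackbody_lambda(5000 * u.AA, 6000 * u.K)  # doctest: +SKIP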
+ + """ + return _bb.blackbody_lambda(in_x, temperature) diff --git a/astropy/analytic_functions/tests/__init__.py b/astropy/analytic_functions/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/astropy/analytic_functions/tests/test_blackbody.py b/astropy/analytic_functions/tests/test_blackbody.py new file mode 100644 index 0000000..ceb429c --- /dev/null +++ b/astropy/analytic_functions/tests/test_blackbody.py @@ -0,0 +1,22 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +"""Tests for blackbody functions.""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +# LOCAL +from ..blackbody import blackbody_nu, blackbody_lambda +from ... import units as u +from ...tests.helper import catch_warnings +from ...utils.exceptions import AstropyDeprecationWarning + +__doctest_skip__ = ['*'] + + +def test_deprecated_blackbodies(): + with catch_warnings(AstropyDeprecationWarning) as w: + blackbody_nu(5000 * u.AA, 6000 * u.K) + assert len(w) == 1 + + with catch_warnings(AstropyDeprecationWarning) as w: + blackbody_lambda(5000 * u.AA, 6000 * u.K) + assert len(w) == 1 diff --git a/astropy/astropy.cfg b/astropy/astropy.cfg new file mode 100644 index 0000000..af7932f --- /dev/null +++ b/astropy/astropy.cfg @@ -0,0 +1,160 @@ +# -*- coding: utf-8 -*- + +### CONSOLE SETTINGS + +## Use Unicode characters when outputting values, and writing widgets to the +## console. +# unicode_output = False + +## When True, use ANSI color escape sequences when writing to the console. +# use_color = True + +## Maximum number of lines for the pretty-printer. If not provided, +## determine automatically from the size of the terminal. -1 means no +## limit. +# max_lines = + +## Maximum number of characters-per-line for the pretty-printer. If +## not provided, determine automatically from the size of the +## terminal, if possible. -1 means no limit. +# max_width = + + +### CORE DATA STRUCTURES AND TRANSFORMATIONS + +[nddata] + +## Whether to issue a warning if NDData arithmetic is performed with +## uncertainties and the uncertainties do not support the propagation of +## correlated uncertainties. +# warn_unsupported_correlated = True + +## Whether to issue a warning when the `~astropy.nddata.NDData` unit +## attribute is changed from a non-``None`` value to another value +## that data values/uncertainties are not scaled with the unit change. +# warn_setting_unit_directly = True + +[table] + +## The template that determines the name of a column if it cannot be +## determined. Uses new-style (format method) string formatting +# auto_colname = col{0} + +[table.jsviewer] + +## The URL to the jQuery library to use. If not provided, uses the +## internal copy installed with astropy. +# jquery_url = + +## The URL to the jQuery datatables library to use. If not provided, +## uses the internal copy installed with astropy. +# datatables_url = + +### ASTRONOMY COMPUTATIONS AND UTILITIES + +[vo] + +## URL where VO Service database file is stored. +# vos_baseurl = http://stsdas.stsci.edu/astrolib/vo_databases/ + +## Conesearch database name. +# conesearch_dbname = conesearch_good + +[samp] + +## Whether to allow astropy.samp to use the internet, if available +# use_internet = True + +## How many times to retry communications when they fail +# n_retries = 10 + +[vo.validator] + +## Cone Search services master list for validation. 
+# conesearch_master_list = http://vao.stsci.edu/directory/NVORegInt.asmx/VOTCapabilityPredOpt?predicate=1%3D1&capability=conesearch&VOTStyleOption=2 + +## Only check these Cone Search URLs. +# conesearch_urls = + +## VO Table warning codes that are considered non-critical +# noncritical_warnings = W03, W06, W07, W09, W10, W15, W17, W20, W21, W22, W27, W28, W29, W41, W42, W48, W50 + + +### INPUT/OUTPUT + +[io.fits] + +## If True, enable support for record-valued keywords as described by FITS WCS +## Paper IV. Otherwise they are treated as normal keywords. +# enable_record_valued_keyword_cards = True + +## If True, extension names (i.e. the EXTNAME keyword) should be treated as +## case-sensitive. +# extension_name_case_sensitive = False + +## If True, automatically remove trailing whitespace for string values in +## headers. Otherwise the values are returned verbatim, with all whitespace +## intact. +# strip_header_whitespace = True + +## If True, use memory-mapped file access to read/write the data in FITS files. +## This generally provides better performance, especially for large files, but +## may affect performance in I/O-heavy applications. +# use_memmap = True + +[io.votable] + +## When True, treat fixable violations of the VOTable spec as exceptions. +# pedantic = True + + +### NUTS AND BOLTS OF ASTROPY + + +[logger] + +## Threshold for the logging messages. Logging messages that are less severe +## than this level will be ignored. The levels are 'DEBUG', 'INFO', 'WARNING', +## 'ERROR' +# log_level = INFO + +## Whether to log warnings.warn calls +# log_warnings = True + +## Whether to log exceptions before raising them +# log_exceptions = False + +## Whether to always log messages to a log file +# log_to_file = False + +## The file to log messages to. When '', it defaults to a file 'astropy.log' in +## the astropy config directory. +# log_file_path = "" + +## Threshold for logging messages to log_file_path +# log_file_level = INFO + +## Format for log file entries +# log_file_format = "%(asctime)r, %(origin)r, %(levelname)r, %(message)r" + +[utils.data] + +## URL for astropy remote data site. +# dataurl = http://data.astropy.org/ + +## Time to wait for remote data query (in seconds). +# remote_timeout = 3.0 + +## Block size for computing MD5 file hashes. +# hash_block_size = 65536 + +## Number of bytes of remote data to download per step. +# download_block_size = 65536 + +## Number of times to try to get the lock while accessing the data cache before +## giving up. +# download_cache_lock_attempts = 5 + +## If True, temporary download files created when the cache is inacessible will +## be deleted at the end of the python session. +# delete_temporary_downloads_at_exit = True diff --git a/astropy/config/__init__.py b/astropy/config/__init__.py new file mode 100644 index 0000000..0643f7c --- /dev/null +++ b/astropy/config/__init__.py @@ -0,0 +1,13 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +""" +This module contains configuration and setup utilities for the +Astropy project. This includes all functionality related to the +affiliated package index. 
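+
+For example, a sketch of reading one option at run time (the value shown
+depends on the local configuration)::
+
+    >>> from astropy import conf
+    >>> conf.use_color  # doctest: +SKIP
+    True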
+""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +from .paths import * +from .configuration import * +from .affiliated import * diff --git a/astropy/config/affiliated.py b/astropy/config/affiliated.py new file mode 100644 index 0000000..305dfed --- /dev/null +++ b/astropy/config/affiliated.py @@ -0,0 +1,9 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +"""This module contains functions and classes for finding information about +affiliated packages and installing them. +""" + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +__all__ = [] diff --git a/astropy/config/configuration.py b/astropy/config/configuration.py new file mode 100644 index 0000000..dca52f9 --- /dev/null +++ b/astropy/config/configuration.py @@ -0,0 +1,724 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +"""This module contains classes and functions to standardize access to +configuration files for Astropy and affiliated packages. + +.. note:: + The configuration system makes use of the 'configobj' package, which stores + configuration in a text format like that used in the standard library + `ConfigParser`. More information and documentation for configobj can be + found at http://www.voidspace.org.uk/python/configobj.html. +""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) +from ..extern import six + +from contextlib import contextmanager +import hashlib +import io +from os import path +import re +from warnings import warn + +from ..extern.configobj import configobj, validate +from ..utils.exceptions import AstropyWarning, AstropyDeprecationWarning +from ..utils import find_current_module +from ..utils.introspection import resolve_name +from ..utils.misc import InheritDocstrings +from .paths import get_config_dir + + +__all__ = ['InvalidConfigurationItemWarning', + 'ConfigurationMissingWarning', 'get_config', + 'reload_config', 'ConfigNamespace', 'ConfigItem'] + + +class InvalidConfigurationItemWarning(AstropyWarning): + """ A Warning that is issued when the configuration value specified in the + astropy configuration file does not match the type expected for that + configuration value. + """ + + +class ConfigurationMissingWarning(AstropyWarning): + """ A Warning that is issued when the configuration directory cannot be + accessed (usually due to a permissions problem). If this warning appears, + configuration items will be set to their defaults rather than read from the + configuration file, and no configuration will persist across sessions. + """ + + +# these are not in __all__ because it's not intended that a user ever see them +class ConfigurationDefaultMissingError(ValueError): + """ An exception that is raised when the configuration defaults (which + should be generated at build-time) are missing. + """ + + +# this is used in astropy/__init__.py +class ConfigurationDefaultMissingWarning(AstropyWarning): + """ A warning that is issued when the configuration defaults (which + should be generated at build-time) are missing. + """ + + +class ConfigurationChangedWarning(AstropyWarning): + """ + A warning that the configuration options have changed. 
+ """ + + +class _ConfigNamespaceMeta(type): + def __init__(cls, name, bases, dict): + if cls.__bases__[0] is object: + return + + for key, val in six.iteritems(dict): + if isinstance(val, ConfigItem): + val.name = key + + +@six.add_metaclass(_ConfigNamespaceMeta) +class ConfigNamespace(object): + """ + A namespace of configuration items. Each subpackage with + configuration items should define a subclass of this class, + containing `ConfigItem` instances as members. + + For example:: + + class Conf(_config.ConfigNamespace): + unicode_output = _config.ConfigItem( + False, + 'Use Unicode characters when outputting values, ...') + use_color = _config.ConfigItem( + sys.platform != 'win32', + 'When True, use ANSI color escape sequences when ...', + aliases=['astropy.utils.console.USE_COLOR']) + conf = Conf() + """ + def set_temp(self, attr, value): + """ + Temporarily set a configuration value. + + Parameters + ---------- + attr : str + Configuration item name + + value : object + The value to set temporarily. + + Examples + -------- + >>> import astropy + >>> with astropy.conf.set_temp('use_color', False): + ... pass + ... # console output will not contain color + >>> # console output contains color again... + """ + if hasattr(self, attr): + return self.__class__.__dict__[attr].set_temp(value) + raise AttributeError("No configuration parameter '{0}'".format(attr)) + + def reload(self, attr=None): + """ + Reload a configuration item from the configuration file. + + Parameters + ---------- + attr : str, optional + The name of the configuration parameter to reload. If not + provided, reload all configuration parameters. + """ + if attr is not None: + if hasattr(self, attr): + return self.__class__.__dict__[attr].reload() + raise AttributeError("No configuration parameter '{0}'".format(attr)) + + for item in six.itervalues(self.__class__.__dict__): + if isinstance(item, ConfigItem): + item.reload() + + def reset(self, attr=None): + """ + Reset a configuration item to its default. + + Parameters + ---------- + attr : str, optional + The name of the configuration parameter to reload. If not + provided, reset all configuration parameters. + """ + if attr is not None: + if hasattr(self, attr): + prop = self.__class__.__dict__[attr] + prop.set(prop.defaultvalue) + return + raise AttributeError("No configuration parameter '{0}'".format(attr)) + + for item in six.itervalues(self.__class__.__dict__): + if isinstance(item, ConfigItem): + item.set(item.defaultvalue) + + +@six.add_metaclass(InheritDocstrings) +class ConfigItem(object): + """ + A setting and associated value stored in a configuration file. + + These objects should be created as members of + `ConfigNamespace` subclasses, for example:: + + class _Conf(config.ConfigNamespace): + unicode_output = config.ConfigItem( + False, + 'Use Unicode characters when outputting values, and writing widgets ' + 'to the console.') + conf = _Conf() + + Parameters + ---------- + defaultvalue : object, optional + The default value for this item. If this is a list of strings, this + item will be interpreted as an 'options' value - this item must be one + of those values, and the first in the list will be taken as the default + value. + + description : str or None, optional + A description of this item (will be shown as a comment in the + configuration file) + + cfgtype : str or None, optional + A type specifier like those used as the *values* of a particular key + in a ``configspec`` file of ``configobj``. 
If None, the type will be + inferred from the default value. + + module : str or None, optional + The full module name that this item is associated with. The first + element (e.g. 'astropy' if this is 'astropy.config.configuration') + will be used to determine the name of the configuration file, while + the remaining items determine the section. If None, the package will be + inferred from the package within whiich this object's initializer is + called. + + aliases : str, or list of str, optional + The deprecated location(s) of this configuration item. If the + config item is not found at the new location, it will be + searched for at all of the old locations. + + Raises + ------ + RuntimeError + If ``module`` is `None`, but the module this item is created from + cannot be determined. + """ + + # this is used to make validation faster so a Validator object doesn't + # have to be created every time + _validator = validate.Validator() + cfgtype = None + """ + A type specifier like those used as the *values* of a particular key in a + ``configspec`` file of ``configobj``. + """ + + def __init__(self, defaultvalue='', description=None, cfgtype=None, + module=None, aliases=None): + from ..utils import isiterable + + if module is None: + module = find_current_module(2) + if module is None: + msg1 = 'Cannot automatically determine get_config module, ' + msg2 = 'because it is not called from inside a valid module' + raise RuntimeError(msg1 + msg2) + else: + module = module.__name__ + + self.module = module + self.description = description + self.__doc__ = description + + # now determine cfgtype if it is not given + if cfgtype is None: + if (isiterable(defaultvalue) and not + isinstance(defaultvalue, six.string_types)): + # it is an options list + dvstr = [six.text_type(v) for v in defaultvalue] + cfgtype = 'option(' + ', '.join(dvstr) + ')' + defaultvalue = dvstr[0] + elif isinstance(defaultvalue, bool): + cfgtype = 'boolean' + elif isinstance(defaultvalue, int): + cfgtype = 'integer' + elif isinstance(defaultvalue, float): + cfgtype = 'float' + elif isinstance(defaultvalue, six.string_types): + cfgtype = 'string' + defaultvalue = six.text_type(defaultvalue) + + self.cfgtype = cfgtype + + self._validate_val(defaultvalue) + self.defaultvalue = defaultvalue + + if aliases is None: + self.aliases = [] + elif isinstance(aliases, six.string_types): + self.aliases = [aliases] + else: + self.aliases = aliases + + def __set__(self, obj, value): + return self.set(value) + + def __get__(self, obj, objtype=None): + if obj is None: + return self + return self() + + def set(self, value): + """ + Sets the current value of this ``ConfigItem``. + + This also updates the comments that give the description and type + information. + + Parameters + ---------- + value + The value this item should be set to. + + Raises + ------ + TypeError + If the provided ``value`` is not valid for this ``ConfigItem``. + """ + try: + value = self._validate_val(value) + except validate.ValidateError as e: + msg = 'Provided value for configuration item {0} not valid: {1}' + raise TypeError(msg.format(self.name, e.args[0])) + + sec = get_config(self.module) + + sec[self.name] = value + + @contextmanager + def set_temp(self, value): + """ + Sets this item to a specified value only inside a with block. + + Use as:: + + ITEM = ConfigItem('ITEM', 'default', 'description') + + with ITEM.set_temp('newval'): + #... do something that wants ITEM's value to be 'newval' ... 
+ print(ITEM) + + # ITEM is now 'default' after the with block + + Parameters + ---------- + value + The value to set this item to inside the with block. + + """ + initval = self() + self.set(value) + try: + yield + finally: + self.set(initval) + + def reload(self): + """ Reloads the value of this ``ConfigItem`` from the relevant + configuration file. + + Returns + ------- + val + The new value loaded from the configuration file. + """ + self.set(self.defaultvalue) + baseobj = get_config(self.module, True) + secname = baseobj.name + + cobj = baseobj + # a ConfigObj's parent is itself, so we look for the parent with that + while cobj.parent is not cobj: + cobj = cobj.parent + + newobj = configobj.ConfigObj(cobj.filename, interpolation=False) + if secname is not None: + if secname not in newobj: + return baseobj.get(self.name) + newobj = newobj[secname] + + if self.name in newobj: + baseobj[self.name] = newobj[self.name] + return baseobj.get(self.name) + + def __repr__(self): + out = '<{0}: name={1!r} value={2!r} at 0x{3:x}>'.format( + self.__class__.__name__, self.name, self(), id(self)) + return out + + def __str__(self): + out = '\n'.join(('{0}: {1}', + ' cfgtype={2!r}', + ' defaultvalue={3!r}', + ' description={4!r}', + ' module={5}', + ' value={6!r}')) + out = out.format(self.__class__.__name__, self.name, self.cfgtype, + self.defaultvalue, self.description, self.module, + self()) + return out + + def __call__(self): + """ Returns the value of this ``ConfigItem`` + + Returns + ------- + val + This item's value, with a type determined by the ``cfgtype`` + attribute. + + Raises + ------ + TypeError + If the configuration value as stored is not this item's type. + """ + def section_name(section): + if section == '': + return 'at the top-level' + else: + return 'in section [{0}]'.format(section) + + options = [] + sec = get_config(self.module) + if self.name in sec: + options.append((sec[self.name], self.module, self.name)) + + for alias in self.aliases: + module, name = alias.rsplit('.', 1) + sec = get_config(module) + if '.' in module: + filename, module = module.split('.', 1) + else: + filename = module + module = '' + if name in sec: + if '.' in self.module: + new_module = self.module.split('.', 1)[1] + else: + new_module = '' + warn( + "Config parameter '{0}' {1} of the file '{2}' " + "is deprecated. Use '{3}' {4} instead.".format( + name, section_name(module), get_config_filename(filename), + self.name, section_name(new_module)), + AstropyDeprecationWarning) + options.append((sec[name], module, name)) + + if len(options) == 0: + self.set(self.defaultvalue) + options.append((self.defaultvalue, None, None)) + + if len(options) > 1: + filename, sec = self.module.split('.', 1) + warn( + "Config parameter '{0}' {1} of the file '{2}' is " + "given by more than one alias ({3}). 
Using the first.".format( + self.name, section_name(sec), get_config_filename(filename), + ', '.join([ + '.'.join(x[1:3]) for x in options if x[1] is not None])), + AstropyDeprecationWarning) + + val = options[0][0] + + try: + return self._validate_val(val) + except validate.ValidateError as e: + raise TypeError('Configuration value not valid:' + e.args[0]) + + def _validate_val(self, val): + """ Validates the provided value based on cfgtype and returns the + type-cast value + + throws the underlying configobj exception if it fails + """ + # note that this will normally use the *class* attribute `_validator`, + # but if some arcane reason is needed for making a special one for an + # instance or sub-class, it will be used + return self._validator.check(self.cfgtype, val) + + +# this dictionary stores the master copy of the ConfigObj's for each +# root package +_cfgobjs = {} + + +def get_config_filename(packageormod=None): + """ + Get the filename of the config file associated with the given + package or module. + """ + cfg = get_config(packageormod) + while cfg.parent is not cfg: + cfg = cfg.parent + return cfg.filename + + +# This is used by testing to override the config file, so we can test +# with various config files that exercise different features of the +# config system. +_override_config_file = None + + +def get_config(packageormod=None, reload=False): + """ Gets the configuration object or section associated with a particular + package or module. + + Parameters + ----------- + packageormod : str or None + The package for which to retrieve the configuration object. If a + string, it must be a valid package name, or if `None`, the package from + which this function is called will be used. + + reload : bool, optional + Reload the file, even if we have it cached. + + Returns + ------- + cfgobj : ``configobj.ConfigObj`` or ``configobj.Section`` + If the requested package is a base package, this will be the + ``configobj.ConfigObj`` for that package, or if it is a subpackage or + module, it will return the relevant ``configobj.Section`` object. + + Raises + ------ + RuntimeError + If ``packageormod`` is `None`, but the package this item is created + from cannot be determined. 
+ """ + if packageormod is None: + packageormod = find_current_module(2) + if packageormod is None: + msg1 = 'Cannot automatically determine get_config module, ' + msg2 = 'because it is not called from inside a valid module' + raise RuntimeError(msg1 + msg2) + else: + packageormod = packageormod.__name__ + + packageormodspl = packageormod.split('.') + rootname = packageormodspl[0] + secname = '.'.join(packageormodspl[1:]) + + cobj = _cfgobjs.get(rootname, None) + + if cobj is None or reload: + if _ASTROPY_SETUP_: + # There's no reason to use anything but the default config + cobj = configobj.ConfigObj(interpolation=False) + else: + cfgfn = None + try: + # This feature is intended only for use by the unit tests + if _override_config_file is not None: + cfgfn = _override_config_file + else: + cfgfn = path.join(get_config_dir(), rootname + '.cfg') + cobj = configobj.ConfigObj(cfgfn, interpolation=False) + except (IOError, OSError) as e: + msg = ('Configuration defaults will be used due to ') + errstr = '' if len(e.args) < 1 else (':' + str(e.args[0])) + msg += e.__class__.__name__ + errstr + msg += ' on {0}'.format(cfgfn) + warn(ConfigurationMissingWarning(msg)) + + # This caches the object, so if the file becomes accessible, this + # function won't see it unless the module is reloaded + cobj = configobj.ConfigObj(interpolation=False) + + _cfgobjs[rootname] = cobj + + if secname: # not the root package + if secname not in cobj: + cobj[secname] = {} + return cobj[secname] + else: + return cobj + + +def reload_config(packageormod=None): + """ Reloads configuration settings from a configuration file for the root + package of the requested package/module. + + This overwrites any changes that may have been made in `ConfigItem` + objects. This applies for any items that are based on this file, which + is determined by the *root* package of ``packageormod`` + (e.g. ``'astropy.cfg'`` for the ``'astropy.config.configuration'`` + module). + + Parameters + ---------- + packageormod : str or None + The package or module name - see `get_config` for details. + """ + sec = get_config(packageormod, True) + # look for the section that is its own parent - that's the base object + while sec.parent is not sec: + sec = sec.parent + sec.reload() + + +def is_unedited_config_file(content, template_content=None): + """ + Determines if a config file can be safely replaced because it doesn't + actually contain any meaningful content. + + To meet this criteria, the config file must be either: + + - All comments or completely empty + + - An exact match to a "legacy" version of the config file prior to + Astropy 0.4, when APE3 was implemented and the config file + contained commented-out values by default. + """ + # We want to calculate the md5sum using universal line endings, so + # that even if the files had their line endings converted to \r\n + # on Windows, this will still work. + + content = content.encode('latin-1') + + # The jquery_url setting, present in 0.3.2 and later only, is + # effectively auto-generated by the build system, so we need to + # ignore it in the md5sum calculation for 0.3.2. + content = re.sub(br'\njquery_url\s*=\s*[^\n]+', b'', content) + + # First determine if the config file has any effective content + buffer = io.BytesIO(content) + buffer.seek(0) + raw_cfg = configobj.ConfigObj(buffer, interpolation=True) + for v in six.itervalues(raw_cfg): + if len(v): + break + else: + return True + + # Now determine if it matches the md5sum of a known, unedited + # config file. 
+ known_configs = set([ + '7d4b4f1120304b286d71f205975b1286', # v0.3.2 + '5df7e409425e5bfe7ed041513fda3288', # v0.3 + '8355f99a01b3bdfd8761ef45d5d8b7e5', # v0.2 + '4ea5a84de146dc3fcea2a5b93735e634' # v0.2.1, v0.2.2, v0.2.3, v0.2.4, v0.2.5 + ]) + + md5 = hashlib.md5() + md5.update(content) + digest = md5.hexdigest() + return digest in known_configs + + +# this is not in __all__ because it's not intended that a user uses it +def update_default_config(pkg, default_cfg_dir_or_fn, version=None): + """ + Checks if the configuration file for the specified package exists, + and if not, copy over the default configuration. If the + configuration file looks like it has already been edited, we do + not write over it, but instead write a file alongside it named + ``pkg.version.cfg`` as a "template" for the user. + + Parameters + ---------- + pkg : str + The package to be updated. + default_cfg_dir_or_fn : str + The filename or directory name where the default configuration file is. + If a directory name, ``'pkg.cfg'`` will be used in that directory. + version : str, optional + The current version of the given package. If not provided, it will + be obtained from ``pkg.__version__``. + + Returns + ------- + updated : bool + If the profile was updated, `True`, otherwise `False`. + + Raises + ------ + AttributeError + If the version number of the package could not determined. + + """ + + if path.isdir(default_cfg_dir_or_fn): + default_cfgfn = path.join(default_cfg_dir_or_fn, pkg + '.cfg') + else: + default_cfgfn = default_cfg_dir_or_fn + + if not path.isfile(default_cfgfn): + # There is no template configuration file, which basically + # means the affiliated package is not using the configuration + # system, so just return. + return False + + cfgfn = get_config(pkg).filename + + with io.open(default_cfgfn, 'rt', encoding='latin-1') as fr: + template_content = fr.read() + + doupdate = False + if cfgfn is not None: + if path.exists(cfgfn): + with io.open(cfgfn, 'rt', encoding='latin-1') as fd: + content = fd.read() + + identical = (content == template_content) + + if not identical: + doupdate = is_unedited_config_file( + content, template_content) + elif path.exists(path.dirname(cfgfn)): + doupdate = True + identical = False + + if version is None: + version = resolve_name(pkg, '__version__') + + # Don't install template files for dev versions, or we'll end up + # spamming `~/.astropy/config`. + if 'dev' not in version and cfgfn is not None: + template_path = path.join( + get_config_dir(), '{0}.{1}.cfg'.format(pkg, version)) + needs_template = not path.exists(template_path) + else: + needs_template = False + + if doupdate or needs_template: + if needs_template: + with io.open(template_path, 'wt', encoding='latin-1') as fw: + fw.write(template_content) + # If we just installed a new template file and we can't + # update the main configuration file because it has user + # changes, display a warning. + if not identical and not doupdate: + warn( + "The configuration options in {0} {1} may have changed, " + "your configuration file was not updated in order to " + "preserve local changes. 
A new configuration template " + "has been saved to '{2}'.".format( + pkg, version, template_path), + ConfigurationChangedWarning) + + if doupdate and not identical: + with io.open(cfgfn, 'wt', encoding='latin-1') as fw: + fw.write(template_content) + return True + + return False diff --git a/astropy/config/paths.py b/astropy/config/paths.py new file mode 100644 index 0000000..dd01621 --- /dev/null +++ b/astropy/config/paths.py @@ -0,0 +1,315 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" This module contains functions to determine where configuration and +data/cache files used by Astropy should be placed. +""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +from ..extern import six +from ..utils.decorators import wraps + +import os +import shutil +import sys + + +__all__ = ['get_config_dir', 'get_cache_dir', 'set_temp_config', + 'set_temp_cache'] + + +def _find_home(): + """ Locates and return the home directory (or best approximation) on this + system. + + Raises + ------ + OSError + If the home directory cannot be located - usually means you are running + Astropy on some obscure platform that doesn't have standard home + directories. + """ + + # this is used below to make fix up encoding issues that sometimes crop up + # in py2.x but not in py3.x + if six.PY2: + decodepath = lambda pth: pth.decode(sys.getfilesystemencoding()) + else: + decodepath = lambda pth: pth + + # First find the home directory - this is inspired by the scheme ipython + # uses to identify "home" + if os.name == 'posix': + # Linux, Unix, AIX, OS X + if 'HOME' in os.environ: + homedir = decodepath(os.environ['HOME']) + else: + raise OSError('Could not find unix home directory to search for ' + 'astropy config dir') + elif os.name == 'nt': # This is for all modern Windows (NT or after) + if 'MSYSTEM' in os.environ and os.environ.get('HOME'): + # Likely using an msys shell; use whatever it is using for its + # $HOME directory + homedir = decodepath(os.environ['HOME']) + # Next try for a network home + elif 'HOMESHARE' in os.environ: + homedir = decodepath(os.environ['HOMESHARE']) + # See if there's a local home + elif 'HOMEDRIVE' in os.environ and 'HOMEPATH' in os.environ: + homedir = os.path.join(os.environ['HOMEDRIVE'], + os.environ['HOMEPATH']) + homedir = decodepath(homedir) + # Maybe a user profile? + elif 'USERPROFILE' in os.environ: + homedir = decodepath(os.path.join(os.environ['USERPROFILE'])) + else: + try: + from ..extern.six.moves import winreg as wreg + shell_folders = r'Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders' + key = wreg.OpenKey(wreg.HKEY_CURRENT_USER, shell_folders) + + homedir = wreg.QueryValueEx(key, 'Personal')[0] + homedir = decodepath(homedir) + key.Close() + except Exception: + # As a final possible resort, see if HOME is present + if 'HOME' in os.environ: + homedir = decodepath(os.environ['HOME']) + else: + raise OSError('Could not find windows home directory to ' + 'search for astropy config dir') + else: + # for other platforms, try HOME, although it probably isn't there + if 'HOME' in os.environ: + homedir = decodepath(os.environ['HOME']) + else: + raise OSError('Could not find a home directory to search for ' + 'astropy config dir - are you on an unspported ' + 'platform?') + return homedir + + +def get_config_dir(create=True): + """ + Determines the Astropy configuration directory name and creates the + directory if it doesn't exist. 
+ + This directory is typically ``$HOME/.astropy/config``, but if the + XDG_CONFIG_HOME environment variable is set and the + ``$XDG_CONFIG_HOME/astropy`` directory exists, it will be that directory. + If neither exists, the former will be created and symlinked to the latter. + + Returns + ------- + configdir : str + The absolute path to the configuration directory. + + """ + + # symlink will be set to this if the directory is created + linkto = None + + # If using set_temp_config, that overrides all + if set_temp_config._temp_path is not None: + xch = set_temp_config._temp_path + config_path = os.path.join(xch, 'astropy') + if not os.path.exists(config_path): + os.mkdir(config_path) + return os.path.abspath(config_path) + + # first look for XDG_CONFIG_HOME + xch = os.environ.get('XDG_CONFIG_HOME') + + if xch is not None and os.path.exists(xch): + xchpth = os.path.join(xch, 'astropy') + if not os.path.islink(xchpth): + if os.path.exists(xchpth): + return os.path.abspath(xchpth) + else: + linkto = xchpth + return os.path.abspath(_find_or_create_astropy_dir('config', linkto)) + + +def get_cache_dir(): + """ + Determines the Astropy cache directory name and creates the directory if it + doesn't exist. + + This directory is typically ``$HOME/.astropy/cache``, but if the + XDG_CACHE_HOME environment variable is set and the + ``$XDG_CACHE_HOME/astropy`` directory exists, it will be that directory. + If neither exists, the former will be created and symlinked to the latter. + + Returns + ------- + cachedir : str + The absolute path to the cache directory. + + """ + + # symlink will be set to this if the directory is created + linkto = None + + # If using set_temp_cache, that overrides all + if set_temp_cache._temp_path is not None: + xch = set_temp_cache._temp_path + cache_path = os.path.join(xch, 'astropy') + if not os.path.exists(cache_path): + os.mkdir(cache_path) + return os.path.abspath(cache_path) + + # first look for XDG_CACHE_HOME + xch = os.environ.get('XDG_CACHE_HOME') + + if xch is not None and os.path.exists(xch): + xchpth = os.path.join(xch, 'astropy') + if not os.path.islink(xchpth): + if os.path.exists(xchpth): + return os.path.abspath(xchpth) + else: + linkto = xchpth + + return os.path.abspath(_find_or_create_astropy_dir('cache', linkto)) + + +class _SetTempPath(object): + _temp_path = None + _default_path_getter = None + + def __init__(self, path=None, delete=False): + if path is not None: + path = os.path.abspath(path) + + self._path = path + self._delete = delete + self._prev_path = self.__class__._temp_path + + def __enter__(self): + self.__class__._temp_path = self._path + return self._default_path_getter() + + def __exit__(self, *args): + self.__class__._temp_path = self._prev_path + + if self._delete and self._path is not None: + shutil.rmtree(self._path) + + def __call__(self, func): + """Implements use as a decorator.""" + + @wraps(func) + def wrapper(*args, **kwargs): + with self: + func(*args, **kwargs) + + return wrapper + + +class set_temp_config(_SetTempPath): + """ + Context manager to set a temporary path for the Astropy config, primarily + for use with testing. + + If the path set by this context manager does not already exist it will be + created, if possible. + + This may also be used as a decorator on a function to set the config path + just within that function. + + Parameters + ---------- + + path : str, optional + The directory (which must exist) in which to find the Astropy config + files, or create them if they do not already exist. 
If None, this + restores the config path to the user's default config path as returned + by `get_config_dir` as though this context manager were not in effect + (this is useful for testing). In this case the ``delete`` argument is + always ignored. + + delete : bool, optional + If True, cleans up the temporary directory after exiting the temp + context (default: False). + """ + + _default_path_getter = staticmethod(get_config_dir) + + def __enter__(self): + # Special case for the config case, where we need to reset all the + # cached config objects + from .configuration import _cfgobjs + + path = super(set_temp_config, self).__enter__() + _cfgobjs.clear() + return path + + def __exit__(self, *args): + from .configuration import _cfgobjs + + super(set_temp_config, self).__exit__(*args) + _cfgobjs.clear() + + +class set_temp_cache(_SetTempPath): + """ + Context manager to set a temporary path for the Astropy download cache, + primarily for use with testing (though there may be other applications + for setting a different cache directory, for example to switch to a cache + dedicated to large files). + + If the path set by this context manager does not already exist it will be + created, if possible. + + This may also be used as a decorator on a function to set the cache path + just within that function. + + Parameters + ---------- + + path : str + The directory (which must exist) in which to find the Astropy cache + files, or create them if they do not already exist. If None, this + restores the cache path to the user's default cache path as returned + by `get_cache_dir` as though this context manager were not in effect + (this is useful for testing). In this case the ``delete`` argument is + always ignored. + + delete : bool, optional + If True, cleans up the temporary directory after exiting the temp + context (default: False). + """ + + _default_path_getter = staticmethod(get_cache_dir) + + +def _find_or_create_astropy_dir(dirnm, linkto): + innerdir = os.path.join(_find_home(), '.astropy') + maindir = os.path.join(_find_home(), '.astropy', dirnm) + + if not os.path.exists(maindir): + # first create .astropy dir if needed + if not os.path.exists(innerdir): + try: + os.mkdir(innerdir) + except OSError: + if not os.path.isdir(innerdir): + raise + elif not os.path.isdir(innerdir): + msg = 'Intended Astropy directory {0} is actually a file.' + raise IOError(msg.format(innerdir)) + + try: + os.mkdir(maindir) + except OSError: + if not os.path.isdir(maindir): + raise + + if (not sys.platform.startswith('win') and + linkto is not None and + not os.path.exists(linkto)): + os.symlink(maindir, linkto) + + elif not os.path.isdir(maindir): + msg = 'Intended Astropy {0} directory {1} is actually a file.' 
+ raise IOError(msg.format(dirnm, maindir)) + + return os.path.abspath(maindir) diff --git a/astropy/config/setup_package.py b/astropy/config/setup_package.py new file mode 100644 index 0000000..e4a1c0f --- /dev/null +++ b/astropy/config/setup_package.py @@ -0,0 +1,11 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + + +def get_package_data(): + return { + str('astropy.config.tests'): ['data/*.cfg'] + } + + +def requires_2to3(): + return False diff --git a/astropy/config/tests/__init__.py b/astropy/config/tests/__init__.py new file mode 100644 index 0000000..800d82e --- /dev/null +++ b/astropy/config/tests/__init__.py @@ -0,0 +1,2 @@ +from __future__ import (absolute_import, division, print_function, + unicode_literals) diff --git a/astropy/config/tests/data/alias.cfg b/astropy/config/tests/data/alias.cfg new file mode 100644 index 0000000..612cdd9 --- /dev/null +++ b/astropy/config/tests/data/alias.cfg @@ -0,0 +1,2 @@ +[coordinates.name_resolve] +name_resolve_timeout = 42.0 \ No newline at end of file diff --git a/astropy/config/tests/data/astropy.0.3.cfg b/astropy/config/tests/data/astropy.0.3.cfg new file mode 100644 index 0000000..cafa0e4 --- /dev/null +++ b/astropy/config/tests/data/astropy.0.3.cfg @@ -0,0 +1,149 @@ + +# Use Unicode characters when outputting values, and writing widgets to the +# console. +unicode_output = False +[utils.console] + +# When True, use ANSI color escape sequences when writing to the console. +use_color = True + +[logger] + +# Threshold for the logging messages. Logging messages that are less severe +# than this level will be ignored. The levels are 'DEBUG', 'INFO', 'WARNING', +# 'ERROR' +log_level = INFO + +# Whether to use color for the level names +use_color = True + +# Whether to log warnings.warn calls +log_warnings = True + +# Whether to log exceptions before raising them +log_exceptions = False + +# Whether to always log messages to a log file +log_to_file = False + +# The file to log messages to. When '', it defaults to a file 'astropy.log' in +# the astropy config directory. +log_file_path = "" + +# Threshold for logging messages to log_file_path +log_file_level = INFO + +# Format for log file entries +log_file_format = "%(asctime)r, %(origin)r, %(levelname)r, %(message)r" + +[coordinates.name_resolve] + +# The URL to Sesame's web-queryable database. +sesame_url = http://cdsweb.u-strasbg.fr/cgi-bin/nph-sesame/, http://vizier.cfa.harvard.edu/viz-bin/nph-sesame/ + +# This specifies the default database that SESAME will query when using the +# name resolve mechanism in the coordinates subpackage. Default is to search +# all databases, but this can be 'all', 'simbad', 'ned', or 'vizier'. +# Options: all, simbad, ned, vizier +sesame_database = all + +# This is the maximum time to wait for a response from a name resolve query to +# SESAME in seconds. +name_resolve_timeout = 5 + +[table.pprint] + +# Maximum number of lines for the pretty-printer to use if it cannot determine +# the terminal size. Negative numbers mean no limit. +max_lines = 25 + +# Maximum number of characters for the pretty-printer to use per line if it +# cannot determine the terminal size. Negative numbers mean no limit. +max_width = 80 + +[table.table] + +# The template that determines the name of a column if it cannot be +# determined. Uses new-style (format method) string formatting +auto_colname = col{0} + +[utils.data] + +# URL for astropy remote data site. +dataurl = http://data.astropy.org/ + +# Time to wait for remote data query (in seconds). 
+remote_timeout = 3.0 + +# Block size for computing MD5 file hashes. +hash_block_size = 65536 + +# Number of bytes of remote data to download per step. +download_block_size = 65536 + +# Number of times to try to get the lock while accessing the data cache before +# giving up. +download_cache_lock_attempts = 5 + +# If True, temporary download files created when the cache is inacessible will +# be deleted at the end of the python session. +delete_temporary_downloads_at_exit = True + +[io.fits] + +# If True, enable support for record-valued keywords as described by FITS WCS +# Paper IV. Otherwise they are treated as normal keywords. +enabled_record_valued_keyword_cards = True + +# If True, extension names (i.e. the EXTNAME keyword) should be treated as +# case-sensitive. +extension_name_case_sensitive = False + +# If True, automatically remove trailing whitespace for string values in +# headers. Otherwise the values are returned verbatim, with all whitespace +# intact. +strip_header_whitespace = True + +# If True, use memory-mapped file access to read/write the data in FITS files. +# This generally provides better performance, especially for large files, but +# may affect performance in I/O-heavy applications. +use_memmap = True + +[io.votable.table] + +# When True, treat fixable violations of the VOTable spec as exceptions. +pedantic = False + +[cosmology.core] + +# The default cosmology to use. Note this is only read on import, so changing +# this value at runtime has no effect. +default_cosmology = no_default + +[nddata.nddata] + +# Whether to issue a warning if NDData arithmetic is performed with +# uncertainties and the uncertainties do not support the propagation of +# correlated uncertainties. +warn_unsupported_correlated = True + +[vo.client.vos_catalog] + +# URL where VO Service database file is stored. +vos_baseurl = http://stsdas.stsci.edu/astrolib/vo_databases/ + +[vo.client.conesearch] + +# Conesearch database name. +conesearch_dbname = conesearch_good + +[vo.validator.validate] + +# Cone Search services master list for validation. +cs_mstr_list = http://vao.stsci.edu/directory/NVORegInt.asmx/VOTCapabilityPredOpt?predicate=1%3D1&capability=conesearch&VOTStyleOption=2 + +# Only check these Cone Search URLs. 
+cs_urls = http://archive.noao.edu/nvo/usno.php?cat=a&, http://gsss.stsci.edu/webservices/vo/ConeSearch.aspx?CAT=GSC23&, http://irsa.ipac.caltech.edu/cgi-bin/Oasis/CatSearch/nph-catsearch?CAT=fp_psc&, http://vizier.u-strasbg.fr/viz-bin/votable/-A?-source=I/220/out&, http://vizier.u-strasbg.fr/viz-bin/votable/-A?-source=I/243/out&, http://vizier.u-strasbg.fr/viz-bin/votable/-A?-source=I/252/out&, http://vizier.u-strasbg.fr/viz-bin/votable/-A?-source=I/254/out&, http://vizier.u-strasbg.fr/viz-bin/votable/-A?-source=I/255/out&, http://vizier.u-strasbg.fr/viz-bin/votable/-A?-source=I/284/out&, http://vizier.u-strasbg.fr/viz-bin/votable/-A?-source=II/246/out&, http://vo.astronet.ru/sai_cas/conesearch?cat=sdssdr7&tab=field&, http://vo.astronet.ru/sai_cas/conesearch?cat=sdssdr7&tab=photoobjall&, http://vo.astronet.ru/sai_cas/conesearch?cat=sdssdr7&tab=phototag&, http://vo.astronet.ru/sai_cas/conesearch?cat=sdssdr7&tab=specobjall&, http://vo.astronet.ru/sai_cas/conesearch?cat=sdssdr7&tab=specphotoall&, http://vo.astronet.ru/sai_cas/conesearch?cat=sdssdr7&tab=sppparams&, http://vo.astronet.ru/sai_cas/conesearch?cat=twomass&tab=psc&, http://vo.astronet.ru/sai_cas/conesearch?cat=twomass&tab=xsc&, http://vo.astronet.ru/sai_cas/conesearch?cat=usnoa2&tab=main&, http://vo.astronet.ru/sai_cas/conesearch?cat=usnob1&tab=main&, http://wfaudata.roe.ac.uk/sdssdr7-dsa/DirectCone?DSACAT=SDSS_DR7&DSATAB=Galaxy&, http://wfaudata.roe.ac.uk/sdssdr7-dsa/DirectCone?DSACAT=SDSS_DR7&DSATAB=PhotoObj&, http://wfaudata.roe.ac.uk/sdssdr7-dsa/DirectCone?DSACAT=SDSS_DR7&DSATAB=PhotoObjAll&, http://wfaudata.roe.ac.uk/sdssdr7-dsa/DirectCone?DSACAT=SDSS_DR7&DSATAB=Star&, http://wfaudata.roe.ac.uk/sdssdr8-dsa/DirectCone?DSACAT=SDSS_DR8&DSATAB=PhotoObjAll&, http://wfaudata.roe.ac.uk/sdssdr8-dsa/DirectCone?DSACAT=SDSS_DR8&DSATAB=SpecObjAll&, http://wfaudata.roe.ac.uk/twomass-dsa/DirectCone?DSACAT=TWOMASS&DSATAB=twomass_psc&, http://wfaudata.roe.ac.uk/twomass-dsa/DirectCone?DSACAT=TWOMASS&DSATAB=twomass_xsc&, http://www.nofs.navy.mil/cgi-bin/vo_cone.cgi?CAT=USNO-A2&, http://www.nofs.navy.mil/cgi-bin/vo_cone.cgi?CAT=USNO-B1& + +# VO Table warning codes that are considered non-critical +noncrit_warnings = W03, W06, W07, W09, W10, W15, W17, W20, W21, W22, W27, W28, W29, W41, W42, W48, W50 diff --git a/astropy/config/tests/data/astropy.0.3.windows.cfg b/astropy/config/tests/data/astropy.0.3.windows.cfg new file mode 100644 index 0000000..589703f --- /dev/null +++ b/astropy/config/tests/data/astropy.0.3.windows.cfg @@ -0,0 +1,149 @@ + +# Use Unicode characters when outputting values, and writing widgets to the +# console. +unicode_output = False +[utils.console] + +# When True, use ANSI color escape sequences when writing to the console. +use_color = True + +[logger] + +# Threshold for the logging messages. Logging messages that are less severe +# than this level will be ignored. The levels are 'DEBUG', 'INFO', 'WARNING', +# 'ERROR' +log_level = INFO + +# Whether to use color for the level names +use_color = True + +# Whether to log warnings.warn calls +log_warnings = True + +# Whether to log exceptions before raising them +log_exceptions = False + +# Whether to always log messages to a log file +log_to_file = False + +# The file to log messages to. When '', it defaults to a file 'astropy.log' in +# the astropy config directory. 
+log_file_path = "" + +# Threshold for logging messages to log_file_path +log_file_level = INFO + +# Format for log file entries +log_file_format = "%(asctime)r, %(origin)r, %(levelname)r, %(message)r" + +[coordinates.name_resolve] + +# The URL to Sesame's web-queryable database. +sesame_url = http://cdsweb.u-strasbg.fr/cgi-bin/nph-sesame/, http://vizier.cfa.harvard.edu/viz-bin/nph-sesame/ + +# This specifies the default database that SESAME will query when using the +# name resolve mechanism in the coordinates subpackage. Default is to search +# all databases, but this can be 'all', 'simbad', 'ned', or 'vizier'. +# Options: all, simbad, ned, vizier +sesame_database = all + +# This is the maximum time to wait for a response from a name resolve query to +# SESAME in seconds. +name_resolve_timeout = 5 + +[table.pprint] + +# Maximum number of lines for the pretty-printer to use if it cannot determine +# the terminal size. Negative numbers mean no limit. +max_lines = 25 + +# Maximum number of characters for the pretty-printer to use per line if it +# cannot determine the terminal size. Negative numbers mean no limit. +max_width = 80 + +[table.table] + +# The template that determines the name of a column if it cannot be +# determined. Uses new-style (format method) string formatting +auto_colname = col{0} + +[utils.data] + +# URL for astropy remote data site. +dataurl = http://data.astropy.org/ + +# Time to wait for remote data query (in seconds). +remote_timeout = 3.0 + +# Block size for computing MD5 file hashes. +hash_block_size = 65536 + +# Number of bytes of remote data to download per step. +download_block_size = 65536 + +# Number of times to try to get the lock while accessing the data cache before +# giving up. +download_cache_lock_attempts = 5 + +# If True, temporary download files created when the cache is inacessible will +# be deleted at the end of the python session. +delete_temporary_downloads_at_exit = True + +[io.fits] + +# If True, enable support for record-valued keywords as described by FITS WCS +# Paper IV. Otherwise they are treated as normal keywords. +enabled_record_valued_keyword_cards = True + +# If True, extension names (i.e. the EXTNAME keyword) should be treated as +# case-sensitive. +extension_name_case_sensitive = False + +# If True, automatically remove trailing whitespace for string values in +# headers. Otherwise the values are returned verbatim, with all whitespace +# intact. +strip_header_whitespace = True + +# If True, use memory-mapped file access to read/write the data in FITS files. +# This generally provides better performance, especially for large files, but +# may affect performance in I/O-heavy applications. +use_memmap = True + +[io.votable.table] + +# When True, treat fixable violations of the VOTable spec as exceptions. +pedantic = False + +[cosmology.core] + +# The default cosmology to use. Note this is only read on import, so changing +# this value at runtime has no effect. +default_cosmology = no_default + +[nddata.nddata] + +# Whether to issue a warning if NDData arithmetic is performed with +# uncertainties and the uncertainties do not support the propagation of +# correlated uncertainties. +warn_unsupported_correlated = True + +[vo.client.vos_catalog] + +# URL where VO Service database file is stored. +vos_baseurl = http://stsdas.stsci.edu/astrolib/vo_databases/ + +[vo.client.conesearch] + +# Conesearch database name. +conesearch_dbname = conesearch_good + +[vo.validator.validate] + +# Cone Search services master list for validation. 
+cs_mstr_list = http://vao.stsci.edu/directory/NVORegInt.asmx/VOTCapabilityPredOpt?predicate=1%3D1&capability=conesearch&VOTStyleOption=2 + +# Only check these Cone Search URLs. +cs_urls = http://archive.noao.edu/nvo/usno.php?cat=a&, http://gsss.stsci.edu/webservices/vo/ConeSearch.aspx?CAT=GSC23&, http://irsa.ipac.caltech.edu/cgi-bin/Oasis/CatSearch/nph-catsearch?CAT=fp_psc&, http://vizier.u-strasbg.fr/viz-bin/votable/-A?-source=I/220/out&, http://vizier.u-strasbg.fr/viz-bin/votable/-A?-source=I/243/out&, http://vizier.u-strasbg.fr/viz-bin/votable/-A?-source=I/252/out&, http://vizier.u-strasbg.fr/viz-bin/votable/-A?-source=I/254/out&, http://vizier.u-strasbg.fr/viz-bin/votable/-A?-source=I/255/out&, http://vizier.u-strasbg.fr/viz-bin/votable/-A?-source=I/284/out&, http://vizier.u-strasbg.fr/viz-bin/votable/-A?-source=II/246/out&, http://vo.astronet.ru/sai_cas/conesearch?cat=sdssdr7&tab=field&, http://vo.astronet.ru/sai_cas/conesearch?cat=sdssdr7&tab=photoobjall&, http://vo.astronet.ru/sai_cas/conesearch?cat=sdssdr7&tab=phototag&, http://vo.astronet.ru/sai_cas/conesearch?cat=sdssdr7&tab=specobjall&, http://vo.astronet.ru/sai_cas/conesearch?cat=sdssdr7&tab=specphotoall&, http://vo.astronet.ru/sai_cas/conesearch?cat=sdssdr7&tab=sppparams&, http://vo.astronet.ru/sai_cas/conesearch?cat=twomass&tab=psc&, http://vo.astronet.ru/sai_cas/conesearch?cat=twomass&tab=xsc&, http://vo.astronet.ru/sai_cas/conesearch?cat=usnoa2&tab=main&, http://vo.astronet.ru/sai_cas/conesearch?cat=usnob1&tab=main&, http://wfaudata.roe.ac.uk/sdssdr7-dsa/DirectCone?DSACAT=SDSS_DR7&DSATAB=Galaxy&, http://wfaudata.roe.ac.uk/sdssdr7-dsa/DirectCone?DSACAT=SDSS_DR7&DSATAB=PhotoObj&, http://wfaudata.roe.ac.uk/sdssdr7-dsa/DirectCone?DSACAT=SDSS_DR7&DSATAB=PhotoObjAll&, http://wfaudata.roe.ac.uk/sdssdr7-dsa/DirectCone?DSACAT=SDSS_DR7&DSATAB=Star&, http://wfaudata.roe.ac.uk/sdssdr8-dsa/DirectCone?DSACAT=SDSS_DR8&DSATAB=PhotoObjAll&, http://wfaudata.roe.ac.uk/sdssdr8-dsa/DirectCone?DSACAT=SDSS_DR8&DSATAB=SpecObjAll&, http://wfaudata.roe.ac.uk/twomass-dsa/DirectCone?DSACAT=TWOMASS&DSATAB=twomass_psc&, http://wfaudata.roe.ac.uk/twomass-dsa/DirectCone?DSACAT=TWOMASS&DSATAB=twomass_xsc&, http://www.nofs.navy.mil/cgi-bin/vo_cone.cgi?CAT=USNO-A2&, http://www.nofs.navy.mil/cgi-bin/vo_cone.cgi?CAT=USNO-B1& + +# VO Table warning codes that are considered non-critical +noncrit_warnings = W03, W06, W07, W09, W10, W15, W17, W20, W21, W22, W27, W28, W29, W41, W42, W48, W50 diff --git a/astropy/config/tests/data/deprecated.cfg b/astropy/config/tests/data/deprecated.cfg new file mode 100644 index 0000000..a6cb084 --- /dev/null +++ b/astropy/config/tests/data/deprecated.cfg @@ -0,0 +1,2 @@ +[table.pprint] +max_lines = 25 diff --git a/astropy/config/tests/data/empty.cfg b/astropy/config/tests/data/empty.cfg new file mode 100644 index 0000000..a069dfd --- /dev/null +++ b/astropy/config/tests/data/empty.cfg @@ -0,0 +1,15 @@ +## Use Unicode characters when outputting values, and writing widgets to the +## console. +#unicode_output = False + +[utils.console] + +## When True, use ANSI color escape sequences when writing to the console. +# use_color = True + +[logger] + +## Threshold for the logging messages. Logging messages that are less severe +## than this level will be ignored. 
The levels are 'DEBUG', 'INFO', 'WARNING', +## 'ERROR' +# log_level = INFO diff --git a/astropy/config/tests/data/not_empty.cfg b/astropy/config/tests/data/not_empty.cfg new file mode 100644 index 0000000..c7a660f --- /dev/null +++ b/astropy/config/tests/data/not_empty.cfg @@ -0,0 +1,15 @@ +## Use Unicode characters when outputting values, and writing widgets to the +## console. +#unicode_output = False + +[utils.console] + +## When True, use ANSI color escape sequences when writing to the console. +# use_color = True + +[logger] + +## Threshold for the logging messages. Logging messages that are less severe +## than this level will be ignored. The levels are 'DEBUG', 'INFO', 'WARNING', +## 'ERROR' +log_level = INFO diff --git a/astropy/config/tests/test_configs.py b/astropy/config/tests/test_configs.py new file mode 100644 index 0000000..2d20d11 --- /dev/null +++ b/astropy/config/tests/test_configs.py @@ -0,0 +1,358 @@ +# -*- coding: utf-8 -*- +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import io +import os +import sys +import subprocess + +import pytest + +from ...tests.helper import catch_warnings +from ...extern import six + +from ...utils.data import get_pkg_data_filename +from .. import configuration +from .. import paths +from ...utils.exceptions import AstropyDeprecationWarning + + +def test_paths(): + assert 'astropy' in paths.get_config_dir() + assert 'astropy' in paths.get_cache_dir() + + +def test_set_temp_config(tmpdir, monkeypatch): + monkeypatch.setattr(paths.set_temp_config, '_temp_path', None) + + orig_config_dir = paths.get_config_dir() + temp_config_dir = str(tmpdir.mkdir('config')) + temp_astropy_config = os.path.join(temp_config_dir, 'astropy') + + # Test decorator mode + @paths.set_temp_config(temp_config_dir) + def test_func(): + assert paths.get_config_dir() == temp_astropy_config + + # Test temporary restoration of original default + with paths.set_temp_config() as d: + assert d == orig_config_dir == paths.get_config_dir() + + test_func() + + # Test context manager mode (with cleanup) + with paths.set_temp_config(temp_config_dir, delete=True): + assert paths.get_config_dir() == temp_astropy_config + + assert not os.path.exists(temp_config_dir) + + +def test_set_temp_cache(tmpdir, monkeypatch): + monkeypatch.setattr(paths.set_temp_cache, '_temp_path', None) + + orig_cache_dir = paths.get_cache_dir() + temp_cache_dir = str(tmpdir.mkdir('cache')) + temp_astropy_cache = os.path.join(temp_cache_dir, 'astropy') + + # Test decorator mode + @paths.set_temp_cache(temp_cache_dir) + def test_func(): + assert paths.get_cache_dir() == temp_astropy_cache + + # Test temporary restoration of original default + with paths.set_temp_cache() as d: + assert d == orig_cache_dir == paths.get_cache_dir() + + test_func() + + # Test context manager mode (with cleanup) + with paths.set_temp_cache(temp_cache_dir, delete=True): + assert paths.get_cache_dir() == temp_astropy_cache + + assert not os.path.exists(temp_cache_dir) + + +def test_config_file(): + from ..configuration import get_config, reload_config + + apycfg = get_config('astropy') + assert apycfg.filename.endswith('astropy.cfg') + + cfgsec = get_config('astropy.config') + assert cfgsec.depth == 1 + assert cfgsec.name == 'config' + assert cfgsec.parent.filename.endswith('astropy.cfg') + + reload_config('astropy') + + +def test_configitem(): + + from ..configuration import ConfigNamespace, ConfigItem, get_config + + ci = 
ConfigItem(34, 'this is a Description') + + class Conf(ConfigNamespace): + tstnm = ci + + conf = Conf() + + assert ci.module == 'astropy.config.tests.test_configs' + assert ci() == 34 + assert ci.description == 'this is a Description' + + assert conf.tstnm == 34 + + sec = get_config(ci.module) + assert sec['tstnm'] == 34 + + ci.description = 'updated Descr' + ci.set(32) + assert ci() == 32 + + # It's useful to go back to the default to allow other test functions to + # call this one and still be in the default configuration. + ci.description = 'this is a Description' + ci.set(34) + assert ci() == 34 + + +def test_configitem_types(): + + from ..configuration import ConfigNamespace, ConfigItem + + cio = ConfigItem(['op1', 'op2', 'op3']) + + class Conf(ConfigNamespace): + tstnm1 = ConfigItem(34) + tstnm2 = ConfigItem(34.3) + tstnm3 = ConfigItem(True) + tstnm4 = ConfigItem('astring') + + conf = Conf() + + assert isinstance(conf.tstnm1, int) + assert isinstance(conf.tstnm2, float) + assert isinstance(conf.tstnm3, bool) + assert isinstance(conf.tstnm4, six.text_type) + + with pytest.raises(TypeError): + conf.tstnm1 = 34.3 + conf.tstnm2 = 12 # this should succeed, as up-casting is allowed + with pytest.raises(TypeError): + conf.tstnm3 = 'fasd' + with pytest.raises(TypeError): + conf.tstnm4 = 546.245 + + +def test_configitem_options(tmpdir): + + from ..configuration import ConfigNamespace, ConfigItem, get_config + + cio = ConfigItem(['op1', 'op2', 'op3']) + + class Conf(ConfigNamespace): + tstnmo = cio + + conf = Conf() + + sec = get_config(cio.module) + + assert isinstance(cio(), six.text_type) + assert cio() == 'op1' + assert sec['tstnmo'] == 'op1' + + cio.set('op2') + with pytest.raises(TypeError): + cio.set('op5') + assert sec['tstnmo'] == 'op2' + + # now try saving + apycfg = sec + while apycfg.parent is not apycfg: + apycfg = apycfg.parent + f = tmpdir.join('astropy.cfg') + with io.open(f.strpath, 'wb') as fd: + apycfg.write(fd) + with io.open(f.strpath, 'rU', encoding='utf-8') as fd: + lns = [x.strip() for x in fd.readlines()] + + assert 'tstnmo = op2' in lns + + +def test_config_noastropy_fallback(monkeypatch): + """ + Tests to make sure configuration items fall back to their defaults when + there's a problem accessing the astropy directory + """ + + # make sure the config directory is not searched + monkeypatch.setenv(str('XDG_CONFIG_HOME'), 'foo') + monkeypatch.delenv(str('XDG_CONFIG_HOME')) + monkeypatch.setattr(paths.set_temp_config, '_temp_path', None) + + # make sure the _find_or_create_astropy_dir function fails as though the + # astropy dir could not be accessed + def osraiser(dirnm, linkto): + raise OSError + monkeypatch.setattr(paths, '_find_or_create_astropy_dir', osraiser) + + # also have to make sure the stored configuration objects are cleared + monkeypatch.setattr(configuration, '_cfgobjs', {}) + + with pytest.raises(OSError): + # make sure the config dir search fails + paths.get_config_dir() + + # now run the basic tests, and make sure the warning about no astropy + # is present + with catch_warnings(configuration.ConfigurationMissingWarning) as w: + test_configitem() + assert len(w) == 1 + w = w[0] + assert 'Configuration defaults will be used' in str(w.message) + + +def test_configitem_setters(): + + from ..configuration import ConfigNamespace, ConfigItem + + class Conf(ConfigNamespace): + tstnm12 = ConfigItem(42, 'this is another Description') + + conf = Conf() + + assert conf.tstnm12 == 42 + with conf.set_temp('tstnm12', 45): + assert conf.tstnm12 == 45 + assert conf.tstnm12
== 42 + + conf.tstnm12 = 43 + assert conf.tstnm12 == 43 + + with conf.set_temp('tstnm12', 46): + assert conf.tstnm12 == 46 + + # Make sure it is reset even with Exception + try: + with conf.set_temp('tstnm12', 47): + raise Exception + except Exception: + pass + + assert conf.tstnm12 == 43 + + +def test_empty_config_file(): + from ..configuration import is_unedited_config_file + + def get_content(fn): + with io.open(get_pkg_data_filename(fn), 'rt', encoding='latin-1') as fd: + return fd.read() + + content = get_content('data/empty.cfg') + assert is_unedited_config_file(content) + + content = get_content('data/not_empty.cfg') + assert not is_unedited_config_file(content) + + content = get_content('data/astropy.0.3.cfg') + assert is_unedited_config_file(content) + + content = get_content('data/astropy.0.3.windows.cfg') + assert is_unedited_config_file(content) + + +class TestAliasRead(object): + + def setup_class(self): + configuration._override_config_file = get_pkg_data_filename('data/alias.cfg') + + def test_alias_read(self): + from astropy.utils.data import conf + + with catch_warnings() as w: + conf.reload() + assert conf.remote_timeout == 42 + + assert len(w) == 1 + assert str(w[0].message).startswith( + "Config parameter 'name_resolve_timeout' in section " + "[coordinates.name_resolve]") + + def teardown_class(self): + from astropy.utils.data import conf + + configuration._override_config_file = None + conf.reload() + + +def test_configitem_unicode(tmpdir): + + from ..configuration import ConfigNamespace, ConfigItem, get_config + + cio = ConfigItem('ასტრონომიის') + + class Conf(ConfigNamespace): + tstunicode = cio + + conf = Conf() + + sec = get_config(cio.module) + + assert isinstance(cio(), six.text_type) + assert cio() == 'ასტრონომიის' + assert sec['tstunicode'] == 'ასტრონომიის' + + +def test_warning_move_to_top_level(): + # Check that the warning about deprecated config items in the + # file works. See #2514 + from ... import conf + + configuration._override_config_file = get_pkg_data_filename('data/deprecated.cfg') + + try: + with catch_warnings(AstropyDeprecationWarning) as w: + conf.reload() + conf.max_lines + assert len(w) == 1 + finally: + configuration._override_config_file = None + conf.reload() + + +def test_no_home(): + # "import astropy" fails when neither $HOME nor $XDG_CONFIG_HOME + # is set. To test, we unset those environment variables for a + # subprocess and try to import astropy. + + test_path = os.path.dirname(__file__) + astropy_path = os.path.abspath( + os.path.join(test_path, '..', '..', '..')) + + env = os.environ.copy() + paths = [astropy_path] + if env.get('PYTHONPATH'): + paths.append(env.get('PYTHONPATH')) + env[str('PYTHONPATH')] = str(os.pathsep.join(paths)) + + for val in ['HOME', 'XDG_CONFIG_HOME']: + if val in env: + del env[val] + + retcode = subprocess.check_call( + [sys.executable, '-c', 'import astropy'], + env=env) + + assert retcode == 0 + + +def test_unedited_template(): + # Test that the config file is written at most once + config_dir = os.path.join(os.path.dirname(__file__), '..', '..') + configuration.update_default_config('astropy', config_dir) + assert configuration.update_default_config('astropy', config_dir) is False
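The tests above exercise the configuration machinery end to end. As a compact orientation, here is a minimal sketch of the same API (an editorial illustration, not part of the imported tarball; it assumes astropy.config re-exports ConfigNamespace and ConfigItem from astropy.config.configuration, the module the tests import directly, and the item name demo_rows is hypothetical):

    # A ConfigItem's default value fixes its type, and ConfigNamespace
    # collects items into a conf-style object, as in the tests above.
    from astropy.config import ConfigNamespace, ConfigItem

    class DemoConf(ConfigNamespace):
        demo_rows = ConfigItem(25, 'Maximum number of rows to print.')

    demo = DemoConf()
    assert demo.demo_rows == 25           # typed default (int)

    with demo.set_temp('demo_rows', -1):  # temporary override...
        assert demo.demo_rows == -1
    assert demo.demo_rows == 25           # ...restored on exit, even on error

diff --git a/astropy/conftest.py b/astropy/conftest.py new file mode 100644 index 0000000..afa4cc6 --- /dev/null +++ b/astropy/conftest.py @@ -0,0 +1,16 @@ +# this imports plugins that configure py.test for astropy tests.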
+# by importing them here in conftest.py they are discoverable by py.test +# no matter how it is invoked within the astropy tree. + +from .tests.pytest_plugins import * + +try: + import matplotlib +except ImportError: + pass +else: + matplotlib.use('Agg') + +enable_deprecations_as_exceptions(include_astropy_deprecations=False) + +PYTEST_HEADER_MODULES['Cython'] = 'cython' diff --git a/astropy/constants/__init__.py b/astropy/constants/__init__.py new file mode 100644 index 0000000..24ee254 --- /dev/null +++ b/astropy/constants/__init__.py @@ -0,0 +1,56 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" +Contains astronomical and physical constants for use in Astropy or other +places. + +A typical use case might be:: + + >>> from astropy.constants import c, m_e + >>> # ... define the mass of something you want the rest energy of as m ... + >>> m = m_e + >>> E = m * c**2 + >>> E.to('MeV') # doctest: +FLOAT_CMP + <Quantity 0.5109989461 MeV> + +""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import itertools + +# Hack to make circular imports with units work +try: + from .. import units + del units +except ImportError: + pass + +from .constant import Constant, EMConstant +from . import si +from . import cgs +from . import codata2014, iau2015 + +# for updating the constants module docstring +_lines = [ + 'The following constants are available:\n', + '========== ============== ================ =========================', + ' Name Value Unit Description', + '========== ============== ================ =========================', +] + +for _nm, _c in itertools.chain(sorted(vars(codata2014).items()), + sorted(vars(iau2015).items())): + if isinstance(_c, Constant) and _c.abbrev not in locals(): + locals()[_c.abbrev] = _c.__class__(_c.abbrev, _c.name, _c.value, + _c._unit_string, _c.uncertainty, + _c.reference) + + _lines.append('{0:^10} {1:^14.9g} {2:^16} {3}'.format( + _c.abbrev, _c.value, _c._unit_string, _c.name)) + +_lines.append(_lines[1]) + +if __doc__ is not None: + __doc__ += '\n'.join(_lines) + +del _lines, _nm, _c diff --git a/astropy/constants/astropyconst13.py b/astropy/constants/astropyconst13.py new file mode 100644 index 0000000..c140c0c --- /dev/null +++ b/astropy/constants/astropyconst13.py @@ -0,0 +1,20 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" +Astronomical and physics constants for Astropy v1.3 and earlier. +See :mod:`astropy.constants` for a complete listing of constants +defined in Astropy. +""" + + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import itertools + +from .constant import Constant +from . import codata2010, iau2012 + +for _nm, _c in itertools.chain(sorted(vars(codata2010).items()), + sorted(vars(iau2012).items())): + if (isinstance(_c, Constant) and _c.abbrev not in locals()): + locals()[_c.abbrev] = _c diff --git a/astropy/constants/astropyconst20.py b/astropy/constants/astropyconst20.py new file mode 100644 index 0000000..89d1dd3 --- /dev/null +++ b/astropy/constants/astropyconst20.py @@ -0,0 +1,19 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" +Astronomical and physics constants for Astropy v2.0. See :mod:`astropy.constants` +for a complete listing of constants defined in Astropy. +""" + + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import itertools + +from .constant import Constant +from .
import codata2014, iau2015 + +for _nm, _c in itertools.chain(sorted(vars(codata2014).items()), + sorted(vars(iau2015).items())): + if (isinstance(_c, Constant) and _c.abbrev not in locals()): + locals()[_c.abbrev] = _c diff --git a/astropy/constants/cgs.py b/astropy/constants/cgs.py new file mode 100644 index 0000000..ab681e3 --- /dev/null +++ b/astropy/constants/cgs.py @@ -0,0 +1,18 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" +Astronomical and physics constants in cgs units. See :mod:`astropy.constants` +for a complete listing of constants defined in Astropy. +""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import itertools + +from .constant import Constant +from . import codata2014, iau2015 + +for _nm, _c in itertools.chain(sorted(vars(codata2014).items()), + sorted(vars(iau2015).items())): + if (isinstance(_c, Constant) and _c.abbrev not in locals() + and _c.system in ['esu', 'gauss', 'emu']): + locals()[_c.abbrev] = _c diff --git a/astropy/constants/codata2010.py b/astropy/constants/codata2010.py new file mode 100644 index 0000000..353aefe --- /dev/null +++ b/astropy/constants/codata2010.py @@ -0,0 +1,112 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" +Astronomical and physics constants in SI units. See :mod:`astropy.constants` +for a complete listing of constants defined in Astropy. +""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import numpy as np + +from .constant import Constant, EMConstant + + +# PHYSICAL CONSTANTS + +class CODATA2010(Constant): + default_reference = 'CODATA 2010' + _registry = {} + _has_incompatible_units = set() + + def __new__(cls, abbrev, name, value, unit, uncertainty, + reference=default_reference, system=None): + return(super(CODATA2010, cls).__new__(cls, abbrev, name, value, unit, + uncertainty, reference, system)) + + +class EMCODATA2010(CODATA2010, EMConstant): + _registry = CODATA2010._registry + + +h = CODATA2010('h', "Planck constant", 6.62606957e-34, 'J s', + 0.00000029e-34, system='si') + +hbar = CODATA2010('hbar', "Reduced Planck constant", + h.value * 0.5 / np.pi, 'J s', + h.uncertainty * 0.5 / np.pi, + h.reference, system='si') + +k_B = CODATA2010('k_B', "Boltzmann constant", 1.3806488e-23, 'J / (K)', + 0.0000013e-23, system='si') + +c = CODATA2010('c', "Speed of light in vacuum", 2.99792458e8, 'm / (s)', 0., + system='si') + +G = CODATA2010('G', "Gravitational constant", 6.67384e-11, 'm3 / (kg s2)', + 0.00080e-11, system='si') + +g0 = CODATA2010('g0', "Standard acceleration of gravity", 9.80665, 'm / s2', 0.0, + system='si') + +m_p = CODATA2010('m_p', "Proton mass", 1.672621777e-27, 'kg', 0.000000074e-27, + system='si') + +m_n = CODATA2010('m_n', "Neutron mass", 1.674927351e-27, 'kg', 0.000000074e-27, + system='si') + +m_e = CODATA2010('m_e', "Electron mass", 9.10938291e-31, 'kg', 0.00000040e-31, + system='si') + +u = CODATA2010('u', "Atomic mass", 1.660538921e-27, 'kg', 0.000000073e-27, + system='si') + +sigma_sb = CODATA2010('sigma_sb', "Stefan-Boltzmann constant", 5.670373e-8, + 'W / (K4 m2)', 0.000021e-8, system='si') + +e = EMCODATA2010('e', 'Electron charge', 1.602176565e-19, 'C', 0.000000035e-19, + system='si') + +eps0 = EMCODATA2010('eps0', 'Electric constant', 8.854187817e-12, 'F/m', 0.0, + system='si') + +N_A = CODATA2010('N_A', "Avogadro's number", 6.02214129e23, '1 / (mol)', + 0.00000027e23, system='si') + +R = CODATA2010('R', "Gas constant", 8.3144621, 'J / (K mol)', 0.0000075, + 
system='si') + +Ryd = CODATA2010('Ryd', 'Rydberg constant', 10973731.568539, '1 / (m)', + 0.000055, system='si') + +a0 = CODATA2010('a0', "Bohr radius", 0.52917721092e-10, 'm', 0.00000000017e-10, + system='si') + +muB = CODATA2010('muB', "Bohr magneton", 927.400968e-26, 'J/T', 0.00002e-26, + system='si') + +alpha = CODATA2010('alpha', "Fine-structure constant", 7.2973525698e-3, + '', 0.0000000024e-3, system='si') + +atm = CODATA2010('atm', "Standard atmosphere", 101325, 'Pa', 0.0, + system='si') + +mu0 = CODATA2010('mu0', "Magnetic constant", 4.0e-7 * np.pi, 'N/A2', 0.0, + system='si') + +sigma_T = CODATA2010('sigma_T', "Thomson scattering cross-section", + 0.6652458734e-28, 'm2', 0.0000000013e-28, system='si') + +b_wien = Constant('b_wien', 'Wien wavelength displacement law constant', + 2.8977721e-3, 'm K', 0.0000026e-3, 'CODATA 2010', system='si') + +# cgs constants +# Only constants that cannot be converted directly from S.I. are defined here. + +e_esu = EMCODATA2010(e.abbrev, e.name, e.value * c.value * 10.0, + 'statC', e.uncertainty * c.value * 10.0, system='esu') + +e_emu = EMCODATA2010(e.abbrev, e.name, e.value / 10, 'abC', + e.uncertainty / 10, system='emu') + +e_gauss = EMCODATA2010(e.abbrev, e.name, e.value * c.value * 10.0, + 'Fr', e.uncertainty * c.value * 10.0, system='gauss') diff --git a/astropy/constants/codata2014.py b/astropy/constants/codata2014.py new file mode 100644 index 0000000..19b33f3 --- /dev/null +++ b/astropy/constants/codata2014.py @@ -0,0 +1,107 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" +Astronomical and physics constants in SI units. See :mod:`astropy.constants` +for a complete listing of constants defined in Astropy. +""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import numpy as np + +from .constant import Constant, EMConstant + + +# PHYSICAL CONSTANTS + +class CODATA2014(Constant): + default_reference = 'CODATA 2014' + _registry = {} + _has_incompatible_units = set() + + +class EMCODATA2014(CODATA2014, EMConstant): + _registry = CODATA2014._registry + + +h = CODATA2014('h', "Planck constant", 6.626070040e-34, + 'J s', 0.000000081e-34, system='si') + +hbar = CODATA2014('hbar', "Reduced Planck constant", 1.054571800e-34, + 'J s', 0.000000013e-34, system='si') + +k_B = CODATA2014('k_B', "Boltzmann constant", 1.38064852e-23, + 'J / (K)', 0.00000079e-23, system='si') + +c = CODATA2014('c', "Speed of light in vacuum", 299792458., + 'm / (s)', 0.0, system='si') + + +G = CODATA2014('G', "Gravitational constant", 6.67408e-11, + 'm3 / (kg s2)', 0.00031e-11, system='si') + +g0 = CODATA2014('g0', "Standard acceleration of gravity", 9.80665, + 'm / s2', 0.0, system='si') + +m_p = CODATA2014('m_p', "Proton mass", 1.672621898e-27, + 'kg', 0.000000021e-27, system='si') + +m_n = CODATA2014('m_n', "Neutron mass", 1.674927471e-27, + 'kg', 0.000000021e-27, system='si') + +m_e = CODATA2014('m_e', "Electron mass", 9.10938356e-31, + 'kg', 0.00000011e-31, system='si') + +u = CODATA2014('u', "Atomic mass", 1.660539040e-27, + 'kg', 0.000000020e-27, system='si') + +sigma_sb = CODATA2014('sigma_sb', "Stefan-Boltzmann constant", 5.670367e-8, + 'W / (K4 m2)', 0.000013e-8, system='si') + +e = EMCODATA2014('e', 'Electron charge', 1.6021766208e-19, + 'C', 0.0000000098e-19, system='si') + +eps0 = EMCODATA2014('eps0', 'Electric constant', 8.854187817e-12, + 'F/m', 0.0, system='si') + +N_A = CODATA2014('N_A', "Avogadro's number", 6.022140857e23, + '1 / (mol)', 0.000000074e23, system='si') + +R = CODATA2014('R', 
"Gas constant", 8.3144598, + 'J / (K mol)', 0.0000048, system='si') + +Ryd = CODATA2014('Ryd', 'Rydberg constant', 10973731.568508, + '1 / (m)', 0.000065, system='si') + +a0 = CODATA2014('a0', "Bohr radius", 0.52917721067e-10, + 'm', 0.00000000012e-10, system='si') + +muB = CODATA2014('muB', "Bohr magneton", 927.4009994e-26, + 'J/T', 0.00002e-26, system='si') + +alpha = CODATA2014('alpha', "Fine-structure constant", 7.2973525664e-3, + '', 0.0000000017e-3, system='si') + +atm = CODATA2014('atm', "Standard atmosphere", 101325, + 'Pa', 0.0, system='si') + +mu0 = CODATA2014('mu0', "Magnetic constant", 4.0e-7 * np.pi, 'N/A2', 0.0, + system='si') + +sigma_T = CODATA2014('sigma_T', "Thomson scattering cross-section", + 0.66524587158e-28, 'm2', 0.00000000091e-28, + system='si') + +b_wien = CODATA2014('b_wien', 'Wien wavelength displacement law constant', + 2.8977729e-3, 'm K', 00.0000017e-3, system='si') + +# cgs constants +# Only constants that cannot be converted directly from S.I. are defined here. + +e_esu = EMCODATA2014(e.abbrev, e.name, e.value * c.value * 10.0, + 'statC', e.uncertainty * c.value * 10.0, system='esu') + +e_emu = EMCODATA2014(e.abbrev, e.name, e.value / 10, 'abC', + e.uncertainty / 10, system='emu') + +e_gauss = EMCODATA2014(e.abbrev, e.name, e.value * c.value * 10.0, + 'Fr', e.uncertainty * c.value * 10.0, system='gauss') diff --git a/astropy/constants/constant.py b/astropy/constants/constant.py new file mode 100644 index 0000000..1d0c05d --- /dev/null +++ b/astropy/constants/constant.py @@ -0,0 +1,237 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, division, print_function, + unicode_literals) +from ..extern import six + +import functools +import types +import warnings +import numpy as np + +from ..units.core import Unit, UnitsError +from ..units.quantity import Quantity +from ..utils import lazyproperty +from ..utils.exceptions import AstropyUserWarning +from ..utils.misc import InheritDocstrings + +__all__ = ['Constant', 'EMConstant'] + + +class ConstantMeta(InheritDocstrings): + """Metaclass for the :class:`Constant`. The primary purpose of this is to + wrap the double-underscore methods of :class:`Quantity` which is the + superclass of :class:`Constant`. + + In particular this wraps the operator overloads such as `__add__` to + prevent their use with constants such as ``e`` from being used in + expressions without specifying a system. The wrapper checks to see if the + constant is listed (by name) in ``Constant._has_incompatible_units``, a set + of those constants that are defined in different systems of units are + physically incompatible. It also performs this check on each `Constant` if + it hasn't already been performed (the check is deferred until the + `Constant` is actually used in an expression to speed up import times, + among other reasons). 
+ """ + + def __new__(mcls, name, bases, d): + def wrap(meth): + @functools.wraps(meth) + def wrapper(self, *args, **kwargs): + name_lower = self.name.lower() + instances = self._registry[name_lower] + if not self._checked_units: + for inst in six.itervalues(instances): + try: + self.unit.to(inst.unit) + except UnitsError: + self._has_incompatible_units.add(name_lower) + self._checked_units = True + + if (not self.system and + name_lower in self._has_incompatible_units): + systems = sorted([x for x in instances if x]) + raise TypeError( + 'Constant {0!r} does not have physically compatible ' + 'units across all systems of units and cannot be ' + 'combined with other values without specifying a ' + 'system (eg. {1}.{2})'.format(self.abbrev, self.abbrev, + systems[0])) + + return meth(self, *args, **kwargs) + + return wrapper + + # The wrapper applies to so many of the __ methods that it's easier to + # just exclude the ones it doesn't apply to + exclude = set(['__new__', '__array_finalize__', '__array_wrap__', + '__dir__', '__getattr__', '__init__', '__str__', + '__repr__', '__hash__', '__iter__', '__getitem__', + '__len__', '__nonzero__', '__quantity_subclass__']) + for attr, value in six.iteritems(vars(Quantity)): + if (isinstance(value, types.FunctionType) and + attr.startswith('__') and attr.endswith('__') and + attr not in exclude): + d[attr] = wrap(value) + + return super(ConstantMeta, mcls).__new__(mcls, name, bases, d) + + +@six.add_metaclass(ConstantMeta) +class Constant(Quantity): + """A physical or astronomical constant. + + These objects are quantities that are meant to represent physical + constants. + """ + _registry = {} + _has_incompatible_units = set() + + def __new__(cls, abbrev, name, value, unit, uncertainty, + reference=None, system=None): + if reference is None: + reference = getattr(cls, 'default_reference', None) + if reference is None: + raise TypeError("{} requires a reference.".format(cls)) + name_lower = name.lower() + instances = cls._registry.setdefault(name_lower, {}) + # By-pass Quantity initialization, since units may not yet be + # initialized here, and we store the unit in string form. + inst = np.array(value).view(cls) + + if system in instances: + warnings.warn('Constant {0!r} already has a definition in the ' + '{1!r} system from {2!r} reference'.format( + name, system, reference), AstropyUserWarning) + for c in six.itervalues(instances): + if system is not None and not hasattr(c.__class__, system): + setattr(c, system, inst) + if c.system is not None and not hasattr(inst.__class__, c.system): + setattr(inst, c.system, c) + + instances[system] = inst + + inst._abbrev = abbrev + inst._name = name + inst._value = value + inst._unit_string = unit + inst._uncertainty = uncertainty + inst._reference = reference + inst._system = system + + inst._checked_units = False + return inst + + def __repr__(self): + return ('<{0} name={1!r} value={2} uncertainty={3} unit={4!r} ' + 'reference={5!r}>'.format(self.__class__, self.name, self.value, + self.uncertainty, str(self.unit), + self.reference)) + + def __str__(self): + return (' Name = {0}\n' + ' Value = {1}\n' + ' Uncertainty = {2}\n' + ' Unit = {3}\n' + ' Reference = {4}'.format(self.name, self.value, + self.uncertainty, self.unit, + self.reference)) + + def __quantity_subclass__(self, unit): + return super(Constant, self).__quantity_subclass__(unit)[0], False + + def copy(self): + """ + Return a copy of this `Constant` instance. 
Since they are by + definition immutable, this merely returns another reference to + ``self``. + """ + return self + __deepcopy__ = __copy__ = copy + + @property + def abbrev(self): + """A typical ASCII text abbreviation of the constant, also generally + the same as the Python variable used for this constant. + """ + + return self._abbrev + + @property + def name(self): + """The full name of the constant.""" + + return self._name + + @lazyproperty + def _unit(self): + """The unit(s) in which this constant is defined.""" + + return Unit(self._unit_string) + + @property + def uncertainty(self): + """The known uncertainty in this constant's value.""" + + return self._uncertainty + + @property + def reference(self): + """The source used for the value of this constant.""" + + return self._reference + + @property + def system(self): + """The system of units in which this constant is defined (typically + `None` so long as the constant's units can be directly converted + between systems). + """ + + return self._system + + def _instance_or_super(self, key): + instances = self._registry[self.name.lower()] + inst = instances.get(key) + if inst is not None: + return inst + else: + return getattr(super(Constant, self), key) + + @property + def si(self): + """If the Constant is defined in the SI system return that instance of + the constant, else convert to a Quantity in the appropriate SI units. + """ + + return self._instance_or_super('si') + + @property + def cgs(self): + """If the Constant is defined in the CGS system return that instance of + the constant, else convert to a Quantity in the appropriate CGS units. + """ + + return self._instance_or_super('cgs') + + def __array_finalize__(self, obj): + for attr in ('_abbrev', '_name', '_value', '_unit_string', + '_uncertainty', '_reference', '_system'): + setattr(self, attr, getattr(obj, attr, None)) + + self._checked_units = getattr(obj, '_checked_units', False) + + +class EMConstant(Constant): + """An electromagnetic constant.""" + + @property + def cgs(self): + """Overridden for EMConstant to raise a `TypeError` + emphasizing that there are multiple EM extensions to CGS. + """ + + raise TypeError("Cannot convert EM constants to cgs because there " + "are different systems for E.M constants within the " + "c.g.s system (ESU, Gaussian, etc.). Instead, " + "directly use the constant with the appropriate " + "suffix (e.g. e.esu, e.gauss, etc.).") diff --git a/astropy/constants/iau2012.py b/astropy/constants/iau2012.py new file mode 100644 index 0000000..4c8ed67 --- /dev/null +++ b/astropy/constants/iau2012.py @@ -0,0 +1,78 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" +Astronomical and physics constants in SI units. See :mod:`astropy.constants` +for a complete listing of constants defined in Astropy. +""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import numpy as np + +from .constant import Constant + +# ASTRONOMICAL CONSTANTS + + +class IAU2012(Constant): + default_reference = 'IAU 2012' + _registry = {} + _has_incompatible_units = set() + + +# DISTANCE + +# Astronomical Unit +au = IAU2012('au', "Astronomical Unit", 1.49597870700e11, 'm', 0.0, + "IAU 2012 Resolution B2", system='si') + +# Parsec + +pc = IAU2012('pc', "Parsec", au.value / np.tan(np.radians(1. / 3600.)), 'm', + au.uncertainty / np.tan(np.radians(1. / 3600.)), + "Derived from au", system='si') + +# Kiloparsec +kpc = IAU2012('kpc', "Kiloparsec", + 1000. * au.value / np.tan(np.radians(1. / 3600.)), 'm', + 1000. 
* au.uncertainty / np.tan(np.radians(1. / 3600.)), + "Derived from au", system='si') + +# Luminosity +L_bol0 = IAU2012('L_bol0', "Luminosity for absolute bolometric magnitude 0", + 3.0128e28, "W", 0.0, "IAU 2015 Resolution B 2", system='si') + + +# SOLAR QUANTITIES + +# Solar luminosity +L_sun = IAU2012('L_sun', "Solar luminosity", 3.846e26, 'W', 0.0005e26, + "Allen's Astrophysical Quantities 4th Ed.", system='si') + +# Solar mass +M_sun = IAU2012('M_sun', "Solar mass", 1.9891e30, 'kg', 0.00005e30, + "Allen's Astrophysical Quantities 4th Ed.", system='si') + +# Solar radius +R_sun = IAU2012('R_sun', "Solar radius", 6.95508e8, 'm', 0.00026e8, + "Allen's Astrophysical Quantities 4th Ed.", system='si') + + +# OTHER SOLAR SYSTEM QUANTITIES + +# Jupiter mass +M_jup = IAU2012('M_jup', "Jupiter mass", 1.8987e27, 'kg', 0.00005e27, + "Allen's Astrophysical Quantities 4th Ed.", system='si') + +# Jupiter equatorial radius +R_jup = IAU2012('R_jup', "Jupiter equatorial radius", 7.1492e7, 'm', + 0.00005e7, "Allen's Astrophysical Quantities 4th Ed.", + system='si') + +# Earth mass +M_earth = IAU2012('M_earth', "Earth mass", 5.9742e24, 'kg', 0.00005e24, + "Allen's Astrophysical Quantities 4th Ed.", system='si') + +# Earth equatorial radius +R_earth = IAU2012('R_earth', "Earth equatorial radius", 6.378136e6, 'm', + 0.0000005e6, "Allen's Astrophysical Quantities 4th Ed.", + system='si') diff --git a/astropy/constants/iau2015.py b/astropy/constants/iau2015.py new file mode 100644 index 0000000..7ae11ca --- /dev/null +++ b/astropy/constants/iau2015.py @@ -0,0 +1,96 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" +Astronomical and physics constants in SI units. See :mod:`astropy.constants` +for a complete listing of constants defined in Astropy. +""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import numpy as np + +from .constant import Constant +from .codata2014 import G + +# ASTRONOMICAL CONSTANTS + + +class IAU2015(Constant): + default_reference = 'IAU 2015' + _registry = {} + _has_incompatible_units = set() + + +# DISTANCE + +# Astronomical Unit +au = IAU2015('au', "Astronomical Unit", 1.49597870700e11, 'm', 0.0, + "IAU 2012 Resolution B2", system='si') + +# Parsec + +pc = IAU2015('pc', "Parsec", au.value / np.tan(np.radians(1. / 3600.)), 'm', + au.uncertainty / np.tan(np.radians(1. / 3600.)), + "Derived from au", system='si') + +# Kiloparsec +kpc = IAU2015('kpc', "Kiloparsec", + 1000. * au.value / np.tan(np.radians(1. / 3600.)), 'm', + 1000. * au.uncertainty / np.tan(np.radians(1. 
/ 3600.)), + "Derived from au", system='si') + +# Luminosity +L_bol0 = IAU2015('L_bol0', "Luminosity for absolute bolometric magnitude 0", + 3.0128e28, "W", 0.0, "IAU 2015 Resolution B 2", system='si') + + +# SOLAR QUANTITIES + +# Solar luminosity +L_sun = IAU2015('L_sun', "Nominal solar luminosity", 3.828e26, + 'W', 0.0, "IAU 2015 Resolution B 3", system='si') + +# Solar mass parameter +GM_sun = IAU2015('GM_sun', 'Nominal solar mass parameter', 1.3271244e20, + 'm3 / (s2)', 0.0, "IAU 2015 Resolution B 3", system='si') + +# Solar mass (derived from mass parameter and gravitational constant) +M_sun = IAU2015('M_sun', "Solar mass", GM_sun.value / G.value, + 'kg', ((G.uncertainty / G.value) * + (GM_sun.value / G.value)), + "IAU 2015 Resolution B 3 + CODATA 2014", system='si') + +# Solar radius +R_sun = IAU2015('R_sun', "Nominal solar radius", 6.957e8, 'm', 0.0, + "IAU 2015 Resolution B 3", system='si') + + +# OTHER SOLAR SYSTEM QUANTITIES + +# Jupiter mass parameter +GM_jup = IAU2015('GM_jup', 'Nominal Jupiter mass parameter', 1.2668653e17, + 'm3 / (s2)', 0.0, "IAU 2015 Resolution B 3", system='si') + +# Jupiter mass (derived from mass parameter and gravitational constant) +M_jup = IAU2015('M_jup', "Jupiter mass", GM_jup.value / G.value, + 'kg', ((G.uncertainty / G.value) * + (GM_jup.value / G.value)), + "IAU 2015 Resolution B 3 + CODATA 2014", system='si') + +# Jupiter equatorial radius +R_jup = IAU2015('R_jup', "Nominal Jupiter equatorial radius", 7.1492e7, + 'm', 0.0, "IAU 2015 Resolution B 3", system='si') + +# Earth mass parameter +GM_earth = IAU2015('GM_earth', 'Nominal Earth mass parameter', 3.986004e14, + 'm3 / (s2)', 0.0, "IAU 2015 Resolution B 3", system='si') + +# Earth mass (derived from mass parameter and gravitational constant) +M_earth = IAU2015('M_earth', "Earth mass", + GM_earth.value / G.value, + 'kg', ((G.uncertainty / G.value) * + (GM_earth.value / G.value)), + "IAU 2015 Resolution B 3 + CODATA 2014", system='si') + +# Earth equatorial radius +R_earth = IAU2015('R_earth', "Nominal Earth equatorial radius", 6.3781e6, + 'm', 0.0, "IAU 2015 Resolution B 3", system='si') diff --git a/astropy/constants/setup_package.py b/astropy/constants/setup_package.py new file mode 100644 index 0000000..3cd9f7c --- /dev/null +++ b/astropy/constants/setup_package.py @@ -0,0 +1,5 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + + +def requires_2to3(): + return False diff --git a/astropy/constants/si.py b/astropy/constants/si.py new file mode 100644 index 0000000..ed529bd --- /dev/null +++ b/astropy/constants/si.py @@ -0,0 +1,20 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" +Astronomical and physics constants in SI units. See :mod:`astropy.constants` +for a complete listing of constants defined in Astropy. +""" + + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import itertools + +from .constant import Constant +from . 
import codata2014, iau2015 + +for _nm, _c in itertools.chain(sorted(vars(codata2014).items()), + sorted(vars(iau2015).items())): + if (isinstance(_c, Constant) and _c.abbrev not in locals() + and _c.system == 'si'): + locals()[_c.abbrev] = _c diff --git a/astropy/constants/tests/__init__.py b/astropy/constants/tests/__init__.py new file mode 100644 index 0000000..800d82e --- /dev/null +++ b/astropy/constants/tests/__init__.py @@ -0,0 +1,2 @@ +from __future__ import (absolute_import, division, print_function, + unicode_literals) diff --git a/astropy/constants/tests/test_constant.py b/astropy/constants/tests/test_constant.py new file mode 100644 index 0000000..4ed874e --- /dev/null +++ b/astropy/constants/tests/test_constant.py @@ -0,0 +1,165 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +# TEST_UNICODE_LITERALS + +from __future__ import (absolute_import, division, print_function, + unicode_literals) +from ...extern import six + +import copy + +import pytest + +from .. import Constant +from ...units import Quantity as Q + + +def test_c(): + + from .. import c + + # c is an exactly defined constant, so it shouldn't be changing + assert c.value == 2.99792458e8 # default is S.I. + assert c.si.value == 2.99792458e8 + assert c.cgs.value == 2.99792458e10 + + # make sure it has the necessary attributes and they're not blank + assert c.uncertainty == 0 # c is a *defined* quantity + assert c.name + assert c.reference + assert c.unit + + +def test_h(): + + from .. import h + + # check that the value is fairly close to what it should be (not exactly + # checking because this might get updated in the future) + assert abs(h.value - 6.626e-34) < 1e-38 + assert abs(h.si.value - 6.626e-34) < 1e-38 + assert abs(h.cgs.value - 6.626e-27) < 1e-31 + + # make sure it has the necessary attributes and they're not blank + assert h.uncertainty + assert h.name + assert h.reference + assert h.unit + + +def test_e(): + """Tests for #572 demonstrating how EM constants should behave.""" + + from .. import e + + # A test quantity + E = Q(100, 'V/m') + + # Without specifying a system e should not combine with other quantities + pytest.raises(TypeError, lambda: e * E) + # Try it again (as regression test on a minor issue mentioned in #745 where + # repeated attempts to use e in an expression resulted in UnboundLocalError + # instead of TypeError) + pytest.raises(TypeError, lambda: e * E) + + # e.cgs is too ambiguous and should not work at all + pytest.raises(TypeError, lambda: e.cgs * E) + + assert isinstance(e.si, Q) + assert isinstance(e.gauss, Q) + assert isinstance(e.esu, Q) + + assert e.si * E == Q(100, 'eV/m') + assert e.gauss * E == Q(e.gauss.value * E.value, 'Fr V/m') + assert e.esu * E == Q(e.esu.value * E.value, 'Fr V/m') + + +def test_g0(): + """Tests for #1263 demonstrating how g0 constant should behave.""" + from .. import g0 + + # g0 is an exactly defined constant, so it shouldn't be changing + assert g0.value == 9.80665 # default is S.I. + assert g0.si.value == 9.80665 + assert g0.cgs.value == 9.80665e2 + + # make sure it has the necessary attributes and they're not blank + assert g0.uncertainty == 0 # g0 is a *defined* quantity + assert g0.name + assert g0.reference + assert g0.unit + + # Check that its unit have the correct physical type + assert g0.unit.physical_type == 'acceleration' + + +def test_b_wien(): + """b_wien should give the correct peak wavelength for + given blackbody temperature. The Sun is used in this test. + + """ + from .. import b_wien + from ... 
import units as u + t = 5778 * u.K + w = (b_wien / t).to(u.nm) + assert round(w.value) == 502 + + +def test_unit(): + + from ... import units as u + + from ... import constants as const + + for key, val in six.iteritems(vars(const)): + if isinstance(val, Constant): + # Getting the unit forces the unit parser to run. Confirm + # that none of the constants defined in astropy have + # invalid unit. + assert not isinstance(val.unit, u.UnrecognizedUnit) + + +def test_copy(): + from ... import constants as const + cc = copy.deepcopy(const.c) + assert cc == const.c + + cc = copy.copy(const.c) + assert cc == const.c + + +def test_view(): + """Check that Constant and Quantity views can be taken (#3537, #3538).""" + from .. import c + c2 = c.view(Constant) + assert c2 == c + assert c2.value == c.value + # make sure it has the necessary attributes and they're not blank + assert c2.uncertainty == 0 # c is a *defined* quantity + assert c2.name == c.name + assert c2.reference == c.reference + assert c2.unit == c.unit + + q1 = c.view(Q) + assert q1 == c + assert q1.value == c.value + assert type(q1) is Q + assert not hasattr(q1, 'reference') + + q2 = Q(c) + assert q2 == c + assert q2.value == c.value + assert type(q2) is Q + assert not hasattr(q2, 'reference') + + c3 = Q(c, subok=True) + assert c3 == c + assert c3.value == c.value + # make sure it has the necessary attributes and they're not blank + assert c3.uncertainty == 0 # c is a *defined* quantity + assert c3.name == c.name + assert c3.reference == c.reference + assert c3.unit == c.unit + + c4 = Q(c, subok=True, copy=False) + assert c4 is c diff --git a/astropy/constants/tests/test_pickle.py b/astropy/constants/tests/test_pickle.py new file mode 100644 index 0000000..8087aa0 --- /dev/null +++ b/astropy/constants/tests/test_pickle.py @@ -0,0 +1,22 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import absolute_import, division, print_function, unicode_literals + +import pytest + +from ... import constants as const +from ...tests.helper import pickle_protocol, check_pickling_recovery # noqa +from ...extern.six.moves import zip + +originals = [const.Constant('h_fake', 'Not Planck', + 0.0, 'J s', 0.0, 'fakeref', + system='si'), + const.h, + const.e] +xfails = [True, True, True] + + +@pytest.mark.parametrize(("original", "xfail"), zip(originals, xfails)) +def test_new_constant(pickle_protocol, original, xfail): + if xfail: + pytest.xfail() + check_pickling_recovery(original, pickle_protocol) diff --git a/astropy/constants/tests/test_prior_version.py b/astropy/constants/tests/test_prior_version.py new file mode 100644 index 0000000..2296ed7 --- /dev/null +++ b/astropy/constants/tests/test_prior_version.py @@ -0,0 +1,161 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +# TEST_UNICODE_LITERALS + +from __future__ import (absolute_import, division, print_function, + unicode_literals) +from ...extern import six + +import copy + +import pytest + +from .. import Constant +from ...units import Quantity as Q + + +def test_c(): + + from ..codata2010 import c + + # c is an exactly defined constant, so it shouldn't be changing + assert c.value == 2.99792458e8 # default is S.I. + assert c.si.value == 2.99792458e8 + assert c.cgs.value == 2.99792458e10 + + # make sure it has the necessary attributes and they're not blank + assert c.uncertainty == 0 # c is a *defined* quantity + assert c.name + assert c.reference + assert c.unit + + +def test_h(): + + from ..codata2010 import h + from .. 
import h as h_current + + # check that the value is the CODATA2010 value + assert abs(h.value - 6.62606957e-34) < 1e-43 + assert abs(h.si.value - 6.62606957e-34) < 1e-43 + assert abs(h.cgs.value - 6.62606957e-27) < 1e-36 + + # Check it is different than the current value + assert abs(h.value - h_current.value) > 4e-42 + + # make sure it has the necessary attributes and they're not blank + assert h.uncertainty + assert h.name + assert h.reference + assert h.unit + + +def test_e(): + + from ..astropyconst13 import e + + # A test quantity + E = Q(100.00000348276221, 'V/m') + + # e.cgs is too ambiguous and should not work at all + with pytest.raises(TypeError): + e.cgs * E + + assert isinstance(e.si, Q) + assert isinstance(e.gauss, Q) + assert isinstance(e.esu, Q) + + assert e.si * E == Q(100, 'eV/m') + assert e.gauss * E == Q(e.gauss.value * E.value, 'Fr V/m') + assert e.esu * E == Q(e.esu.value * E.value, 'Fr V/m') + + +def test_g0(): + """Tests for #1263 demonstrating how g0 constant should behave.""" + from ..astropyconst13 import g0 + + # g0 is an exactly defined constant, so it shouldn't be changing + assert g0.value == 9.80665 # default is S.I. + assert g0.si.value == 9.80665 + assert g0.cgs.value == 9.80665e2 + + # make sure it has the necessary attributes and they're not blank + assert g0.uncertainty == 0 # g0 is a *defined* quantity + assert g0.name + assert g0.reference + assert g0.unit + + # Check that its unit have the correct physical type + assert g0.unit.physical_type == 'acceleration' + + +def test_b_wien(): + """b_wien should give the correct peak wavelength for + given blackbody temperature. The Sun is used in this test. + + """ + from ..astropyconst13 import b_wien + from ... import units as u + t = 5778 * u.K + w = (b_wien / t).to(u.nm) + assert round(w.value) == 502 + + +def test_unit(): + + from ... import units as u + + from .. import astropyconst13 as const + + for key, val in six.iteritems(vars(const)): + if isinstance(val, Constant): + # Getting the unit forces the unit parser to run. Confirm + # that none of the constants defined in astropy have + # invalid unit. + assert not isinstance(val.unit, u.UnrecognizedUnit) + + +def test_copy(): + from ... import constants as const + cc = copy.deepcopy(const.c) + assert cc == const.c + + cc = copy.copy(const.c) + assert cc == const.c + + +def test_view(): + """Check that Constant and Quantity views can be taken (#3537, #3538).""" + from .. 
import c + c2 = c.view(Constant) + assert c2 == c + assert c2.value == c.value + # make sure it has the necessary attributes and they're not blank + assert c2.uncertainty == 0 # c is a *defined* quantity + assert c2.name == c.name + assert c2.reference == c.reference + assert c2.unit == c.unit + + q1 = c.view(Q) + assert q1 == c + assert q1.value == c.value + assert type(q1) is Q + assert not hasattr(q1, 'reference') + + q2 = Q(c) + assert q2 == c + assert q2.value == c.value + assert type(q2) is Q + assert not hasattr(q2, 'reference') + + c3 = Q(c, subok=True) + assert c3 == c + assert c3.value == c.value + # make sure it has the necessary attributes and they're not blank + assert c3.uncertainty == 0 # c is a *defined* quantity + assert c3.name == c.name + assert c3.reference == c.reference + assert c3.unit == c.unit + + c4 = Q(c, subok=True, copy=False) + assert c4 is c diff --git a/astropy/convolution/__init__.py b/astropy/convolution/__init__.py new file mode 100644 index 0000000..6369980 --- /dev/null +++ b/astropy/convolution/__init__.py @@ -0,0 +1,15 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +from .core import * +from .kernels import * +from .utils import discretize_model + +try: + # Not guaranteed available at setup time + from .convolve import convolve, convolve_fft, interpolate_replace_nans, convolve_models +except ImportError: + if not _ASTROPY_SETUP_: + raise diff --git a/astropy/convolution/boundary_extend.c b/astropy/convolution/boundary_extend.c new file mode 100644 index 0000000..2e110db --- /dev/null +++ b/astropy/convolution/boundary_extend.c @@ -0,0 +1,9883 @@ +/* Generated by Cython 0.27.3 */ + +#define PY_SSIZE_T_CLEAN +#include "Python.h" +#ifndef Py_PYTHON_H + #error Python headers needed to compile C extensions, please install development version of Python. +#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) + #error Cython requires Python 2.6+ or Python 3.3+. 
+#else +#define CYTHON_ABI "0_27_3" +#define CYTHON_FUTURE_DIVISION 1 +#include +#ifndef offsetof + #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) +#endif +#if !defined(WIN32) && !defined(MS_WINDOWS) + #ifndef __stdcall + #define __stdcall + #endif + #ifndef __cdecl + #define __cdecl + #endif + #ifndef __fastcall + #define __fastcall + #endif +#endif +#ifndef DL_IMPORT + #define DL_IMPORT(t) t +#endif +#ifndef DL_EXPORT + #define DL_EXPORT(t) t +#endif +#define __PYX_COMMA , +#ifndef HAVE_LONG_LONG + #if PY_VERSION_HEX >= 0x02070000 + #define HAVE_LONG_LONG + #endif +#endif +#ifndef PY_LONG_LONG + #define PY_LONG_LONG LONG_LONG +#endif +#ifndef Py_HUGE_VAL + #define Py_HUGE_VAL HUGE_VAL +#endif +#ifdef PYPY_VERSION + #define CYTHON_COMPILING_IN_PYPY 1 + #define CYTHON_COMPILING_IN_PYSTON 0 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #undef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 0 + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #if PY_VERSION_HEX < 0x03050000 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #undef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 0 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #undef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 1 + #undef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 0 + #undef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 0 + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 +#elif defined(PYSTON_VERSION) + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_PYSTON 1 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #ifndef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 +#else + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_PYSTON 0 + #define CYTHON_COMPILING_IN_CPYTHON 1 + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif + #if PY_VERSION_HEX < 0x02070000 + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) + #define 
CYTHON_USE_PYTYPE_LOOKUP 1 + #endif + #if PY_MAJOR_VERSION < 3 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #if PY_VERSION_HEX < 0x02070000 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #elif !defined(CYTHON_USE_PYLONG_INTERNALS) + #define CYTHON_USE_PYLONG_INTERNALS 1 + #endif + #ifndef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 1 + #endif + #ifndef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif + #if PY_VERSION_HEX < 0x030300F0 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #elif !defined(CYTHON_USE_UNICODE_WRITER) + #define CYTHON_USE_UNICODE_WRITER 1 + #endif + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #ifndef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 1 + #endif + #ifndef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 1 + #endif + #ifndef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT (0 && PY_VERSION_HEX >= 0x03050000) + #endif + #ifndef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) + #endif +#endif +#if !defined(CYTHON_FAST_PYCCALL) +#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) +#endif +#if CYTHON_USE_PYLONG_INTERNALS + #include "longintrepr.h" + #undef SHIFT + #undef BASE + #undef MASK +#endif +#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) + #define Py_OptimizeFlag 0 +#endif +#define __PYX_BUILD_PY_SSIZE_T "n" +#define CYTHON_FORMAT_SSIZE_T "z" +#if PY_MAJOR_VERSION < 3 + #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) + #define __Pyx_DefaultClassType PyClass_Type +#else + #define __Pyx_BUILTIN_MODULE_NAME "builtins" + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) + #define __Pyx_DefaultClassType PyType_Type +#endif +#ifndef Py_TPFLAGS_CHECKTYPES + #define Py_TPFLAGS_CHECKTYPES 0 +#endif +#ifndef Py_TPFLAGS_HAVE_INDEX + #define Py_TPFLAGS_HAVE_INDEX 0 +#endif +#ifndef Py_TPFLAGS_HAVE_NEWBUFFER + #define Py_TPFLAGS_HAVE_NEWBUFFER 0 +#endif +#ifndef Py_TPFLAGS_HAVE_FINALIZE + #define Py_TPFLAGS_HAVE_FINALIZE 0 +#endif +#if PY_VERSION_HEX < 0x030700A0 || !defined(METH_FASTCALL) + #ifndef METH_FASTCALL + #define METH_FASTCALL 0x80 + #endif + typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject **args, Py_ssize_t nargs); + typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject **args, + Py_ssize_t nargs, PyObject *kwnames); +#else + #define __Pyx_PyCFunctionFast _PyCFunctionFast + #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords +#endif +#if CYTHON_FAST_PYCCALL +#define __Pyx_PyFastCFunction_Check(func)\ + ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS))))) +#else +#define __Pyx_PyFastCFunction_Check(func) 0 +#endif +#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 + 
#define __Pyx_PyThreadState_Current PyThreadState_GET() +#elif PY_VERSION_HEX >= 0x03060000 + #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() +#elif PY_VERSION_HEX >= 0x03000000 + #define __Pyx_PyThreadState_Current PyThreadState_GET() +#else + #define __Pyx_PyThreadState_Current _PyThreadState_Current +#endif +#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) +#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n)) +#else +#define __Pyx_PyDict_NewPresized(n) PyDict_New() +#endif +#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION + #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) +#else + #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) +#endif +#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) + #define CYTHON_PEP393_ENABLED 1 + #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ + 0 : _PyUnicode_Ready((PyObject *)(op))) + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) + #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) + #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) + #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) +#else + #define CYTHON_PEP393_ENABLED 0 + #define PyUnicode_1BYTE_KIND 1 + #define PyUnicode_2BYTE_KIND 2 + #define PyUnicode_4BYTE_KIND 4 + #define __Pyx_PyUnicode_READY(op) (0) + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 
65535 : 1114111) + #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) + #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) + #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) +#endif +#if CYTHON_COMPILING_IN_PYPY + #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) + #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) +#else + #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) + #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ + PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) + #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) + #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) + #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) + #define PyObject_Malloc(s) PyMem_Malloc(s) + #define PyObject_Free(p) PyMem_Free(p) + #define PyObject_Realloc(p) PyMem_Realloc(p) +#endif +#if CYTHON_COMPILING_IN_PYSTON + #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) + #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) +#else + #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) + #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) +#endif +#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) +#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) +#else + #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) +#endif +#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) + #define PyObject_ASCII(o) PyObject_Repr(o) +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyBaseString_Type PyUnicode_Type + #define PyStringObject PyUnicodeObject + #define PyString_Type PyUnicode_Type + #define PyString_Check PyUnicode_Check + #define PyString_CheckExact PyUnicode_CheckExact +#endif +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) + #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) +#else + #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) + #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) +#endif +#ifndef PySet_CheckExact + #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) +#endif +#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) +#if PY_MAJOR_VERSION >= 3 + #define PyIntObject PyLongObject + #define PyInt_Type PyLong_Type + #define PyInt_Check(op) PyLong_Check(op) + #define PyInt_CheckExact(op) PyLong_CheckExact(op) + #define PyInt_FromString PyLong_FromString + #define PyInt_FromUnicode PyLong_FromUnicode + #define PyInt_FromLong PyLong_FromLong + #define PyInt_FromSize_t PyLong_FromSize_t + #define PyInt_FromSsize_t PyLong_FromSsize_t + #define PyInt_AsLong PyLong_AsLong + #define PyInt_AS_LONG PyLong_AS_LONG + #define PyInt_AsSsize_t PyLong_AsSsize_t + #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask + #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask + #define PyNumber_Int PyNumber_Long +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyBoolObject PyLongObject +#endif +#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY + #ifndef PyUnicode_InternFromString + #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) + #endif +#endif +#if PY_VERSION_HEX < 0x030200A4 + typedef long Py_hash_t; + #define __Pyx_PyInt_FromHash_t PyInt_FromLong + #define __Pyx_PyInt_AsHash_t PyInt_AsLong +#else + #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t + #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t +#endif +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyMethod_New(func, self, klass) ((self) ? 
PyMethod_New(func, self) : PyInstanceMethod_New(func))
+#else
+  #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
+#endif
+#ifndef __has_attribute
+  #define __has_attribute(x) 0
+#endif
+#ifndef __has_cpp_attribute
+  #define __has_cpp_attribute(x) 0
+#endif
+#if CYTHON_USE_ASYNC_SLOTS
+  #if PY_VERSION_HEX >= 0x030500B1
+    #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
+    #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
+  #else
+    #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
+  #endif
+#else
+  #define __Pyx_PyType_AsAsync(obj) NULL
+#endif
+#ifndef __Pyx_PyAsyncMethodsStruct
+  typedef struct {
+    unaryfunc am_await;
+    unaryfunc am_aiter;
+    unaryfunc am_anext;
+  } __Pyx_PyAsyncMethodsStruct;
+#endif
+#ifndef CYTHON_RESTRICT
+  #if defined(__GNUC__)
+    #define CYTHON_RESTRICT __restrict__
+  #elif defined(_MSC_VER) && _MSC_VER >= 1400
+    #define CYTHON_RESTRICT __restrict
+  #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+    #define CYTHON_RESTRICT restrict
+  #else
+    #define CYTHON_RESTRICT
+  #endif
+#endif
+#ifndef CYTHON_UNUSED
+# if defined(__GNUC__)
+#   if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
+#     define CYTHON_UNUSED __attribute__ ((__unused__))
+#   else
+#     define CYTHON_UNUSED
+#   endif
+# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
+#   define CYTHON_UNUSED __attribute__ ((__unused__))
+# else
+#   define CYTHON_UNUSED
+# endif
+#endif
+#ifndef CYTHON_MAYBE_UNUSED_VAR
+#  if defined(__cplusplus)
+     template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { }
+#  else
+#    define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x)
+#  endif
+#endif
+#ifndef CYTHON_NCP_UNUSED
+# if CYTHON_COMPILING_IN_CPYTHON
+#  define CYTHON_NCP_UNUSED
+# else
+#  define CYTHON_NCP_UNUSED CYTHON_UNUSED
+# endif
+#endif
+#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
+#ifdef _MSC_VER
+  #ifndef _MSC_STDINT_H_
+    #if _MSC_VER < 1300
+      typedef unsigned char uint8_t;
+      typedef unsigned int uint32_t;
+    #else
+      typedef unsigned __int8 uint8_t;
+      typedef unsigned __int32 uint32_t;
+    #endif
+  #endif
+#else
+  #include <stdint.h>
+#endif
+#ifndef CYTHON_FALLTHROUGH
+  #if defined(__cplusplus) && __cplusplus >= 201103L
+    #if __has_cpp_attribute(fallthrough)
+      #define CYTHON_FALLTHROUGH [[fallthrough]]
+    #elif __has_cpp_attribute(clang::fallthrough)
+      #define CYTHON_FALLTHROUGH [[clang::fallthrough]]
+    #elif __has_cpp_attribute(gnu::fallthrough)
+      #define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
+    #endif
+  #endif
+  #ifndef CYTHON_FALLTHROUGH
+    #if __has_attribute(fallthrough)
+      #define CYTHON_FALLTHROUGH __attribute__((fallthrough))
+    #else
+      #define CYTHON_FALLTHROUGH
+    #endif
+  #endif
+  #if defined(__clang__ ) && defined(__apple_build_version__)
+    #if __apple_build_version__ < 7000000
+      #undef CYTHON_FALLTHROUGH
+      #define CYTHON_FALLTHROUGH
+    #endif
+  #endif
+#endif
+
+#ifndef CYTHON_INLINE
+  #if defined(__clang__)
+    #define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
+  #elif defined(__GNUC__)
+    #define CYTHON_INLINE __inline__
+  #elif defined(_MSC_VER)
+    #define CYTHON_INLINE __inline
+  #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+    #define CYTHON_INLINE inline
+  #else
+    #define CYTHON_INLINE
+  #endif
+#endif
+
+#if defined(WIN32) || defined(MS_WINDOWS)
+  #define _USE_MATH_DEFINES
+#endif
+#include <math.h>
+#ifdef NAN
+#define __PYX_NAN() ((float) NAN)
+#else
+static CYTHON_INLINE float __PYX_NAN() {
+  float value;
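+  /* Setting every bit of the float yields an all-ones exponent with a
+     non-zero mantissa, which IEEE 754 defines as a (quiet) NaN; this is
+     the fallback used when math.h provides no NAN macro. */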
+  memset(&value, 0xFF, sizeof(value));
+  return value;
+}
+#endif
+#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
+#define __Pyx_truncl trunc
+#else
+#define __Pyx_truncl truncl
+#endif
+
+
+#define __PYX_ERR(f_index, lineno, Ln_error) \
+{ \
+  __pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \
+}
+
+#ifndef __PYX_EXTERN_C
+  #ifdef __cplusplus
+    #define __PYX_EXTERN_C extern "C"
+  #else
+    #define __PYX_EXTERN_C extern
+  #endif
+#endif
+
+#define __PYX_HAVE__astropy__convolution__boundary_extend
+#define __PYX_HAVE_API__astropy__convolution__boundary_extend
+#include <string.h>
+#include <stdio.h>
+#include "numpy/arrayobject.h"
+#include "numpy/ufuncobject.h"
+#include "numpy/npy_math.h"
+#ifdef _OPENMP
+#include <omp.h>
+#endif /* _OPENMP */
+
+#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)
+#define CYTHON_WITHOUT_ASSERTIONS
+#endif
+
+typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;
+                const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
+
+#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
+#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0
+#define __PYX_DEFAULT_STRING_ENCODING ""
+#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
+#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
+#define __Pyx_uchar_cast(c) ((unsigned char)c)
+#define __Pyx_long_cast(x) ((long)x)
+#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
+    (sizeof(type) < sizeof(Py_ssize_t)) ||\
+    (sizeof(type) > sizeof(Py_ssize_t) &&\
+          likely(v < (type)PY_SSIZE_T_MAX ||\
+                 v == (type)PY_SSIZE_T_MAX) &&\
+          (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
+                                v == (type)PY_SSIZE_T_MIN))) ||\
+    (sizeof(type) == sizeof(Py_ssize_t) &&\
+          (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
+                               v == (type)PY_SSIZE_T_MAX))) )
+#if defined (__cplusplus) && __cplusplus >= 201103L
+  #include <cstdlib>
+  #define __Pyx_sst_abs(value) std::abs(value)
+#elif SIZEOF_INT >= SIZEOF_SIZE_T
+  #define __Pyx_sst_abs(value) abs(value)
+#elif SIZEOF_LONG >= SIZEOF_SIZE_T
+  #define __Pyx_sst_abs(value) labs(value)
+#elif defined (_MSC_VER)
+  #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value))
+#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+  #define __Pyx_sst_abs(value) llabs(value)
+#elif defined (__GNUC__)
+  #define __Pyx_sst_abs(value) __builtin_llabs(value)
+#else
+  #define __Pyx_sst_abs(value) ((value<0) ? 
-value : value) +#endif +static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); +static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); +#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) +#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) +#define __Pyx_PyBytes_FromString PyBytes_FromString +#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); +#if PY_MAJOR_VERSION < 3 + #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString + #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize +#else + #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString + #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize +#endif +#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) +#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) +#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) +#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) +#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) +static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { + const Py_UNICODE *u_end = u; + while (*u_end++) ; + return (size_t)(u_end - u - 1); +} +#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) +#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode +#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode +#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) +#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) +#define __Pyx_PyBool_FromLong(b) ((b) ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False)) +static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); +#define __Pyx_PySequence_Tuple(obj)\ + (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) +static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); +static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); +#if CYTHON_ASSUME_SAFE_MACROS +#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) +#else +#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) +#endif +#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) +#if PY_MAJOR_VERSION >= 3 +#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? 
__Pyx_NewRef(x) : PyNumber_Long(x)) +#else +#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) +#endif +#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) +#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII +static int __Pyx_sys_getdefaultencoding_not_ascii; +static int __Pyx_init_sys_getdefaultencoding_params(void) { + PyObject* sys; + PyObject* default_encoding = NULL; + PyObject* ascii_chars_u = NULL; + PyObject* ascii_chars_b = NULL; + const char* default_encoding_c; + sys = PyImport_ImportModule("sys"); + if (!sys) goto bad; + default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); + Py_DECREF(sys); + if (!default_encoding) goto bad; + default_encoding_c = PyBytes_AsString(default_encoding); + if (!default_encoding_c) goto bad; + if (strcmp(default_encoding_c, "ascii") == 0) { + __Pyx_sys_getdefaultencoding_not_ascii = 0; + } else { + char ascii_chars[128]; + int c; + for (c = 0; c < 128; c++) { + ascii_chars[c] = c; + } + __Pyx_sys_getdefaultencoding_not_ascii = 1; + ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); + if (!ascii_chars_u) goto bad; + ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); + if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { + PyErr_Format( + PyExc_ValueError, + "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", + default_encoding_c); + goto bad; + } + Py_DECREF(ascii_chars_u); + Py_DECREF(ascii_chars_b); + } + Py_DECREF(default_encoding); + return 0; +bad: + Py_XDECREF(default_encoding); + Py_XDECREF(ascii_chars_u); + Py_XDECREF(ascii_chars_b); + return -1; +} +#endif +#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 +#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) +#else +#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) +#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT +static char* __PYX_DEFAULT_STRING_ENCODING; +static int __Pyx_init_sys_getdefaultencoding_params(void) { + PyObject* sys; + PyObject* default_encoding = NULL; + char* default_encoding_c; + sys = PyImport_ImportModule("sys"); + if (!sys) goto bad; + default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); + Py_DECREF(sys); + if (!default_encoding) goto bad; + default_encoding_c = PyBytes_AsString(default_encoding); + if (!default_encoding_c) goto bad; + __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c)); + if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; + strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); + Py_DECREF(default_encoding); + return 0; +bad: + Py_XDECREF(default_encoding); + return -1; +} +#endif +#endif + + +/* Test for GCC > 2.95 */ +#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) + #define likely(x) __builtin_expect(!!(x), 1) + #define unlikely(x) __builtin_expect(!!(x), 0) +#else /* !__GNUC__ or GCC < 2.95 */ + #define likely(x) (x) + #define unlikely(x) (x) +#endif /* __GNUC__ */ +static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } + +static PyObject *__pyx_m = NULL; +static PyObject *__pyx_d; +static PyObject *__pyx_b; +static PyObject *__pyx_cython_runtime; +static PyObject *__pyx_empty_tuple; +static PyObject 
*__pyx_empty_bytes;
+static PyObject *__pyx_empty_unicode;
+static int __pyx_lineno;
+static int __pyx_clineno = 0;
+static const char * __pyx_cfilenm= __FILE__;
+static const char *__pyx_filename;
+
+/* Header.proto */
+#if !defined(CYTHON_CCOMPLEX)
+  #if defined(__cplusplus)
+    #define CYTHON_CCOMPLEX 1
+  #elif defined(_Complex_I)
+    #define CYTHON_CCOMPLEX 1
+  #else
+    #define CYTHON_CCOMPLEX 0
+  #endif
+#endif
+#if CYTHON_CCOMPLEX
+  #ifdef __cplusplus
+    #include <complex>
+  #else
+    #include <complex.h>
+  #endif
+#endif
+#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__)
+  #undef _Complex_I
+  #define _Complex_I 1.0fj
+#endif
+
+
+static const char *__pyx_f[] = {
+  "astropy/convolution/boundary_extend.pyx",
+  "__init__.pxd",
+  "type.pxd",
+};
+/* BufferFormatStructs.proto */
+#define IS_UNSIGNED(type) (((type) -1) > 0)
+struct __Pyx_StructField_;
+#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
+typedef struct {
+  const char* name;
+  struct __Pyx_StructField_* fields;
+  size_t size;
+  size_t arraysize[8];
+  int ndim;
+  char typegroup;
+  char is_unsigned;
+  int flags;
+} __Pyx_TypeInfo;
+typedef struct __Pyx_StructField_ {
+  __Pyx_TypeInfo* type;
+  const char* name;
+  size_t offset;
+} __Pyx_StructField;
+typedef struct {
+  __Pyx_StructField* field;
+  size_t parent_offset;
+} __Pyx_BufFmt_StackElem;
+typedef struct {
+  __Pyx_StructField root;
+  __Pyx_BufFmt_StackElem* head;
+  size_t fmt_offset;
+  size_t new_count, enc_count;
+  size_t struct_alignment;
+  int is_complex;
+  char enc_type;
+  char new_packmode;
+  char enc_packmode;
+  char is_valid_array;
+} __Pyx_BufFmt_Context;
+
+/* NoFastGil.proto */
+#define __Pyx_PyGILState_Ensure PyGILState_Ensure
+#define __Pyx_PyGILState_Release PyGILState_Release
+#define __Pyx_FastGIL_Remember()
+#define __Pyx_FastGIL_Forget()
+#define __Pyx_FastGilFuncInit()
+
+/* ForceInitThreads.proto */
+#ifndef __PYX_FORCE_INIT_THREADS
+  #define __PYX_FORCE_INIT_THREADS 0
+#endif
+
+
+/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":743
+ * # in Cython to enable them only on the right systems. 
+ * + * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< + * ctypedef npy_int16 int16_t + * ctypedef npy_int32 int32_t + */ +typedef npy_int8 __pyx_t_5numpy_int8_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":744 + * + * ctypedef npy_int8 int8_t + * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< + * ctypedef npy_int32 int32_t + * ctypedef npy_int64 int64_t + */ +typedef npy_int16 __pyx_t_5numpy_int16_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":745 + * ctypedef npy_int8 int8_t + * ctypedef npy_int16 int16_t + * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< + * ctypedef npy_int64 int64_t + * #ctypedef npy_int96 int96_t + */ +typedef npy_int32 __pyx_t_5numpy_int32_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":746 + * ctypedef npy_int16 int16_t + * ctypedef npy_int32 int32_t + * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< + * #ctypedef npy_int96 int96_t + * #ctypedef npy_int128 int128_t + */ +typedef npy_int64 __pyx_t_5numpy_int64_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":750 + * #ctypedef npy_int128 int128_t + * + * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< + * ctypedef npy_uint16 uint16_t + * ctypedef npy_uint32 uint32_t + */ +typedef npy_uint8 __pyx_t_5numpy_uint8_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":751 + * + * ctypedef npy_uint8 uint8_t + * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< + * ctypedef npy_uint32 uint32_t + * ctypedef npy_uint64 uint64_t + */ +typedef npy_uint16 __pyx_t_5numpy_uint16_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":752 + * ctypedef npy_uint8 uint8_t + * ctypedef npy_uint16 uint16_t + * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< + * ctypedef npy_uint64 uint64_t + * #ctypedef npy_uint96 uint96_t + */ +typedef npy_uint32 __pyx_t_5numpy_uint32_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":753 + * ctypedef npy_uint16 uint16_t + * ctypedef npy_uint32 uint32_t + * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< + * #ctypedef npy_uint96 uint96_t + * #ctypedef npy_uint128 uint128_t + */ +typedef npy_uint64 __pyx_t_5numpy_uint64_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":757 + * #ctypedef npy_uint128 uint128_t + * + * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< + * ctypedef npy_float64 float64_t + * #ctypedef npy_float80 float80_t + */ +typedef npy_float32 __pyx_t_5numpy_float32_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":758 + * + * ctypedef npy_float32 float32_t + * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< + * #ctypedef npy_float80 float80_t + * #ctypedef npy_float128 float128_t + */ +typedef npy_float64 __pyx_t_5numpy_float64_t; + +/* 
"../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":767 + * # The int types are mapped a bit surprising -- + * # numpy.int corresponds to 'l' and numpy.long to 'q' + * ctypedef npy_long int_t # <<<<<<<<<<<<<< + * ctypedef npy_longlong long_t + * ctypedef npy_longlong longlong_t + */ +typedef npy_long __pyx_t_5numpy_int_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":768 + * # numpy.int corresponds to 'l' and numpy.long to 'q' + * ctypedef npy_long int_t + * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< + * ctypedef npy_longlong longlong_t + * + */ +typedef npy_longlong __pyx_t_5numpy_long_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":769 + * ctypedef npy_long int_t + * ctypedef npy_longlong long_t + * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< + * + * ctypedef npy_ulong uint_t + */ +typedef npy_longlong __pyx_t_5numpy_longlong_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":771 + * ctypedef npy_longlong longlong_t + * + * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< + * ctypedef npy_ulonglong ulong_t + * ctypedef npy_ulonglong ulonglong_t + */ +typedef npy_ulong __pyx_t_5numpy_uint_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":772 + * + * ctypedef npy_ulong uint_t + * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< + * ctypedef npy_ulonglong ulonglong_t + * + */ +typedef npy_ulonglong __pyx_t_5numpy_ulong_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":773 + * ctypedef npy_ulong uint_t + * ctypedef npy_ulonglong ulong_t + * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< + * + * ctypedef npy_intp intp_t + */ +typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":775 + * ctypedef npy_ulonglong ulonglong_t + * + * ctypedef npy_intp intp_t # <<<<<<<<<<<<<< + * ctypedef npy_uintp uintp_t + * + */ +typedef npy_intp __pyx_t_5numpy_intp_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":776 + * + * ctypedef npy_intp intp_t + * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< + * + * ctypedef npy_double float_t + */ +typedef npy_uintp __pyx_t_5numpy_uintp_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":778 + * ctypedef npy_uintp uintp_t + * + * ctypedef npy_double float_t # <<<<<<<<<<<<<< + * ctypedef npy_double double_t + * ctypedef npy_longdouble longdouble_t + */ +typedef npy_double __pyx_t_5numpy_float_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":779 + * + * ctypedef npy_double float_t + * ctypedef npy_double double_t # <<<<<<<<<<<<<< + * ctypedef npy_longdouble longdouble_t + * + */ +typedef npy_double __pyx_t_5numpy_double_t; + +/* 
"../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":780 + * ctypedef npy_double float_t + * ctypedef npy_double double_t + * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< + * + * ctypedef npy_cfloat cfloat_t + */ +typedef npy_longdouble __pyx_t_5numpy_longdouble_t; + +/* "astropy/convolution/boundary_extend.pyx":7 + * + * DTYPE = np.float + * ctypedef np.float_t DTYPE_t # <<<<<<<<<<<<<< + * + * cdef inline int int_max(int a, int b) nogil: return a if a >= b else b + */ +typedef __pyx_t_5numpy_float_t __pyx_t_7astropy_11convolution_15boundary_extend_DTYPE_t; +/* Declarations.proto */ +#if CYTHON_CCOMPLEX + #ifdef __cplusplus + typedef ::std::complex< float > __pyx_t_float_complex; + #else + typedef float _Complex __pyx_t_float_complex; + #endif +#else + typedef struct { float real, imag; } __pyx_t_float_complex; +#endif +static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); + +/* Declarations.proto */ +#if CYTHON_CCOMPLEX + #ifdef __cplusplus + typedef ::std::complex< double > __pyx_t_double_complex; + #else + typedef double _Complex __pyx_t_double_complex; + #endif +#else + typedef struct { double real, imag; } __pyx_t_double_complex; +#endif +static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); + + +/*--- Type declarations ---*/ + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":782 + * ctypedef npy_longdouble longdouble_t + * + * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< + * ctypedef npy_cdouble cdouble_t + * ctypedef npy_clongdouble clongdouble_t + */ +typedef npy_cfloat __pyx_t_5numpy_cfloat_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":783 + * + * ctypedef npy_cfloat cfloat_t + * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< + * ctypedef npy_clongdouble clongdouble_t + * + */ +typedef npy_cdouble __pyx_t_5numpy_cdouble_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":784 + * ctypedef npy_cfloat cfloat_t + * ctypedef npy_cdouble cdouble_t + * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< + * + * ctypedef npy_cdouble complex_t + */ +typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":786 + * ctypedef npy_clongdouble clongdouble_t + * + * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew1(a): + */ +typedef npy_cdouble __pyx_t_5numpy_complex_t; + +/* --- Runtime support code (head) --- */ +/* Refnanny.proto */ +#ifndef CYTHON_REFNANNY + #define CYTHON_REFNANNY 0 +#endif +#if CYTHON_REFNANNY + typedef struct { + void (*INCREF)(void*, PyObject*, int); + void (*DECREF)(void*, PyObject*, int); + void (*GOTREF)(void*, PyObject*, int); + void (*GIVEREF)(void*, PyObject*, int); + void* (*SetupContext)(const char*, int, const char*); + void (*FinishContext)(void**); + } __Pyx_RefNannyAPIStruct; + static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; + static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); + #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; +#ifdef WITH_THREAD + #define 
__Pyx_RefNannySetupContext(name, acquire_gil)\ + if (acquire_gil) {\ + PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ + PyGILState_Release(__pyx_gilstate_save);\ + } else {\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ + } +#else + #define __Pyx_RefNannySetupContext(name, acquire_gil)\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) +#endif + #define __Pyx_RefNannyFinishContext()\ + __Pyx_RefNanny->FinishContext(&__pyx_refnanny) + #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) + #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) + #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) + #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) +#else + #define __Pyx_RefNannyDeclarations + #define __Pyx_RefNannySetupContext(name, acquire_gil) + #define __Pyx_RefNannyFinishContext() + #define __Pyx_INCREF(r) Py_INCREF(r) + #define __Pyx_DECREF(r) Py_DECREF(r) + #define __Pyx_GOTREF(r) + #define __Pyx_GIVEREF(r) + #define __Pyx_XINCREF(r) Py_XINCREF(r) + #define __Pyx_XDECREF(r) Py_XDECREF(r) + #define __Pyx_XGOTREF(r) + #define __Pyx_XGIVEREF(r) +#endif +#define __Pyx_XDECREF_SET(r, v) do {\ + PyObject *tmp = (PyObject *) r;\ + r = v; __Pyx_XDECREF(tmp);\ + } while (0) +#define __Pyx_DECREF_SET(r, v) do {\ + PyObject *tmp = (PyObject *) r;\ + r = v; __Pyx_DECREF(tmp);\ + } while (0) +#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) +#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) + +/* PyObjectGetAttrStr.proto */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { + PyTypeObject* tp = Py_TYPE(obj); + if (likely(tp->tp_getattro)) + return tp->tp_getattro(obj, attr_name); +#if PY_MAJOR_VERSION < 3 + if (likely(tp->tp_getattr)) + return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); +#endif + return PyObject_GetAttr(obj, attr_name); +} +#else +#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) +#endif + +/* GetBuiltinName.proto */ +static PyObject *__Pyx_GetBuiltinName(PyObject *name); + +/* RaiseArgTupleInvalid.proto */ +static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, + Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); + +/* RaiseDoubleKeywords.proto */ +static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); + +/* ParseKeywords.proto */ +static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ + PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ + const char* function_name); + +/* ArgTypeTest.proto */ +#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\ + ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 
1 :\ + __Pyx__ArgTypeTest(obj, type, name, exact)) +static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); + +/* IsLittleEndian.proto */ +static CYTHON_INLINE int __Pyx_Is_Little_Endian(void); + +/* BufferFormatCheck.proto */ +static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); +static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, + __Pyx_BufFmt_StackElem* stack, + __Pyx_TypeInfo* type); + +/* BufferGetAndValidate.proto */ +#define __Pyx_GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack)\ + ((obj == Py_None || obj == NULL) ?\ + (__Pyx_ZeroBuffer(buf), 0) :\ + __Pyx__GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack)) +static int __Pyx__GetBufferAndValidate(Py_buffer* buf, PyObject* obj, + __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); +static void __Pyx_ZeroBuffer(Py_buffer* buf); +static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); +static Py_ssize_t __Pyx_minusones[] = { -1, -1, -1, -1, -1, -1, -1, -1 }; +static Py_ssize_t __Pyx_zeros[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; + +/* None.proto */ +static CYTHON_INLINE long __Pyx_mod_long(long, long); + +/* PyObjectCall.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); +#else +#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) +#endif + +/* PyThreadStateGet.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; +#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; +#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type +#else +#define __Pyx_PyThreadState_declare +#define __Pyx_PyThreadState_assign +#define __Pyx_PyErr_Occurred() PyErr_Occurred() +#endif + +/* PyErrFetchRestore.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) +#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) +#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) +#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) +#else +#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) +#endif +#else +#define __Pyx_PyErr_Clear() PyErr_Clear() +#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) +#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) +#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) +#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) +#endif + +/* RaiseException.proto */ +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); + 
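+/* The "*.proto" sections above and below only declare Cython's runtime
+   helpers; their definitions are emitted with the rest of the runtime
+   support code near the end of this generated file. */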
+/* GetModuleGlobalName.proto */ +static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name); + +/* None.proto */ +static CYTHON_INLINE long __Pyx_div_long(long, long); + +/* ExtTypeTest.proto */ +static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); + +#define __Pyx_BufPtrStrided1d(type, buf, i0, s0) (type)((char*)buf + i0 * s0) +#define __Pyx_BufPtrStrided2d(type, buf, i0, s0, i1, s1) (type)((char*)buf + i0 * s0 + i1 * s1) +#define __Pyx_BufPtrStrided3d(type, buf, i0, s0, i1, s1, i2, s2) (type)((char*)buf + i0 * s0 + i1 * s1 + i2 * s2) +/* DictGetItem.proto */ +#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY +static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) { + PyObject *value; + value = PyDict_GetItemWithError(d, key); + if (unlikely(!value)) { + if (!PyErr_Occurred()) { + PyObject* args = PyTuple_Pack(1, key); + if (likely(args)) + PyErr_SetObject(PyExc_KeyError, args); + Py_XDECREF(args); + } + return NULL; + } + Py_INCREF(value); + return value; +} +#else + #define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key) +#endif + +/* RaiseTooManyValuesToUnpack.proto */ +static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); + +/* RaiseNeedMoreValuesToUnpack.proto */ +static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); + +/* RaiseNoneIterError.proto */ +static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); + +/* SaveResetException.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); +#else +#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) +#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) +#endif + +/* PyErrExceptionMatches.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) +static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); +#else +#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) +#endif + +/* GetException.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) +static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#else +static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); +#endif + +/* Import.proto */ +static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); + +/* CLineInTraceback.proto */ +#ifdef CYTHON_CLINE_IN_TRACEBACK +#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? 
c_line : 0) +#else +static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); +#endif + +/* CodeObjectCache.proto */ +typedef struct { + PyCodeObject* code_object; + int code_line; +} __Pyx_CodeObjectCacheEntry; +struct __Pyx_CodeObjectCache { + int count; + int max_count; + __Pyx_CodeObjectCacheEntry* entries; +}; +static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; +static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); +static PyCodeObject *__pyx_find_code_object(int code_line); +static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); + +/* AddTraceback.proto */ +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename); + +/* BufferStructDeclare.proto */ +typedef struct { + Py_ssize_t shape, strides, suboffsets; +} __Pyx_Buf_DimInfo; +typedef struct { + size_t refcount; + Py_buffer pybuffer; +} __Pyx_Buffer; +typedef struct { + __Pyx_Buffer *rcbuffer; + char *data; + __Pyx_Buf_DimInfo diminfo[8]; +} __Pyx_LocalBuf_ND; + +#if PY_MAJOR_VERSION < 3 + static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); + static void __Pyx_ReleaseBuffer(Py_buffer *view); +#else + #define __Pyx_GetBuffer PyObject_GetBuffer + #define __Pyx_ReleaseBuffer PyBuffer_Release +#endif + + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); + +/* RealImag.proto */ +#if CYTHON_CCOMPLEX + #ifdef __cplusplus + #define __Pyx_CREAL(z) ((z).real()) + #define __Pyx_CIMAG(z) ((z).imag()) + #else + #define __Pyx_CREAL(z) (__real__(z)) + #define __Pyx_CIMAG(z) (__imag__(z)) + #endif +#else + #define __Pyx_CREAL(z) ((z).real) + #define __Pyx_CIMAG(z) ((z).imag) +#endif +#if defined(__cplusplus) && CYTHON_CCOMPLEX\ + && (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103) + #define __Pyx_SET_CREAL(z,x) ((z).real(x)) + #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) +#else + #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) + #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) +#endif + +/* Arithmetic.proto */ +#if CYTHON_CCOMPLEX + #define __Pyx_c_eq_float(a, b) ((a)==(b)) + #define __Pyx_c_sum_float(a, b) ((a)+(b)) + #define __Pyx_c_diff_float(a, b) ((a)-(b)) + #define __Pyx_c_prod_float(a, b) ((a)*(b)) + #define __Pyx_c_quot_float(a, b) ((a)/(b)) + #define __Pyx_c_neg_float(a) (-(a)) + #ifdef __cplusplus + #define __Pyx_c_is_zero_float(z) ((z)==(float)0) + #define __Pyx_c_conj_float(z) (::std::conj(z)) + #if 1 + #define __Pyx_c_abs_float(z) (::std::abs(z)) + #define __Pyx_c_pow_float(a, b) (::std::pow(a, b)) + #endif + #else + #define __Pyx_c_is_zero_float(z) ((z)==0) + #define __Pyx_c_conj_float(z) (conjf(z)) + #if 1 + #define __Pyx_c_abs_float(z) (cabsf(z)) + #define __Pyx_c_pow_float(a, b) (cpowf(a, b)) + #endif + #endif +#else + static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex); + static 
CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex); + #if 1 + static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex, __pyx_t_float_complex); + #endif +#endif + +/* Arithmetic.proto */ +#if CYTHON_CCOMPLEX + #define __Pyx_c_eq_double(a, b) ((a)==(b)) + #define __Pyx_c_sum_double(a, b) ((a)+(b)) + #define __Pyx_c_diff_double(a, b) ((a)-(b)) + #define __Pyx_c_prod_double(a, b) ((a)*(b)) + #define __Pyx_c_quot_double(a, b) ((a)/(b)) + #define __Pyx_c_neg_double(a) (-(a)) + #ifdef __cplusplus + #define __Pyx_c_is_zero_double(z) ((z)==(double)0) + #define __Pyx_c_conj_double(z) (::std::conj(z)) + #if 1 + #define __Pyx_c_abs_double(z) (::std::abs(z)) + #define __Pyx_c_pow_double(a, b) (::std::pow(a, b)) + #endif + #else + #define __Pyx_c_is_zero_double(z) ((z)==0) + #define __Pyx_c_conj_double(z) (conj(z)) + #if 1 + #define __Pyx_c_abs_double(z) (cabs(z)) + #define __Pyx_c_pow_double(a, b) (cpow(a, b)) + #endif + #endif +#else + static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex); + static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex); + #if 1 + static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex, __pyx_t_double_complex); + #endif +#endif + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value); + +/* CIntFromPy.proto */ +static CYTHON_INLINE unsigned int __Pyx_PyInt_As_unsigned_int(PyObject *); + +/* CIntFromPy.proto */ +static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); + +/* CIntFromPy.proto */ +static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); + +/* FastTypeChecks.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) +static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); +#else +#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) +#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) +#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) +#endif + +/* CheckBinaryVersion.proto */ +static int __Pyx_check_binary_version(void); + +/* PyIdentifierFromString.proto */ +#if 
!defined(__Pyx_PyIdentifier_FromString) +#if PY_MAJOR_VERSION < 3 + #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s) +#else + #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s) +#endif +#endif + +/* ModuleImport.proto */ +static PyObject *__Pyx_ImportModule(const char *name); + +/* TypeImport.proto */ +static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); + +/* InitStrings.proto */ +static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); + + +/* Module declarations from 'cpython.buffer' */ + +/* Module declarations from 'libc.string' */ + +/* Module declarations from 'libc.stdio' */ + +/* Module declarations from '__builtin__' */ + +/* Module declarations from 'cpython.type' */ +static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; + +/* Module declarations from 'cpython' */ + +/* Module declarations from 'cpython.object' */ + +/* Module declarations from 'cpython.ref' */ + +/* Module declarations from 'cpython.mem' */ + +/* Module declarations from 'numpy' */ + +/* Module declarations from 'numpy' */ +static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; +static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; +static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; +static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; +static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; +static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ + +/* Module declarations from 'cython' */ + +/* Module declarations from 'astropy.convolution.boundary_extend' */ +static CYTHON_INLINE int __pyx_f_7astropy_11convolution_15boundary_extend_int_max(int, int); /*proto*/ +static CYTHON_INLINE int __pyx_f_7astropy_11convolution_15boundary_extend_int_min(int, int); /*proto*/ +static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_7astropy_11convolution_15boundary_extend_DTYPE_t = { "DTYPE_t", NULL, sizeof(__pyx_t_7astropy_11convolution_15boundary_extend_DTYPE_t), { 0 }, 0, 'R', 0, 0 }; +#define __Pyx_MODULE_NAME "astropy.convolution.boundary_extend" +extern int __pyx_module_is_main_astropy__convolution__boundary_extend; +int __pyx_module_is_main_astropy__convolution__boundary_extend = 0; + +/* Implementation of 'astropy.convolution.boundary_extend' */ +static PyObject *__pyx_builtin_ValueError; +static PyObject *__pyx_builtin_range; +static PyObject *__pyx_builtin_RuntimeError; +static PyObject *__pyx_builtin_ImportError; +static const char __pyx_k_f[] = "f"; +static const char __pyx_k_g[] = "g"; +static const char __pyx_k_i[] = "i"; +static const char __pyx_k_j[] = "j"; +static const char __pyx_k_k[] = "k"; +static const char __pyx_k_ii[] = "ii"; +static const char __pyx_k_jj[] = "jj"; +static const char __pyx_k_kk[] = "kk"; +static const char __pyx_k_np[] = "np"; +static const char __pyx_k_nx[] = "nx"; +static const char __pyx_k_ny[] = "ny"; +static const char __pyx_k_nz[] = "nz"; +static const char __pyx_k_bot[] = "bot"; +static const char __pyx_k_iii[] = "iii"; +static const char __pyx_k_jjj[] = "jjj"; +static const char __pyx_k_ker[] = "ker"; +static const char __pyx_k_kkk[] = "kkk"; +static const char __pyx_k_nkx[] = "nkx"; +static const char __pyx_k_nky[] = "nky"; +static const char __pyx_k_nkz[] = "nkz"; +static const char __pyx_k_top[] = "top"; +static const char __pyx_k_val[] = "val"; +static const char __pyx_k_wkx[] = "wkx"; +static const char __pyx_k_wky[] = "wky"; +static const char __pyx_k_wkz[] = "wkz"; +static const char __pyx_k_conv[] = "conv"; +static const char 
__pyx_k_main[] = "__main__"; +static const char __pyx_k_test[] = "__test__"; +static const char __pyx_k_DTYPE[] = "DTYPE"; +static const char __pyx_k_dtype[] = "dtype"; +static const char __pyx_k_empty[] = "empty"; +static const char __pyx_k_float[] = "float"; +static const char __pyx_k_iimax[] = "iimax"; +static const char __pyx_k_iimin[] = "iimin"; +static const char __pyx_k_jjmax[] = "jjmax"; +static const char __pyx_k_jjmin[] = "jjmin"; +static const char __pyx_k_kkmax[] = "kkmax"; +static const char __pyx_k_kkmin[] = "kkmin"; +static const char __pyx_k_numpy[] = "numpy"; +static const char __pyx_k_range[] = "range"; +static const char __pyx_k_import[] = "__import__"; +static const char __pyx_k_ValueError[] = "ValueError"; +static const char __pyx_k_ImportError[] = "ImportError"; +static const char __pyx_k_RuntimeError[] = "RuntimeError"; +static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; +static const char __pyx_k_normalize_by_kernel[] = "normalize_by_kernel"; +static const char __pyx_k_convolve1d_boundary_extend[] = "convolve1d_boundary_extend"; +static const char __pyx_k_convolve2d_boundary_extend[] = "convolve2d_boundary_extend"; +static const char __pyx_k_convolve3d_boundary_extend[] = "convolve3d_boundary_extend"; +static const char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous"; +static const char __pyx_k_numpy_core_multiarray_failed_to[] = "numpy.core.multiarray failed to import"; +static const char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)"; +static const char __pyx_k_Convolution_kernel_must_have_odd[] = "Convolution kernel must have odd dimensions"; +static const char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd"; +static const char __pyx_k_Non_native_byte_order_not_suppor[] = "Non-native byte order not supported"; +static const char __pyx_k_astropy_convolution_boundary_ext[] = "astropy/convolution/boundary_extend.pyx"; +static const char __pyx_k_ndarray_is_not_Fortran_contiguou[] = "ndarray is not Fortran contiguous"; +static const char __pyx_k_numpy_core_umath_failed_to_impor[] = "numpy.core.umath failed to import"; +static const char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short."; +static const char __pyx_k_astropy_convolution_boundary_ext_2[] = "astropy.convolution.boundary_extend"; +static PyObject *__pyx_kp_s_Convolution_kernel_must_have_odd; +static PyObject *__pyx_n_s_DTYPE; +static PyObject *__pyx_kp_u_Format_string_allocated_too_shor; +static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2; +static PyObject *__pyx_n_s_ImportError; +static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor; +static PyObject *__pyx_n_s_RuntimeError; +static PyObject *__pyx_n_s_ValueError; +static PyObject *__pyx_kp_s_astropy_convolution_boundary_ext; +static PyObject *__pyx_n_s_astropy_convolution_boundary_ext_2; +static PyObject *__pyx_n_s_bot; +static PyObject *__pyx_n_s_cline_in_traceback; +static PyObject *__pyx_n_s_conv; +static PyObject *__pyx_n_s_convolve1d_boundary_extend; +static PyObject *__pyx_n_s_convolve2d_boundary_extend; +static PyObject *__pyx_n_s_convolve3d_boundary_extend; +static PyObject *__pyx_n_s_dtype; +static PyObject *__pyx_n_s_empty; +static PyObject *__pyx_n_s_f; +static PyObject *__pyx_n_s_float; +static PyObject *__pyx_n_s_g; +static PyObject *__pyx_n_s_i; +static PyObject *__pyx_n_s_ii; +static PyObject *__pyx_n_s_iii; +static PyObject *__pyx_n_s_iimax; +static 
PyObject *__pyx_n_s_iimin; +static PyObject *__pyx_n_s_import; +static PyObject *__pyx_n_s_j; +static PyObject *__pyx_n_s_jj; +static PyObject *__pyx_n_s_jjj; +static PyObject *__pyx_n_s_jjmax; +static PyObject *__pyx_n_s_jjmin; +static PyObject *__pyx_n_s_k; +static PyObject *__pyx_n_s_ker; +static PyObject *__pyx_n_s_kk; +static PyObject *__pyx_n_s_kkk; +static PyObject *__pyx_n_s_kkmax; +static PyObject *__pyx_n_s_kkmin; +static PyObject *__pyx_n_s_main; +static PyObject *__pyx_kp_u_ndarray_is_not_C_contiguous; +static PyObject *__pyx_kp_u_ndarray_is_not_Fortran_contiguou; +static PyObject *__pyx_n_s_nkx; +static PyObject *__pyx_n_s_nky; +static PyObject *__pyx_n_s_nkz; +static PyObject *__pyx_n_s_normalize_by_kernel; +static PyObject *__pyx_n_s_np; +static PyObject *__pyx_n_s_numpy; +static PyObject *__pyx_kp_s_numpy_core_multiarray_failed_to; +static PyObject *__pyx_kp_s_numpy_core_umath_failed_to_impor; +static PyObject *__pyx_n_s_nx; +static PyObject *__pyx_n_s_ny; +static PyObject *__pyx_n_s_nz; +static PyObject *__pyx_n_s_range; +static PyObject *__pyx_n_s_test; +static PyObject *__pyx_n_s_top; +static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd; +static PyObject *__pyx_n_s_val; +static PyObject *__pyx_n_s_wkx; +static PyObject *__pyx_n_s_wky; +static PyObject *__pyx_n_s_wkz; +static PyObject *__pyx_pf_7astropy_11convolution_15boundary_extend_convolve1d_boundary_extend(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_f, PyArrayObject *__pyx_v_g, int __pyx_v_normalize_by_kernel); /* proto */ +static PyObject *__pyx_pf_7astropy_11convolution_15boundary_extend_2convolve2d_boundary_extend(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_f, PyArrayObject *__pyx_v_g, int __pyx_v_normalize_by_kernel); /* proto */ +static PyObject *__pyx_pf_7astropy_11convolution_15boundary_extend_4convolve3d_boundary_extend(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_f, PyArrayObject *__pyx_v_g, int __pyx_v_normalize_by_kernel); /* proto */ +static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ +static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */ +static PyObject *__pyx_tuple_; +static PyObject *__pyx_tuple__2; +static PyObject *__pyx_tuple__3; +static PyObject *__pyx_tuple__4; +static PyObject *__pyx_tuple__5; +static PyObject *__pyx_tuple__6; +static PyObject *__pyx_tuple__7; +static PyObject *__pyx_tuple__8; +static PyObject *__pyx_tuple__9; +static PyObject *__pyx_tuple__10; +static PyObject *__pyx_tuple__11; +static PyObject *__pyx_tuple__12; +static PyObject *__pyx_tuple__13; +static PyObject *__pyx_tuple__15; +static PyObject *__pyx_tuple__17; +static PyObject *__pyx_codeobj__14; +static PyObject *__pyx_codeobj__16; +static PyObject *__pyx_codeobj__18; + +/* "astropy/convolution/boundary_extend.pyx":9 + * ctypedef np.float_t DTYPE_t + * + * cdef inline int int_max(int a, int b) nogil: return a if a >= b else b # <<<<<<<<<<<<<< + * cdef inline int int_min(int a, int b) nogil: return a if a <= b else b + * + */ + +static CYTHON_INLINE int __pyx_f_7astropy_11convolution_15boundary_extend_int_max(int __pyx_v_a, int __pyx_v_b) { + int __pyx_r; + int __pyx_t_1; + if (((__pyx_v_a >= __pyx_v_b) != 0)) { + __pyx_t_1 = __pyx_v_a; + } else { + __pyx_t_1 = __pyx_v_b; + } + __pyx_r = __pyx_t_1; + goto __pyx_L0; + + /* function exit code */ + __pyx_L0:; + return __pyx_r; +} + +/* 
"astropy/convolution/boundary_extend.pyx":10 + * + * cdef inline int int_max(int a, int b) nogil: return a if a >= b else b + * cdef inline int int_min(int a, int b) nogil: return a if a <= b else b # <<<<<<<<<<<<<< + * + * cdef extern from "numpy/npy_math.h" nogil: + */ + +static CYTHON_INLINE int __pyx_f_7astropy_11convolution_15boundary_extend_int_min(int __pyx_v_a, int __pyx_v_b) { + int __pyx_r; + int __pyx_t_1; + if (((__pyx_v_a <= __pyx_v_b) != 0)) { + __pyx_t_1 = __pyx_v_a; + } else { + __pyx_t_1 = __pyx_v_b; + } + __pyx_r = __pyx_t_1; + goto __pyx_L0; + + /* function exit code */ + __pyx_L0:; + return __pyx_r; +} + +/* "astropy/convolution/boundary_extend.pyx":19 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve1d_boundary_extend(np.ndarray[DTYPE_t, ndim=1] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=1] g, + * bint normalize_by_kernel + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7astropy_11convolution_15boundary_extend_1convolve1d_boundary_extend(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyMethodDef __pyx_mdef_7astropy_11convolution_15boundary_extend_1convolve1d_boundary_extend = {"convolve1d_boundary_extend", (PyCFunction)__pyx_pw_7astropy_11convolution_15boundary_extend_1convolve1d_boundary_extend, METH_VARARGS|METH_KEYWORDS, 0}; +static PyObject *__pyx_pw_7astropy_11convolution_15boundary_extend_1convolve1d_boundary_extend(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyArrayObject *__pyx_v_f = 0; + PyArrayObject *__pyx_v_g = 0; + int __pyx_v_normalize_by_kernel; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("convolve1d_boundary_extend (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_f,&__pyx_n_s_g,&__pyx_n_s_normalize_by_kernel,0}; + PyObject* values[3] = {0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_f)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_g)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("convolve1d_boundary_extend", 1, 3, 3, 1); __PYX_ERR(0, 19, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_normalize_by_kernel)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("convolve1d_boundary_extend", 1, 3, 3, 2); __PYX_ERR(0, 19, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "convolve1d_boundary_extend") < 0)) __PYX_ERR(0, 19, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + } + __pyx_v_f = ((PyArrayObject *)values[0]); + __pyx_v_g = ((PyArrayObject *)values[1]); + 
__pyx_v_normalize_by_kernel = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_normalize_by_kernel == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 21, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("convolve1d_boundary_extend", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 19, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("astropy.convolution.boundary_extend.convolve1d_boundary_extend", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_f), __pyx_ptype_5numpy_ndarray, 1, "f", 0))) __PYX_ERR(0, 19, __pyx_L1_error) + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_g), __pyx_ptype_5numpy_ndarray, 1, "g", 0))) __PYX_ERR(0, 20, __pyx_L1_error) + __pyx_r = __pyx_pf_7astropy_11convolution_15boundary_extend_convolve1d_boundary_extend(__pyx_self, __pyx_v_f, __pyx_v_g, __pyx_v_normalize_by_kernel); + + /* function exit code */ + goto __pyx_L0; + __pyx_L1_error:; + __pyx_r = NULL; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7astropy_11convolution_15boundary_extend_convolve1d_boundary_extend(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_f, PyArrayObject *__pyx_v_g, int __pyx_v_normalize_by_kernel) { + int __pyx_v_nx; + int __pyx_v_nkx; + int __pyx_v_wkx; + PyArrayObject *__pyx_v_conv = 0; + unsigned int __pyx_v_i; + unsigned int __pyx_v_iii; + int __pyx_v_ii; + int __pyx_v_iimin; + int __pyx_v_iimax; + __pyx_t_7astropy_11convolution_15boundary_extend_DTYPE_t __pyx_v_top; + __pyx_t_7astropy_11convolution_15boundary_extend_DTYPE_t __pyx_v_bot; + __pyx_t_7astropy_11convolution_15boundary_extend_DTYPE_t __pyx_v_ker; + __pyx_t_7astropy_11convolution_15boundary_extend_DTYPE_t __pyx_v_val; + __Pyx_LocalBuf_ND __pyx_pybuffernd_conv; + __Pyx_Buffer __pyx_pybuffer_conv; + __Pyx_LocalBuf_ND __pyx_pybuffernd_f; + __Pyx_Buffer __pyx_pybuffer_f; + __Pyx_LocalBuf_ND __pyx_pybuffernd_g; + __Pyx_Buffer __pyx_pybuffer_g; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + int __pyx_t_5; + PyObject *__pyx_t_6 = NULL; + PyArrayObject *__pyx_t_7 = NULL; + int __pyx_t_8; + unsigned int __pyx_t_9; + int __pyx_t_10; + int __pyx_t_11; + size_t __pyx_t_12; + size_t __pyx_t_13; + size_t __pyx_t_14; + size_t __pyx_t_15; + size_t __pyx_t_16; + size_t __pyx_t_17; + __Pyx_RefNannySetupContext("convolve1d_boundary_extend", 0); + __pyx_pybuffer_conv.pybuffer.buf = NULL; + __pyx_pybuffer_conv.refcount = 0; + __pyx_pybuffernd_conv.data = NULL; + __pyx_pybuffernd_conv.rcbuffer = &__pyx_pybuffer_conv; + __pyx_pybuffer_f.pybuffer.buf = NULL; + __pyx_pybuffer_f.refcount = 0; + __pyx_pybuffernd_f.data = NULL; + __pyx_pybuffernd_f.rcbuffer = &__pyx_pybuffer_f; + __pyx_pybuffer_g.pybuffer.buf = NULL; + __pyx_pybuffer_g.refcount = 0; + __pyx_pybuffernd_g.data = NULL; + __pyx_pybuffernd_g.rcbuffer = &__pyx_pybuffer_g; + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_f.rcbuffer->pybuffer, (PyObject*)__pyx_v_f, &__Pyx_TypeInfo_nn___pyx_t_7astropy_11convolution_15boundary_extend_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) __PYX_ERR(0, 19, __pyx_L1_error) + } + __pyx_pybuffernd_f.diminfo[0].strides = __pyx_pybuffernd_f.rcbuffer->pybuffer.strides[0]; 
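/* For orientation: a minimal pure-Python sketch of what this routine
 * computes, reconstructed from the .pyx annotations embedded in this
 * generated file. `isnan` (from math) stands in for npy_isnan, and the
 * `_sketch` name marks this as an illustration, not the compiled API:
 *
 *     import numpy as np
 *     from math import isnan
 *
 *     def convolve1d_boundary_extend_sketch(f, g, normalize_by_kernel):
 *         if g.shape[0] % 2 != 1:
 *             raise ValueError("Convolution kernel must have odd dimensions")
 *         nx, nkx = f.shape[0], g.shape[0]
 *         wkx = nkx // 2
 *         conv = np.empty([nx], dtype=float)
 *         for i in range(nx):
 *             top = bot = 0.
 *             for ii in range(i - wkx, i + wkx + 1):
 *                 iii = min(max(ii, 0), nx - 1)      # "extend": clamp to edges
 *                 val = f[iii]
 *                 ker = g[nkx - 1 - (wkx + ii - i)]  # flipped kernel index
 *                 if not isnan(val):
 *                     top += val * ker
 *                     bot += ker
 *             if normalize_by_kernel:
 *                 conv[i] = f[i] if bot == 0 else top / bot
 *             else:
 *                 conv[i] = top
 *         return conv
 */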
__pyx_pybuffernd_f.diminfo[0].shape = __pyx_pybuffernd_f.rcbuffer->pybuffer.shape[0]; + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_g.rcbuffer->pybuffer, (PyObject*)__pyx_v_g, &__Pyx_TypeInfo_nn___pyx_t_7astropy_11convolution_15boundary_extend_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) __PYX_ERR(0, 19, __pyx_L1_error) + } + __pyx_pybuffernd_g.diminfo[0].strides = __pyx_pybuffernd_g.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_g.diminfo[0].shape = __pyx_pybuffernd_g.rcbuffer->pybuffer.shape[0]; + + /* "astropy/convolution/boundary_extend.pyx":24 + * ): + * + * if g.shape[0] % 2 != 1: # <<<<<<<<<<<<<< + * raise ValueError("Convolution kernel must have odd dimensions") + * + */ + __pyx_t_1 = ((__Pyx_mod_long((__pyx_v_g->dimensions[0]), 2) != 1) != 0); + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_extend.pyx":25 + * + * if g.shape[0] % 2 != 1: + * raise ValueError("Convolution kernel must have odd dimensions") # <<<<<<<<<<<<<< + * + * assert f.dtype == DTYPE and g.dtype == DTYPE + */ + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 25, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_Raise(__pyx_t_2, 0, 0, 0); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __PYX_ERR(0, 25, __pyx_L1_error) + + /* "astropy/convolution/boundary_extend.pyx":24 + * ): + * + * if g.shape[0] % 2 != 1: # <<<<<<<<<<<<<< + * raise ValueError("Convolution kernel must have odd dimensions") + * + */ + } + + /* "astropy/convolution/boundary_extend.pyx":27 + * raise ValueError("Convolution kernel must have odd dimensions") + * + * assert f.dtype == DTYPE and g.dtype == DTYPE # <<<<<<<<<<<<<< + * + * cdef int nx = f.shape[0] + */ + #ifndef CYTHON_WITHOUT_ASSERTIONS + if (unlikely(!Py_OptimizeFlag)) { + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_f), __pyx_n_s_dtype); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 27, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 27, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_t_2, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 27, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 27, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_5) { + } else { + __pyx_t_1 = __pyx_t_5; + goto __pyx_L4_bool_binop_done; + } + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_g), __pyx_n_s_dtype); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 27, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 27, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_2 = PyObject_RichCompare(__pyx_t_4, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 27, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 27, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_1 = __pyx_t_5; + __pyx_L4_bool_binop_done:; + if (unlikely(!__pyx_t_1)) { + PyErr_SetNone(PyExc_AssertionError); + __PYX_ERR(0, 27, __pyx_L1_error) + } + } + #endif + + /* 
"astropy/convolution/boundary_extend.pyx":29 + * assert f.dtype == DTYPE and g.dtype == DTYPE + * + * cdef int nx = f.shape[0] # <<<<<<<<<<<<<< + * cdef int nkx = g.shape[0] + * cdef int wkx = nkx // 2 + */ + __pyx_v_nx = (__pyx_v_f->dimensions[0]); + + /* "astropy/convolution/boundary_extend.pyx":30 + * + * cdef int nx = f.shape[0] + * cdef int nkx = g.shape[0] # <<<<<<<<<<<<<< + * cdef int wkx = nkx // 2 + * cdef np.ndarray[DTYPE_t, ndim=1] conv = np.empty([nx], dtype=DTYPE) + */ + __pyx_v_nkx = (__pyx_v_g->dimensions[0]); + + /* "astropy/convolution/boundary_extend.pyx":31 + * cdef int nx = f.shape[0] + * cdef int nkx = g.shape[0] + * cdef int wkx = nkx // 2 # <<<<<<<<<<<<<< + * cdef np.ndarray[DTYPE_t, ndim=1] conv = np.empty([nx], dtype=DTYPE) + * cdef unsigned int i, iii + */ + __pyx_v_wkx = __Pyx_div_long(__pyx_v_nkx, 2); + + /* "astropy/convolution/boundary_extend.pyx":32 + * cdef int nkx = g.shape[0] + * cdef int wkx = nkx // 2 + * cdef np.ndarray[DTYPE_t, ndim=1] conv = np.empty([nx], dtype=DTYPE) # <<<<<<<<<<<<<< + * cdef unsigned int i, iii + * cdef int ii + */ + __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 32, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_empty); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 32, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_nx); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 32, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_4 = PyList_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 32, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_GIVEREF(__pyx_t_2); + PyList_SET_ITEM(__pyx_t_4, 0, __pyx_t_2); + __pyx_t_2 = 0; + __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 32, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_GIVEREF(__pyx_t_4); + PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4); + __pyx_t_4 = 0; + __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 32, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 32, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_6) < 0) __PYX_ERR(0, 32, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_2, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 32, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (!(likely(((__pyx_t_6) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_6, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 32, __pyx_L1_error) + __pyx_t_7 = ((PyArrayObject *)__pyx_t_6); + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_conv.rcbuffer->pybuffer, (PyObject*)__pyx_t_7, &__Pyx_TypeInfo_nn___pyx_t_7astropy_11convolution_15boundary_extend_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) { + __pyx_v_conv = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf = NULL; + __PYX_ERR(0, 32, __pyx_L1_error) + } else {__pyx_pybuffernd_conv.diminfo[0].strides = __pyx_pybuffernd_conv.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_conv.diminfo[0].shape = __pyx_pybuffernd_conv.rcbuffer->pybuffer.shape[0]; + } + } + __pyx_t_7 = 0; + __pyx_v_conv = 
((PyArrayObject *)__pyx_t_6); + __pyx_t_6 = 0; + + /* "astropy/convolution/boundary_extend.pyx":41 + * + * # release the GIL + * with nogil: # <<<<<<<<<<<<<< + * + * # Now run the proper convolution + */ + { + #ifdef WITH_THREAD + PyThreadState *_save; + Py_UNBLOCK_THREADS + __Pyx_FastGIL_Remember(); + #endif + /*try:*/ { + + /* "astropy/convolution/boundary_extend.pyx":44 + * + * # Now run the proper convolution + * for i in range(nx): # <<<<<<<<<<<<<< + * top = 0. + * bot = 0. + */ + __pyx_t_8 = __pyx_v_nx; + for (__pyx_t_9 = 0; __pyx_t_9 < __pyx_t_8; __pyx_t_9+=1) { + __pyx_v_i = __pyx_t_9; + + /* "astropy/convolution/boundary_extend.pyx":45 + * # Now run the proper convolution + * for i in range(nx): + * top = 0. # <<<<<<<<<<<<<< + * bot = 0. + * iimin = i - wkx + */ + __pyx_v_top = 0.; + + /* "astropy/convolution/boundary_extend.pyx":46 + * for i in range(nx): + * top = 0. + * bot = 0. # <<<<<<<<<<<<<< + * iimin = i - wkx + * iimax = i + wkx + 1 + */ + __pyx_v_bot = 0.; + + /* "astropy/convolution/boundary_extend.pyx":47 + * top = 0. + * bot = 0. + * iimin = i - wkx # <<<<<<<<<<<<<< + * iimax = i + wkx + 1 + * for ii in range(iimin, iimax): + */ + __pyx_v_iimin = (__pyx_v_i - __pyx_v_wkx); + + /* "astropy/convolution/boundary_extend.pyx":48 + * bot = 0. + * iimin = i - wkx + * iimax = i + wkx + 1 # <<<<<<<<<<<<<< + * for ii in range(iimin, iimax): + * iii = int_min(int_max(ii, 0), nx - 1) + */ + __pyx_v_iimax = ((__pyx_v_i + __pyx_v_wkx) + 1); + + /* "astropy/convolution/boundary_extend.pyx":49 + * iimin = i - wkx + * iimax = i + wkx + 1 + * for ii in range(iimin, iimax): # <<<<<<<<<<<<<< + * iii = int_min(int_max(ii, 0), nx - 1) + * val = f[iii] + */ + __pyx_t_10 = __pyx_v_iimax; + for (__pyx_t_11 = __pyx_v_iimin; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) { + __pyx_v_ii = __pyx_t_11; + + /* "astropy/convolution/boundary_extend.pyx":50 + * iimax = i + wkx + 1 + * for ii in range(iimin, iimax): + * iii = int_min(int_max(ii, 0), nx - 1) # <<<<<<<<<<<<<< + * val = f[iii] + * ker = g[(nkx - 1 - (wkx + ii - i))] + */ + __pyx_v_iii = __pyx_f_7astropy_11convolution_15boundary_extend_int_min(__pyx_f_7astropy_11convolution_15boundary_extend_int_max(__pyx_v_ii, 0), (__pyx_v_nx - 1)); + + /* "astropy/convolution/boundary_extend.pyx":51 + * for ii in range(iimin, iimax): + * iii = int_min(int_max(ii, 0), nx - 1) + * val = f[iii] # <<<<<<<<<<<<<< + * ker = g[(nkx - 1 - (wkx + ii - i))] + * if not npy_isnan(val): + */ + __pyx_t_12 = __pyx_v_iii; + __pyx_v_val = (*__Pyx_BufPtrStrided1d(__pyx_t_7astropy_11convolution_15boundary_extend_DTYPE_t *, __pyx_pybuffernd_f.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_f.diminfo[0].strides)); + + /* "astropy/convolution/boundary_extend.pyx":52 + * iii = int_min(int_max(ii, 0), nx - 1) + * val = f[iii] + * ker = g[(nkx - 1 - (wkx + ii - i))] # <<<<<<<<<<<<<< + * if not npy_isnan(val): + * top += val * ker + */ + __pyx_t_13 = ((unsigned int)((__pyx_v_nkx - 1) - ((__pyx_v_wkx + __pyx_v_ii) - __pyx_v_i))); + __pyx_v_ker = (*__Pyx_BufPtrStrided1d(__pyx_t_7astropy_11convolution_15boundary_extend_DTYPE_t *, __pyx_pybuffernd_g.rcbuffer->pybuffer.buf, __pyx_t_13, __pyx_pybuffernd_g.diminfo[0].strides)); + + /* "astropy/convolution/boundary_extend.pyx":53 + * val = f[iii] + * ker = g[(nkx - 1 - (wkx + ii - i))] + * if not npy_isnan(val): # <<<<<<<<<<<<<< + * top += val * ker + * bot += ker + */ + __pyx_t_1 = ((!(npy_isnan(__pyx_v_val) != 0)) != 0); + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_extend.pyx":54 + * ker = g[(nkx - 1 - (wkx + ii - i))] + * if 
not npy_isnan(val): + * top += val * ker # <<<<<<<<<<<<<< + * bot += ker + * if normalize_by_kernel: + */ + __pyx_v_top = (__pyx_v_top + (__pyx_v_val * __pyx_v_ker)); + + /* "astropy/convolution/boundary_extend.pyx":55 + * if not npy_isnan(val): + * top += val * ker + * bot += ker # <<<<<<<<<<<<<< + * if normalize_by_kernel: + * if bot == 0: + */ + __pyx_v_bot = (__pyx_v_bot + __pyx_v_ker); + + /* "astropy/convolution/boundary_extend.pyx":53 + * val = f[iii] + * ker = g[(nkx - 1 - (wkx + ii - i))] + * if not npy_isnan(val): # <<<<<<<<<<<<<< + * top += val * ker + * bot += ker + */ + } + } + + /* "astropy/convolution/boundary_extend.pyx":56 + * top += val * ker + * bot += ker + * if normalize_by_kernel: # <<<<<<<<<<<<<< + * if bot == 0: + * conv[i] = f[i] + */ + __pyx_t_1 = (__pyx_v_normalize_by_kernel != 0); + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_extend.pyx":57 + * bot += ker + * if normalize_by_kernel: + * if bot == 0: # <<<<<<<<<<<<<< + * conv[i] = f[i] + * else: + */ + __pyx_t_1 = ((__pyx_v_bot == 0.0) != 0); + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_extend.pyx":58 + * if normalize_by_kernel: + * if bot == 0: + * conv[i] = f[i] # <<<<<<<<<<<<<< + * else: + * conv[i] = top / bot + */ + __pyx_t_14 = __pyx_v_i; + __pyx_t_15 = __pyx_v_i; + *__Pyx_BufPtrStrided1d(__pyx_t_7astropy_11convolution_15boundary_extend_DTYPE_t *, __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_conv.diminfo[0].strides) = (*__Pyx_BufPtrStrided1d(__pyx_t_7astropy_11convolution_15boundary_extend_DTYPE_t *, __pyx_pybuffernd_f.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_f.diminfo[0].strides)); + + /* "astropy/convolution/boundary_extend.pyx":57 + * bot += ker + * if normalize_by_kernel: + * if bot == 0: # <<<<<<<<<<<<<< + * conv[i] = f[i] + * else: + */ + goto __pyx_L15; + } + + /* "astropy/convolution/boundary_extend.pyx":60 + * conv[i] = f[i] + * else: + * conv[i] = top / bot # <<<<<<<<<<<<<< + * else: + * conv[i] = top + */ + /*else*/ { + if (unlikely(__pyx_v_bot == 0)) { + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + PyErr_SetString(PyExc_ZeroDivisionError, "float division"); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + __PYX_ERR(0, 60, __pyx_L7_error) + } + __pyx_t_16 = __pyx_v_i; + *__Pyx_BufPtrStrided1d(__pyx_t_7astropy_11convolution_15boundary_extend_DTYPE_t *, __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_conv.diminfo[0].strides) = (__pyx_v_top / __pyx_v_bot); + } + __pyx_L15:; + + /* "astropy/convolution/boundary_extend.pyx":56 + * top += val * ker + * bot += ker + * if normalize_by_kernel: # <<<<<<<<<<<<<< + * if bot == 0: + * conv[i] = f[i] + */ + goto __pyx_L14; + } + + /* "astropy/convolution/boundary_extend.pyx":62 + * conv[i] = top / bot + * else: + * conv[i] = top # <<<<<<<<<<<<<< + * # GIL acquired again here + * return conv + */ + /*else*/ { + __pyx_t_17 = __pyx_v_i; + *__Pyx_BufPtrStrided1d(__pyx_t_7astropy_11convolution_15boundary_extend_DTYPE_t *, __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_conv.diminfo[0].strides) = __pyx_v_top; + } + __pyx_L14:; + } + } + + /* "astropy/convolution/boundary_extend.pyx":41 + * + * # release the GIL + * with nogil: # <<<<<<<<<<<<<< + * + * # Now run the proper convolution + */ + /*finally:*/ { + /*normal exit:*/{ + #ifdef WITH_THREAD + __Pyx_FastGIL_Forget(); + Py_BLOCK_THREADS + #endif + goto __pyx_L8; + } + __pyx_L7_error: { + #ifdef WITH_THREAD + 
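/* Note on the generated division above: Cython emits a runtime
 * ZeroDivisionError guard (briefly re-acquiring the GIL to raise) for
 * `top / bot`, even though that branch is only reached after the
 * `bot == 0` case has been handled; the guard is a consequence of
 * Python division semantics being in effect for this module.
 */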
__Pyx_FastGIL_Forget(); + Py_BLOCK_THREADS + #endif + goto __pyx_L1_error; + } + __pyx_L8:; + } + } + + /* "astropy/convolution/boundary_extend.pyx":64 + * conv[i] = top + * # GIL acquired again here + * return conv # <<<<<<<<<<<<<< + * + * + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject *)__pyx_v_conv)); + __pyx_r = ((PyObject *)__pyx_v_conv); + goto __pyx_L0; + + /* "astropy/convolution/boundary_extend.pyx":19 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve1d_boundary_extend(np.ndarray[DTYPE_t, ndim=1] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=1] g, + * bint normalize_by_kernel + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_6); + { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_conv.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_f.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_g.rcbuffer->pybuffer); + __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} + __Pyx_AddTraceback("astropy.convolution.boundary_extend.convolve1d_boundary_extend", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + goto __pyx_L2; + __pyx_L0:; + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_conv.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_f.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_g.rcbuffer->pybuffer); + __pyx_L2:; + __Pyx_XDECREF((PyObject *)__pyx_v_conv); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "astropy/convolution/boundary_extend.pyx":68 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve2d_boundary_extend(np.ndarray[DTYPE_t, ndim=2] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=2] g, + * bint normalize_by_kernel): + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7astropy_11convolution_15boundary_extend_3convolve2d_boundary_extend(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyMethodDef __pyx_mdef_7astropy_11convolution_15boundary_extend_3convolve2d_boundary_extend = {"convolve2d_boundary_extend", (PyCFunction)__pyx_pw_7astropy_11convolution_15boundary_extend_3convolve2d_boundary_extend, METH_VARARGS|METH_KEYWORDS, 0}; +static PyObject *__pyx_pw_7astropy_11convolution_15boundary_extend_3convolve2d_boundary_extend(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyArrayObject *__pyx_v_f = 0; + PyArrayObject *__pyx_v_g = 0; + int __pyx_v_normalize_by_kernel; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("convolve2d_boundary_extend (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_f,&__pyx_n_s_g,&__pyx_n_s_normalize_by_kernel,0}; + PyObject* values[3] = {0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if 
(likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_f)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_g)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("convolve2d_boundary_extend", 1, 3, 3, 1); __PYX_ERR(0, 68, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_normalize_by_kernel)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("convolve2d_boundary_extend", 1, 3, 3, 2); __PYX_ERR(0, 68, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "convolve2d_boundary_extend") < 0)) __PYX_ERR(0, 68, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + } + __pyx_v_f = ((PyArrayObject *)values[0]); + __pyx_v_g = ((PyArrayObject *)values[1]); + __pyx_v_normalize_by_kernel = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_normalize_by_kernel == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 70, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("convolve2d_boundary_extend", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 68, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("astropy.convolution.boundary_extend.convolve2d_boundary_extend", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_f), __pyx_ptype_5numpy_ndarray, 1, "f", 0))) __PYX_ERR(0, 68, __pyx_L1_error) + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_g), __pyx_ptype_5numpy_ndarray, 1, "g", 0))) __PYX_ERR(0, 69, __pyx_L1_error) + __pyx_r = __pyx_pf_7astropy_11convolution_15boundary_extend_2convolve2d_boundary_extend(__pyx_self, __pyx_v_f, __pyx_v_g, __pyx_v_normalize_by_kernel); + + /* function exit code */ + goto __pyx_L0; + __pyx_L1_error:; + __pyx_r = NULL; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7astropy_11convolution_15boundary_extend_2convolve2d_boundary_extend(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_f, PyArrayObject *__pyx_v_g, int __pyx_v_normalize_by_kernel) { + int __pyx_v_nx; + int __pyx_v_ny; + int __pyx_v_nkx; + int __pyx_v_nky; + int __pyx_v_wkx; + int __pyx_v_wky; + PyArrayObject *__pyx_v_conv = 0; + unsigned int __pyx_v_i; + unsigned int __pyx_v_j; + unsigned int __pyx_v_iii; + unsigned int __pyx_v_jjj; + int __pyx_v_ii; + int __pyx_v_jj; + int __pyx_v_iimin; + int __pyx_v_iimax; + int __pyx_v_jjmin; + int __pyx_v_jjmax; + __pyx_t_7astropy_11convolution_15boundary_extend_DTYPE_t __pyx_v_top; + __pyx_t_7astropy_11convolution_15boundary_extend_DTYPE_t __pyx_v_bot; + __pyx_t_7astropy_11convolution_15boundary_extend_DTYPE_t __pyx_v_ker; + __pyx_t_7astropy_11convolution_15boundary_extend_DTYPE_t __pyx_v_val; + __Pyx_LocalBuf_ND __pyx_pybuffernd_conv; + __Pyx_Buffer __pyx_pybuffer_conv; + __Pyx_LocalBuf_ND __pyx_pybuffernd_f; + __Pyx_Buffer __pyx_pybuffer_f; + __Pyx_LocalBuf_ND __pyx_pybuffernd_g; + __Pyx_Buffer __pyx_pybuffer_g; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + 
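/* The METH_VARARGS|METH_KEYWORDS wrappers in this module each parse
 * exactly three arguments, positionally or by keyword, and type-check
 * f and g as ndarrays. Illustrative calls (f2 and g2 are hypothetical
 * 2-D float arrays):
 *
 *     convolve2d_boundary_extend(f2, g2, True)
 *     convolve2d_boundary_extend(f=f2, g=g2, normalize_by_kernel=True)
 *     convolve2d_boundary_extend(f2, g2)   # raises TypeError: 3 required
 */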
PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyArrayObject *__pyx_t_7 = NULL; + int __pyx_t_8; + unsigned int __pyx_t_9; + int __pyx_t_10; + unsigned int __pyx_t_11; + int __pyx_t_12; + int __pyx_t_13; + int __pyx_t_14; + int __pyx_t_15; + size_t __pyx_t_16; + size_t __pyx_t_17; + size_t __pyx_t_18; + size_t __pyx_t_19; + size_t __pyx_t_20; + size_t __pyx_t_21; + size_t __pyx_t_22; + size_t __pyx_t_23; + size_t __pyx_t_24; + size_t __pyx_t_25; + size_t __pyx_t_26; + size_t __pyx_t_27; + __Pyx_RefNannySetupContext("convolve2d_boundary_extend", 0); + __pyx_pybuffer_conv.pybuffer.buf = NULL; + __pyx_pybuffer_conv.refcount = 0; + __pyx_pybuffernd_conv.data = NULL; + __pyx_pybuffernd_conv.rcbuffer = &__pyx_pybuffer_conv; + __pyx_pybuffer_f.pybuffer.buf = NULL; + __pyx_pybuffer_f.refcount = 0; + __pyx_pybuffernd_f.data = NULL; + __pyx_pybuffernd_f.rcbuffer = &__pyx_pybuffer_f; + __pyx_pybuffer_g.pybuffer.buf = NULL; + __pyx_pybuffer_g.refcount = 0; + __pyx_pybuffernd_g.data = NULL; + __pyx_pybuffernd_g.rcbuffer = &__pyx_pybuffer_g; + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_f.rcbuffer->pybuffer, (PyObject*)__pyx_v_f, &__Pyx_TypeInfo_nn___pyx_t_7astropy_11convolution_15boundary_extend_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 68, __pyx_L1_error) + } + __pyx_pybuffernd_f.diminfo[0].strides = __pyx_pybuffernd_f.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_f.diminfo[0].shape = __pyx_pybuffernd_f.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_f.diminfo[1].strides = __pyx_pybuffernd_f.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_f.diminfo[1].shape = __pyx_pybuffernd_f.rcbuffer->pybuffer.shape[1]; + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_g.rcbuffer->pybuffer, (PyObject*)__pyx_v_g, &__Pyx_TypeInfo_nn___pyx_t_7astropy_11convolution_15boundary_extend_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 68, __pyx_L1_error) + } + __pyx_pybuffernd_g.diminfo[0].strides = __pyx_pybuffernd_g.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_g.diminfo[0].shape = __pyx_pybuffernd_g.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_g.diminfo[1].strides = __pyx_pybuffernd_g.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_g.diminfo[1].shape = __pyx_pybuffernd_g.rcbuffer->pybuffer.shape[1]; + + /* "astropy/convolution/boundary_extend.pyx":72 + * bint normalize_by_kernel): + * + * if g.shape[0] % 2 != 1 or g.shape[1] % 2 != 1: # <<<<<<<<<<<<<< + * raise ValueError("Convolution kernel must have odd dimensions") + * + */ + __pyx_t_2 = ((__Pyx_mod_long((__pyx_v_g->dimensions[0]), 2) != 1) != 0); + if (!__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L4_bool_binop_done; + } + __pyx_t_2 = ((__Pyx_mod_long((__pyx_v_g->dimensions[1]), 2) != 1) != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L4_bool_binop_done:; + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_extend.pyx":73 + * + * if g.shape[0] % 2 != 1 or g.shape[1] % 2 != 1: + * raise ValueError("Convolution kernel must have odd dimensions") # <<<<<<<<<<<<<< + * + * assert f.dtype == DTYPE and g.dtype == DTYPE + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 73, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(0, 73, __pyx_L1_error) + + /* 
"astropy/convolution/boundary_extend.pyx":72 + * bint normalize_by_kernel): + * + * if g.shape[0] % 2 != 1 or g.shape[1] % 2 != 1: # <<<<<<<<<<<<<< + * raise ValueError("Convolution kernel must have odd dimensions") + * + */ + } + + /* "astropy/convolution/boundary_extend.pyx":75 + * raise ValueError("Convolution kernel must have odd dimensions") + * + * assert f.dtype == DTYPE and g.dtype == DTYPE # <<<<<<<<<<<<<< + * + * cdef int nx = f.shape[0] + */ + #ifndef CYTHON_WITHOUT_ASSERTIONS + if (unlikely(!Py_OptimizeFlag)) { + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_f), __pyx_n_s_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 75, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 75, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_5 = PyObject_RichCompare(__pyx_t_3, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 75, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 75, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L6_bool_binop_done; + } + __pyx_t_5 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_g), __pyx_n_s_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 75, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 75, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_t_5, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 75, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 75, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_1 = __pyx_t_2; + __pyx_L6_bool_binop_done:; + if (unlikely(!__pyx_t_1)) { + PyErr_SetNone(PyExc_AssertionError); + __PYX_ERR(0, 75, __pyx_L1_error) + } + } + #endif + + /* "astropy/convolution/boundary_extend.pyx":77 + * assert f.dtype == DTYPE and g.dtype == DTYPE + * + * cdef int nx = f.shape[0] # <<<<<<<<<<<<<< + * cdef int ny = f.shape[1] + * cdef int nkx = g.shape[0] + */ + __pyx_v_nx = (__pyx_v_f->dimensions[0]); + + /* "astropy/convolution/boundary_extend.pyx":78 + * + * cdef int nx = f.shape[0] + * cdef int ny = f.shape[1] # <<<<<<<<<<<<<< + * cdef int nkx = g.shape[0] + * cdef int nky = g.shape[1] + */ + __pyx_v_ny = (__pyx_v_f->dimensions[1]); + + /* "astropy/convolution/boundary_extend.pyx":79 + * cdef int nx = f.shape[0] + * cdef int ny = f.shape[1] + * cdef int nkx = g.shape[0] # <<<<<<<<<<<<<< + * cdef int nky = g.shape[1] + * cdef int wkx = nkx // 2 + */ + __pyx_v_nkx = (__pyx_v_g->dimensions[0]); + + /* "astropy/convolution/boundary_extend.pyx":80 + * cdef int ny = f.shape[1] + * cdef int nkx = g.shape[0] + * cdef int nky = g.shape[1] # <<<<<<<<<<<<<< + * cdef int wkx = nkx // 2 + * cdef int wky = nky // 2 + */ + __pyx_v_nky = (__pyx_v_g->dimensions[1]); + + /* "astropy/convolution/boundary_extend.pyx":81 + * cdef int nkx = g.shape[0] + * cdef int nky = g.shape[1] + * cdef int wkx = nkx // 2 # <<<<<<<<<<<<<< + * cdef int wky = nky // 2 + * cdef np.ndarray[DTYPE_t, ndim=2] conv = np.empty([nx, ny], dtype=DTYPE) + */ + __pyx_v_wkx = __Pyx_div_long(__pyx_v_nkx, 2); + + /* 
"astropy/convolution/boundary_extend.pyx":82 + * cdef int nky = g.shape[1] + * cdef int wkx = nkx // 2 + * cdef int wky = nky // 2 # <<<<<<<<<<<<<< + * cdef np.ndarray[DTYPE_t, ndim=2] conv = np.empty([nx, ny], dtype=DTYPE) + * cdef unsigned int i, j, iii, jjj + */ + __pyx_v_wky = __Pyx_div_long(__pyx_v_nky, 2); + + /* "astropy/convolution/boundary_extend.pyx":83 + * cdef int wkx = nkx // 2 + * cdef int wky = nky // 2 + * cdef np.ndarray[DTYPE_t, ndim=2] conv = np.empty([nx, ny], dtype=DTYPE) # <<<<<<<<<<<<<< + * cdef unsigned int i, j, iii, jjj + * cdef int ii, jj + */ + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 83, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_empty); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 83, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_nx); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 83, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_ny); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 83, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 83, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GIVEREF(__pyx_t_3); + PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_3); + __Pyx_GIVEREF(__pyx_t_5); + PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_5); + __pyx_t_3 = 0; + __pyx_t_5 = 0; + __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 83, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_GIVEREF(__pyx_t_6); + PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_6); + __pyx_t_6 = 0; + __pyx_t_6 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 83, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 83, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + if (PyDict_SetItem(__pyx_t_6, __pyx_n_s_dtype, __pyx_t_3) < 0) __PYX_ERR(0, 83, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_5, __pyx_t_6); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 83, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 83, __pyx_L1_error) + __pyx_t_7 = ((PyArrayObject *)__pyx_t_3); + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_conv.rcbuffer->pybuffer, (PyObject*)__pyx_t_7, &__Pyx_TypeInfo_nn___pyx_t_7astropy_11convolution_15boundary_extend_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) { + __pyx_v_conv = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf = NULL; + __PYX_ERR(0, 83, __pyx_L1_error) + } else {__pyx_pybuffernd_conv.diminfo[0].strides = __pyx_pybuffernd_conv.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_conv.diminfo[0].shape = __pyx_pybuffernd_conv.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_conv.diminfo[1].strides = __pyx_pybuffernd_conv.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_conv.diminfo[1].shape = __pyx_pybuffernd_conv.rcbuffer->pybuffer.shape[1]; + } + } + __pyx_t_7 = 0; + __pyx_v_conv = ((PyArrayObject *)__pyx_t_3); + __pyx_t_3 = 0; + + /* "astropy/convolution/boundary_extend.pyx":92 + * + * 
# release the GIL + * with nogil: # <<<<<<<<<<<<<< + * + * # Now run the proper convolution + */ + { + #ifdef WITH_THREAD + PyThreadState *_save; + Py_UNBLOCK_THREADS + __Pyx_FastGIL_Remember(); + #endif + /*try:*/ { + + /* "astropy/convolution/boundary_extend.pyx":95 + * + * # Now run the proper convolution + * for i in range(nx): # <<<<<<<<<<<<<< + * for j in range(ny): + * top = 0. + */ + __pyx_t_8 = __pyx_v_nx; + for (__pyx_t_9 = 0; __pyx_t_9 < __pyx_t_8; __pyx_t_9+=1) { + __pyx_v_i = __pyx_t_9; + + /* "astropy/convolution/boundary_extend.pyx":96 + * # Now run the proper convolution + * for i in range(nx): + * for j in range(ny): # <<<<<<<<<<<<<< + * top = 0. + * bot = 0. + */ + __pyx_t_10 = __pyx_v_ny; + for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) { + __pyx_v_j = __pyx_t_11; + + /* "astropy/convolution/boundary_extend.pyx":97 + * for i in range(nx): + * for j in range(ny): + * top = 0. # <<<<<<<<<<<<<< + * bot = 0. + * iimin = i - wkx + */ + __pyx_v_top = 0.; + + /* "astropy/convolution/boundary_extend.pyx":98 + * for j in range(ny): + * top = 0. + * bot = 0. # <<<<<<<<<<<<<< + * iimin = i - wkx + * iimax = i + wkx + 1 + */ + __pyx_v_bot = 0.; + + /* "astropy/convolution/boundary_extend.pyx":99 + * top = 0. + * bot = 0. + * iimin = i - wkx # <<<<<<<<<<<<<< + * iimax = i + wkx + 1 + * jjmin = j - wky + */ + __pyx_v_iimin = (__pyx_v_i - __pyx_v_wkx); + + /* "astropy/convolution/boundary_extend.pyx":100 + * bot = 0. + * iimin = i - wkx + * iimax = i + wkx + 1 # <<<<<<<<<<<<<< + * jjmin = j - wky + * jjmax = j + wky + 1 + */ + __pyx_v_iimax = ((__pyx_v_i + __pyx_v_wkx) + 1); + + /* "astropy/convolution/boundary_extend.pyx":101 + * iimin = i - wkx + * iimax = i + wkx + 1 + * jjmin = j - wky # <<<<<<<<<<<<<< + * jjmax = j + wky + 1 + * for ii in range(iimin, iimax): + */ + __pyx_v_jjmin = (__pyx_v_j - __pyx_v_wky); + + /* "astropy/convolution/boundary_extend.pyx":102 + * iimax = i + wkx + 1 + * jjmin = j - wky + * jjmax = j + wky + 1 # <<<<<<<<<<<<<< + * for ii in range(iimin, iimax): + * for jj in range(jjmin, jjmax): + */ + __pyx_v_jjmax = ((__pyx_v_j + __pyx_v_wky) + 1); + + /* "astropy/convolution/boundary_extend.pyx":103 + * jjmin = j - wky + * jjmax = j + wky + 1 + * for ii in range(iimin, iimax): # <<<<<<<<<<<<<< + * for jj in range(jjmin, jjmax): + * iii = int_min(int_max(ii, 0), nx - 1) + */ + __pyx_t_12 = __pyx_v_iimax; + for (__pyx_t_13 = __pyx_v_iimin; __pyx_t_13 < __pyx_t_12; __pyx_t_13+=1) { + __pyx_v_ii = __pyx_t_13; + + /* "astropy/convolution/boundary_extend.pyx":104 + * jjmax = j + wky + 1 + * for ii in range(iimin, iimax): + * for jj in range(jjmin, jjmax): # <<<<<<<<<<<<<< + * iii = int_min(int_max(ii, 0), nx - 1) + * jjj = int_min(int_max(jj, 0), ny - 1) + */ + __pyx_t_14 = __pyx_v_jjmax; + for (__pyx_t_15 = __pyx_v_jjmin; __pyx_t_15 < __pyx_t_14; __pyx_t_15+=1) { + __pyx_v_jj = __pyx_t_15; + + /* "astropy/convolution/boundary_extend.pyx":105 + * for ii in range(iimin, iimax): + * for jj in range(jjmin, jjmax): + * iii = int_min(int_max(ii, 0), nx - 1) # <<<<<<<<<<<<<< + * jjj = int_min(int_max(jj, 0), ny - 1) + * val = f[iii, jjj] + */ + __pyx_v_iii = __pyx_f_7astropy_11convolution_15boundary_extend_int_min(__pyx_f_7astropy_11convolution_15boundary_extend_int_max(__pyx_v_ii, 0), (__pyx_v_nx - 1)); + + /* "astropy/convolution/boundary_extend.pyx":106 + * for jj in range(jjmin, jjmax): + * iii = int_min(int_max(ii, 0), nx - 1) + * jjj = int_min(int_max(jj, 0), ny - 1) # <<<<<<<<<<<<<< + * val = f[iii, jjj] + * ker = g[(nkx - 1 - (wkx + ii - i)), + */ + 
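/* The clamped indices computed here are the "extend" boundary rule:
 * samples requested beyond either edge reuse the nearest edge value.
 * Illustrative values for nx == 5:
 *
 *     iii = min(max(ii, 0), nx - 1)
 *     # ii = -2  ->  iii = 0   (left edge extended)
 *     # ii =  7  ->  iii = 4   (right edge extended)
 *
 * The kernel index nkx - 1 - (wkx + ii - i) runs back-to-front, so this
 * is a true convolution (flipped kernel) rather than a correlation.
 */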
__pyx_v_jjj = __pyx_f_7astropy_11convolution_15boundary_extend_int_min(__pyx_f_7astropy_11convolution_15boundary_extend_int_max(__pyx_v_jj, 0), (__pyx_v_ny - 1)); + + /* "astropy/convolution/boundary_extend.pyx":107 + * iii = int_min(int_max(ii, 0), nx - 1) + * jjj = int_min(int_max(jj, 0), ny - 1) + * val = f[iii, jjj] # <<<<<<<<<<<<<< + * ker = g[(nkx - 1 - (wkx + ii - i)), + * (nky - 1 - (wky + jj - j))] + */ + __pyx_t_16 = __pyx_v_iii; + __pyx_t_17 = __pyx_v_jjj; + __pyx_v_val = (*__Pyx_BufPtrStrided2d(__pyx_t_7astropy_11convolution_15boundary_extend_DTYPE_t *, __pyx_pybuffernd_f.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_f.diminfo[0].strides, __pyx_t_17, __pyx_pybuffernd_f.diminfo[1].strides)); + + /* "astropy/convolution/boundary_extend.pyx":108 + * jjj = int_min(int_max(jj, 0), ny - 1) + * val = f[iii, jjj] + * ker = g[(nkx - 1 - (wkx + ii - i)), # <<<<<<<<<<<<<< + * (nky - 1 - (wky + jj - j))] + * if not npy_isnan(val): + */ + __pyx_t_18 = ((unsigned int)((__pyx_v_nkx - 1) - ((__pyx_v_wkx + __pyx_v_ii) - __pyx_v_i))); + __pyx_t_19 = ((unsigned int)((__pyx_v_nky - 1) - ((__pyx_v_wky + __pyx_v_jj) - __pyx_v_j))); + __pyx_v_ker = (*__Pyx_BufPtrStrided2d(__pyx_t_7astropy_11convolution_15boundary_extend_DTYPE_t *, __pyx_pybuffernd_g.rcbuffer->pybuffer.buf, __pyx_t_18, __pyx_pybuffernd_g.diminfo[0].strides, __pyx_t_19, __pyx_pybuffernd_g.diminfo[1].strides)); + + /* "astropy/convolution/boundary_extend.pyx":110 + * ker = g[(nkx - 1 - (wkx + ii - i)), + * (nky - 1 - (wky + jj - j))] + * if not npy_isnan(val): # <<<<<<<<<<<<<< + * top += val * ker + * bot += ker + */ + __pyx_t_1 = ((!(npy_isnan(__pyx_v_val) != 0)) != 0); + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_extend.pyx":111 + * (nky - 1 - (wky + jj - j))] + * if not npy_isnan(val): + * top += val * ker # <<<<<<<<<<<<<< + * bot += ker + * if normalize_by_kernel: + */ + __pyx_v_top = (__pyx_v_top + (__pyx_v_val * __pyx_v_ker)); + + /* "astropy/convolution/boundary_extend.pyx":112 + * if not npy_isnan(val): + * top += val * ker + * bot += ker # <<<<<<<<<<<<<< + * if normalize_by_kernel: + * if bot == 0: + */ + __pyx_v_bot = (__pyx_v_bot + __pyx_v_ker); + + /* "astropy/convolution/boundary_extend.pyx":110 + * ker = g[(nkx - 1 - (wkx + ii - i)), + * (nky - 1 - (wky + jj - j))] + * if not npy_isnan(val): # <<<<<<<<<<<<<< + * top += val * ker + * bot += ker + */ + } + } + } + + /* "astropy/convolution/boundary_extend.pyx":113 + * top += val * ker + * bot += ker + * if normalize_by_kernel: # <<<<<<<<<<<<<< + * if bot == 0: + * conv[i, j] = f[i, j] + */ + __pyx_t_1 = (__pyx_v_normalize_by_kernel != 0); + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_extend.pyx":114 + * bot += ker + * if normalize_by_kernel: + * if bot == 0: # <<<<<<<<<<<<<< + * conv[i, j] = f[i, j] + * else: + */ + __pyx_t_1 = ((__pyx_v_bot == 0.0) != 0); + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_extend.pyx":115 + * if normalize_by_kernel: + * if bot == 0: + * conv[i, j] = f[i, j] # <<<<<<<<<<<<<< + * else: + * conv[i, j] = top / bot + */ + __pyx_t_20 = __pyx_v_i; + __pyx_t_21 = __pyx_v_j; + __pyx_t_22 = __pyx_v_i; + __pyx_t_23 = __pyx_v_j; + *__Pyx_BufPtrStrided2d(__pyx_t_7astropy_11convolution_15boundary_extend_DTYPE_t *, __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf, __pyx_t_22, __pyx_pybuffernd_conv.diminfo[0].strides, __pyx_t_23, __pyx_pybuffernd_conv.diminfo[1].strides) = (*__Pyx_BufPtrStrided2d(__pyx_t_7astropy_11convolution_15boundary_extend_DTYPE_t *, __pyx_pybuffernd_f.rcbuffer->pybuffer.buf, __pyx_t_20, 
__pyx_pybuffernd_f.diminfo[0].strides, __pyx_t_21, __pyx_pybuffernd_f.diminfo[1].strides)); + + /* "astropy/convolution/boundary_extend.pyx":114 + * bot += ker + * if normalize_by_kernel: + * if bot == 0: # <<<<<<<<<<<<<< + * conv[i, j] = f[i, j] + * else: + */ + goto __pyx_L21; + } + + /* "astropy/convolution/boundary_extend.pyx":117 + * conv[i, j] = f[i, j] + * else: + * conv[i, j] = top / bot # <<<<<<<<<<<<<< + * else: + * conv[i, j] = top + */ + /*else*/ { + if (unlikely(__pyx_v_bot == 0)) { + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + PyErr_SetString(PyExc_ZeroDivisionError, "float division"); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + __PYX_ERR(0, 117, __pyx_L9_error) + } + __pyx_t_24 = __pyx_v_i; + __pyx_t_25 = __pyx_v_j; + *__Pyx_BufPtrStrided2d(__pyx_t_7astropy_11convolution_15boundary_extend_DTYPE_t *, __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf, __pyx_t_24, __pyx_pybuffernd_conv.diminfo[0].strides, __pyx_t_25, __pyx_pybuffernd_conv.diminfo[1].strides) = (__pyx_v_top / __pyx_v_bot); + } + __pyx_L21:; + + /* "astropy/convolution/boundary_extend.pyx":113 + * top += val * ker + * bot += ker + * if normalize_by_kernel: # <<<<<<<<<<<<<< + * if bot == 0: + * conv[i, j] = f[i, j] + */ + goto __pyx_L20; + } + + /* "astropy/convolution/boundary_extend.pyx":119 + * conv[i, j] = top / bot + * else: + * conv[i, j] = top # <<<<<<<<<<<<<< + * # GIL acquired again here + * return conv + */ + /*else*/ { + __pyx_t_26 = __pyx_v_i; + __pyx_t_27 = __pyx_v_j; + *__Pyx_BufPtrStrided2d(__pyx_t_7astropy_11convolution_15boundary_extend_DTYPE_t *, __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf, __pyx_t_26, __pyx_pybuffernd_conv.diminfo[0].strides, __pyx_t_27, __pyx_pybuffernd_conv.diminfo[1].strides) = __pyx_v_top; + } + __pyx_L20:; + } + } + } + + /* "astropy/convolution/boundary_extend.pyx":92 + * + * # release the GIL + * with nogil: # <<<<<<<<<<<<<< + * + * # Now run the proper convolution + */ + /*finally:*/ { + /*normal exit:*/{ + #ifdef WITH_THREAD + __Pyx_FastGIL_Forget(); + Py_BLOCK_THREADS + #endif + goto __pyx_L10; + } + __pyx_L9_error: { + #ifdef WITH_THREAD + __Pyx_FastGIL_Forget(); + Py_BLOCK_THREADS + #endif + goto __pyx_L1_error; + } + __pyx_L10:; + } + } + + /* "astropy/convolution/boundary_extend.pyx":121 + * conv[i, j] = top + * # GIL acquired again here + * return conv # <<<<<<<<<<<<<< + * + * + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject *)__pyx_v_conv)); + __pyx_r = ((PyObject *)__pyx_v_conv); + goto __pyx_L0; + + /* "astropy/convolution/boundary_extend.pyx":68 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve2d_boundary_extend(np.ndarray[DTYPE_t, ndim=2] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=2] g, + * bint normalize_by_kernel): + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_conv.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_f.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_g.rcbuffer->pybuffer); + __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} + __Pyx_AddTraceback("astropy.convolution.boundary_extend.convolve2d_boundary_extend", __pyx_clineno, __pyx_lineno, 
__pyx_filename); + __pyx_r = NULL; + goto __pyx_L2; + __pyx_L0:; + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_conv.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_f.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_g.rcbuffer->pybuffer); + __pyx_L2:; + __Pyx_XDECREF((PyObject *)__pyx_v_conv); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "astropy/convolution/boundary_extend.pyx":125 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve3d_boundary_extend(np.ndarray[DTYPE_t, ndim=3] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=3] g, + * bint normalize_by_kernel): + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7astropy_11convolution_15boundary_extend_5convolve3d_boundary_extend(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyMethodDef __pyx_mdef_7astropy_11convolution_15boundary_extend_5convolve3d_boundary_extend = {"convolve3d_boundary_extend", (PyCFunction)__pyx_pw_7astropy_11convolution_15boundary_extend_5convolve3d_boundary_extend, METH_VARARGS|METH_KEYWORDS, 0}; +static PyObject *__pyx_pw_7astropy_11convolution_15boundary_extend_5convolve3d_boundary_extend(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyArrayObject *__pyx_v_f = 0; + PyArrayObject *__pyx_v_g = 0; + int __pyx_v_normalize_by_kernel; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("convolve3d_boundary_extend (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_f,&__pyx_n_s_g,&__pyx_n_s_normalize_by_kernel,0}; + PyObject* values[3] = {0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_f)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_g)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("convolve3d_boundary_extend", 1, 3, 3, 1); __PYX_ERR(0, 125, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_normalize_by_kernel)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("convolve3d_boundary_extend", 1, 3, 3, 2); __PYX_ERR(0, 125, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "convolve3d_boundary_extend") < 0)) __PYX_ERR(0, 125, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + } + __pyx_v_f = ((PyArrayObject *)values[0]); + __pyx_v_g = ((PyArrayObject *)values[1]); + __pyx_v_normalize_by_kernel = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_normalize_by_kernel == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 127, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + 
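/* convolve3d_boundary_extend below applies the same scheme along a
 * third axis (nz, nkz, wkz and a clamped kkk index). By analogy with
 * the 1-D and 2-D loops above, the innermost accumulation presumably
 * reads (sketch only):
 *
 *     for kk in range(k - wkz, k + wkz + 1):
 *         kkk = min(max(kk, 0), nz - 1)
 *         val = f[iii, jjj, kkk]
 *         ker = g[nkx - 1 - (wkx + ii - i),
 *                 nky - 1 - (wky + jj - j),
 *                 nkz - 1 - (wkz + kk - k)]
 */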
__pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("convolve3d_boundary_extend", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 125, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("astropy.convolution.boundary_extend.convolve3d_boundary_extend", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_f), __pyx_ptype_5numpy_ndarray, 1, "f", 0))) __PYX_ERR(0, 125, __pyx_L1_error) + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_g), __pyx_ptype_5numpy_ndarray, 1, "g", 0))) __PYX_ERR(0, 126, __pyx_L1_error) + __pyx_r = __pyx_pf_7astropy_11convolution_15boundary_extend_4convolve3d_boundary_extend(__pyx_self, __pyx_v_f, __pyx_v_g, __pyx_v_normalize_by_kernel); + + /* function exit code */ + goto __pyx_L0; + __pyx_L1_error:; + __pyx_r = NULL; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7astropy_11convolution_15boundary_extend_4convolve3d_boundary_extend(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_f, PyArrayObject *__pyx_v_g, int __pyx_v_normalize_by_kernel) { + int __pyx_v_nx; + int __pyx_v_ny; + int __pyx_v_nz; + int __pyx_v_nkx; + int __pyx_v_nky; + int __pyx_v_nkz; + int __pyx_v_wkx; + int __pyx_v_wky; + int __pyx_v_wkz; + PyArrayObject *__pyx_v_conv = 0; + unsigned int __pyx_v_i; + unsigned int __pyx_v_j; + unsigned int __pyx_v_k; + unsigned int __pyx_v_iii; + unsigned int __pyx_v_jjj; + unsigned int __pyx_v_kkk; + int __pyx_v_ii; + int __pyx_v_jj; + int __pyx_v_kk; + int __pyx_v_iimin; + int __pyx_v_iimax; + int __pyx_v_jjmin; + int __pyx_v_jjmax; + int __pyx_v_kkmin; + int __pyx_v_kkmax; + __pyx_t_7astropy_11convolution_15boundary_extend_DTYPE_t __pyx_v_top; + __pyx_t_7astropy_11convolution_15boundary_extend_DTYPE_t __pyx_v_bot; + __pyx_t_7astropy_11convolution_15boundary_extend_DTYPE_t __pyx_v_ker; + __pyx_t_7astropy_11convolution_15boundary_extend_DTYPE_t __pyx_v_val; + __Pyx_LocalBuf_ND __pyx_pybuffernd_conv; + __Pyx_Buffer __pyx_pybuffer_conv; + __Pyx_LocalBuf_ND __pyx_pybuffernd_f; + __Pyx_Buffer __pyx_pybuffer_f; + __Pyx_LocalBuf_ND __pyx_pybuffernd_g; + __Pyx_Buffer __pyx_pybuffer_g; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyArrayObject *__pyx_t_8 = NULL; + int __pyx_t_9; + unsigned int __pyx_t_10; + int __pyx_t_11; + unsigned int __pyx_t_12; + int __pyx_t_13; + unsigned int __pyx_t_14; + int __pyx_t_15; + int __pyx_t_16; + int __pyx_t_17; + int __pyx_t_18; + int __pyx_t_19; + int __pyx_t_20; + size_t __pyx_t_21; + size_t __pyx_t_22; + size_t __pyx_t_23; + size_t __pyx_t_24; + size_t __pyx_t_25; + size_t __pyx_t_26; + size_t __pyx_t_27; + size_t __pyx_t_28; + size_t __pyx_t_29; + size_t __pyx_t_30; + size_t __pyx_t_31; + size_t __pyx_t_32; + size_t __pyx_t_33; + size_t __pyx_t_34; + size_t __pyx_t_35; + size_t __pyx_t_36; + size_t __pyx_t_37; + size_t __pyx_t_38; + __Pyx_RefNannySetupContext("convolve3d_boundary_extend", 0); + __pyx_pybuffer_conv.pybuffer.buf = NULL; + __pyx_pybuffer_conv.refcount = 0; + __pyx_pybuffernd_conv.data = NULL; + __pyx_pybuffernd_conv.rcbuffer = &__pyx_pybuffer_conv; + __pyx_pybuffer_f.pybuffer.buf = NULL; + __pyx_pybuffer_f.refcount = 0; + __pyx_pybuffernd_f.data = NULL; + __pyx_pybuffernd_f.rcbuffer = &__pyx_pybuffer_f; + 
__pyx_pybuffer_g.pybuffer.buf = NULL; + __pyx_pybuffer_g.refcount = 0; + __pyx_pybuffernd_g.data = NULL; + __pyx_pybuffernd_g.rcbuffer = &__pyx_pybuffer_g; + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_f.rcbuffer->pybuffer, (PyObject*)__pyx_v_f, &__Pyx_TypeInfo_nn___pyx_t_7astropy_11convolution_15boundary_extend_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 3, 0, __pyx_stack) == -1)) __PYX_ERR(0, 125, __pyx_L1_error) + } + __pyx_pybuffernd_f.diminfo[0].strides = __pyx_pybuffernd_f.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_f.diminfo[0].shape = __pyx_pybuffernd_f.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_f.diminfo[1].strides = __pyx_pybuffernd_f.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_f.diminfo[1].shape = __pyx_pybuffernd_f.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_f.diminfo[2].strides = __pyx_pybuffernd_f.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_f.diminfo[2].shape = __pyx_pybuffernd_f.rcbuffer->pybuffer.shape[2]; + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_g.rcbuffer->pybuffer, (PyObject*)__pyx_v_g, &__Pyx_TypeInfo_nn___pyx_t_7astropy_11convolution_15boundary_extend_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 3, 0, __pyx_stack) == -1)) __PYX_ERR(0, 125, __pyx_L1_error) + } + __pyx_pybuffernd_g.diminfo[0].strides = __pyx_pybuffernd_g.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_g.diminfo[0].shape = __pyx_pybuffernd_g.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_g.diminfo[1].strides = __pyx_pybuffernd_g.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_g.diminfo[1].shape = __pyx_pybuffernd_g.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_g.diminfo[2].strides = __pyx_pybuffernd_g.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_g.diminfo[2].shape = __pyx_pybuffernd_g.rcbuffer->pybuffer.shape[2]; + + /* "astropy/convolution/boundary_extend.pyx":129 + * bint normalize_by_kernel): + * + * if g.shape[0] % 2 != 1 or g.shape[1] % 2 != 1 or g.shape[2] % 2 != 1: # <<<<<<<<<<<<<< + * raise ValueError("Convolution kernel must have odd dimensions") + * + */ + __pyx_t_2 = ((__Pyx_mod_long((__pyx_v_g->dimensions[0]), 2) != 1) != 0); + if (!__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L4_bool_binop_done; + } + __pyx_t_2 = ((__Pyx_mod_long((__pyx_v_g->dimensions[1]), 2) != 1) != 0); + if (!__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L4_bool_binop_done; + } + __pyx_t_2 = ((__Pyx_mod_long((__pyx_v_g->dimensions[2]), 2) != 1) != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L4_bool_binop_done:; + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_extend.pyx":130 + * + * if g.shape[0] % 2 != 1 or g.shape[1] % 2 != 1 or g.shape[2] % 2 != 1: + * raise ValueError("Convolution kernel must have odd dimensions") # <<<<<<<<<<<<<< + * + * assert f.dtype == DTYPE and g.dtype == DTYPE + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 130, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(0, 130, __pyx_L1_error) + + /* "astropy/convolution/boundary_extend.pyx":129 + * bint normalize_by_kernel): + * + * if g.shape[0] % 2 != 1 or g.shape[1] % 2 != 1 or g.shape[2] % 2 != 1: # <<<<<<<<<<<<<< + * raise ValueError("Convolution kernel must have odd dimensions") + * + */ + } + + /* "astropy/convolution/boundary_extend.pyx":132 + * raise ValueError("Convolution kernel must have odd dimensions") + * + * assert 
f.dtype == DTYPE and g.dtype == DTYPE # <<<<<<<<<<<<<< + * + * cdef int nx = f.shape[0] + */ + #ifndef CYTHON_WITHOUT_ASSERTIONS + if (unlikely(!Py_OptimizeFlag)) { + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_f), __pyx_n_s_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 132, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 132, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_5 = PyObject_RichCompare(__pyx_t_3, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 132, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 132, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L7_bool_binop_done; + } + __pyx_t_5 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_g), __pyx_n_s_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 132, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 132, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_t_5, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 132, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 132, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_1 = __pyx_t_2; + __pyx_L7_bool_binop_done:; + if (unlikely(!__pyx_t_1)) { + PyErr_SetNone(PyExc_AssertionError); + __PYX_ERR(0, 132, __pyx_L1_error) + } + } + #endif + + /* "astropy/convolution/boundary_extend.pyx":134 + * assert f.dtype == DTYPE and g.dtype == DTYPE + * + * cdef int nx = f.shape[0] # <<<<<<<<<<<<<< + * cdef int ny = f.shape[1] + * cdef int nz = f.shape[2] + */ + __pyx_v_nx = (__pyx_v_f->dimensions[0]); + + /* "astropy/convolution/boundary_extend.pyx":135 + * + * cdef int nx = f.shape[0] + * cdef int ny = f.shape[1] # <<<<<<<<<<<<<< + * cdef int nz = f.shape[2] + * cdef int nkx = g.shape[0] + */ + __pyx_v_ny = (__pyx_v_f->dimensions[1]); + + /* "astropy/convolution/boundary_extend.pyx":136 + * cdef int nx = f.shape[0] + * cdef int ny = f.shape[1] + * cdef int nz = f.shape[2] # <<<<<<<<<<<<<< + * cdef int nkx = g.shape[0] + * cdef int nky = g.shape[1] + */ + __pyx_v_nz = (__pyx_v_f->dimensions[2]); + + /* "astropy/convolution/boundary_extend.pyx":137 + * cdef int ny = f.shape[1] + * cdef int nz = f.shape[2] + * cdef int nkx = g.shape[0] # <<<<<<<<<<<<<< + * cdef int nky = g.shape[1] + * cdef int nkz = g.shape[2] + */ + __pyx_v_nkx = (__pyx_v_g->dimensions[0]); + + /* "astropy/convolution/boundary_extend.pyx":138 + * cdef int nz = f.shape[2] + * cdef int nkx = g.shape[0] + * cdef int nky = g.shape[1] # <<<<<<<<<<<<<< + * cdef int nkz = g.shape[2] + * cdef int wkx = nkx // 2 + */ + __pyx_v_nky = (__pyx_v_g->dimensions[1]); + + /* "astropy/convolution/boundary_extend.pyx":139 + * cdef int nkx = g.shape[0] + * cdef int nky = g.shape[1] + * cdef int nkz = g.shape[2] # <<<<<<<<<<<<<< + * cdef int wkx = nkx // 2 + * cdef int wky = nky // 2 + */ + __pyx_v_nkz = (__pyx_v_g->dimensions[2]); + + /* "astropy/convolution/boundary_extend.pyx":140 + * cdef int nky = g.shape[1] + * cdef int nkz = g.shape[2] + * cdef int wkx = nkx // 2 # 
<<<<<<<<<<<<<< + * cdef int wky = nky // 2 + * cdef int wkz = nkz // 2 + */ + __pyx_v_wkx = __Pyx_div_long(__pyx_v_nkx, 2); + + /* "astropy/convolution/boundary_extend.pyx":141 + * cdef int nkz = g.shape[2] + * cdef int wkx = nkx // 2 + * cdef int wky = nky // 2 # <<<<<<<<<<<<<< + * cdef int wkz = nkz // 2 + * cdef np.ndarray[DTYPE_t, ndim=3] conv = np.empty([nx, ny, nz], dtype=DTYPE) + */ + __pyx_v_wky = __Pyx_div_long(__pyx_v_nky, 2); + + /* "astropy/convolution/boundary_extend.pyx":142 + * cdef int wkx = nkx // 2 + * cdef int wky = nky // 2 + * cdef int wkz = nkz // 2 # <<<<<<<<<<<<<< + * cdef np.ndarray[DTYPE_t, ndim=3] conv = np.empty([nx, ny, nz], dtype=DTYPE) + * cdef unsigned int i, j, k, iii, jjj, kkk + */ + __pyx_v_wkz = __Pyx_div_long(__pyx_v_nkz, 2); + + /* "astropy/convolution/boundary_extend.pyx":143 + * cdef int wky = nky // 2 + * cdef int wkz = nkz // 2 + * cdef np.ndarray[DTYPE_t, ndim=3] conv = np.empty([nx, ny, nz], dtype=DTYPE) # <<<<<<<<<<<<<< + * cdef unsigned int i, j, k, iii, jjj, kkk + * cdef int ii, jj, kk + */ + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 143, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_empty); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 143, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_nx); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 143, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_ny); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 143, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_6 = __Pyx_PyInt_From_int(__pyx_v_nz); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 143, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_7 = PyList_New(3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 143, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __Pyx_GIVEREF(__pyx_t_3); + PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_3); + __Pyx_GIVEREF(__pyx_t_5); + PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_5); + __Pyx_GIVEREF(__pyx_t_6); + PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_6); + __pyx_t_3 = 0; + __pyx_t_5 = 0; + __pyx_t_6 = 0; + __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 143, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GIVEREF(__pyx_t_7); + PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_7); + __pyx_t_7 = 0; + __pyx_t_7 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 143, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 143, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + if (PyDict_SetItem(__pyx_t_7, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 143, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_6, __pyx_t_7); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 143, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 143, __pyx_L1_error) + __pyx_t_8 = ((PyArrayObject *)__pyx_t_5); + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_conv.rcbuffer->pybuffer, (PyObject*)__pyx_t_8, &__Pyx_TypeInfo_nn___pyx_t_7astropy_11convolution_15boundary_extend_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 3, 0, 
__pyx_stack) == -1)) { + __pyx_v_conv = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf = NULL; + __PYX_ERR(0, 143, __pyx_L1_error) + } else {__pyx_pybuffernd_conv.diminfo[0].strides = __pyx_pybuffernd_conv.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_conv.diminfo[0].shape = __pyx_pybuffernd_conv.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_conv.diminfo[1].strides = __pyx_pybuffernd_conv.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_conv.diminfo[1].shape = __pyx_pybuffernd_conv.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_conv.diminfo[2].strides = __pyx_pybuffernd_conv.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_conv.diminfo[2].shape = __pyx_pybuffernd_conv.rcbuffer->pybuffer.shape[2]; + } + } + __pyx_t_8 = 0; + __pyx_v_conv = ((PyArrayObject *)__pyx_t_5); + __pyx_t_5 = 0; + + /* "astropy/convolution/boundary_extend.pyx":152 + * + * # release the GIL + * with nogil: # <<<<<<<<<<<<<< + * + * # Now run the proper convolution + */ + { + #ifdef WITH_THREAD + PyThreadState *_save; + Py_UNBLOCK_THREADS + __Pyx_FastGIL_Remember(); + #endif + /*try:*/ { + + /* "astropy/convolution/boundary_extend.pyx":155 + * + * # Now run the proper convolution + * for i in range(nx): # <<<<<<<<<<<<<< + * for j in range(ny): + * for k in range(nz): + */ + __pyx_t_9 = __pyx_v_nx; + for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_9; __pyx_t_10+=1) { + __pyx_v_i = __pyx_t_10; + + /* "astropy/convolution/boundary_extend.pyx":156 + * # Now run the proper convolution + * for i in range(nx): + * for j in range(ny): # <<<<<<<<<<<<<< + * for k in range(nz): + * top = 0. + */ + __pyx_t_11 = __pyx_v_ny; + for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) { + __pyx_v_j = __pyx_t_12; + + /* "astropy/convolution/boundary_extend.pyx":157 + * for i in range(nx): + * for j in range(ny): + * for k in range(nz): # <<<<<<<<<<<<<< + * top = 0. + * bot = 0. + */ + __pyx_t_13 = __pyx_v_nz; + for (__pyx_t_14 = 0; __pyx_t_14 < __pyx_t_13; __pyx_t_14+=1) { + __pyx_v_k = __pyx_t_14; + + /* "astropy/convolution/boundary_extend.pyx":158 + * for j in range(ny): + * for k in range(nz): + * top = 0. # <<<<<<<<<<<<<< + * bot = 0. + * iimin = i - wkx + */ + __pyx_v_top = 0.; + + /* "astropy/convolution/boundary_extend.pyx":159 + * for k in range(nz): + * top = 0. + * bot = 0. # <<<<<<<<<<<<<< + * iimin = i - wkx + * iimax = i + wkx + 1 + */ + __pyx_v_bot = 0.; + + /* "astropy/convolution/boundary_extend.pyx":160 + * top = 0. + * bot = 0. + * iimin = i - wkx # <<<<<<<<<<<<<< + * iimax = i + wkx + 1 + * jjmin = j - wky + */ + __pyx_v_iimin = (__pyx_v_i - __pyx_v_wkx); + + /* "astropy/convolution/boundary_extend.pyx":161 + * bot = 0. 
+ * iimin = i - wkx + * iimax = i + wkx + 1 # <<<<<<<<<<<<<< + * jjmin = j - wky + * jjmax = j + wky + 1 + */ + __pyx_v_iimax = ((__pyx_v_i + __pyx_v_wkx) + 1); + + /* "astropy/convolution/boundary_extend.pyx":162 + * iimin = i - wkx + * iimax = i + wkx + 1 + * jjmin = j - wky # <<<<<<<<<<<<<< + * jjmax = j + wky + 1 + * kkmin = k - wkz + */ + __pyx_v_jjmin = (__pyx_v_j - __pyx_v_wky); + + /* "astropy/convolution/boundary_extend.pyx":163 + * iimax = i + wkx + 1 + * jjmin = j - wky + * jjmax = j + wky + 1 # <<<<<<<<<<<<<< + * kkmin = k - wkz + * kkmax = k + wkz + 1 + */ + __pyx_v_jjmax = ((__pyx_v_j + __pyx_v_wky) + 1); + + /* "astropy/convolution/boundary_extend.pyx":164 + * jjmin = j - wky + * jjmax = j + wky + 1 + * kkmin = k - wkz # <<<<<<<<<<<<<< + * kkmax = k + wkz + 1 + * for ii in range(iimin, iimax): + */ + __pyx_v_kkmin = (__pyx_v_k - __pyx_v_wkz); + + /* "astropy/convolution/boundary_extend.pyx":165 + * jjmax = j + wky + 1 + * kkmin = k - wkz + * kkmax = k + wkz + 1 # <<<<<<<<<<<<<< + * for ii in range(iimin, iimax): + * for jj in range(jjmin, jjmax): + */ + __pyx_v_kkmax = ((__pyx_v_k + __pyx_v_wkz) + 1); + + /* "astropy/convolution/boundary_extend.pyx":166 + * kkmin = k - wkz + * kkmax = k + wkz + 1 + * for ii in range(iimin, iimax): # <<<<<<<<<<<<<< + * for jj in range(jjmin, jjmax): + * for kk in range(kkmin, kkmax): + */ + __pyx_t_15 = __pyx_v_iimax; + for (__pyx_t_16 = __pyx_v_iimin; __pyx_t_16 < __pyx_t_15; __pyx_t_16+=1) { + __pyx_v_ii = __pyx_t_16; + + /* "astropy/convolution/boundary_extend.pyx":167 + * kkmax = k + wkz + 1 + * for ii in range(iimin, iimax): + * for jj in range(jjmin, jjmax): # <<<<<<<<<<<<<< + * for kk in range(kkmin, kkmax): + * iii = int_min(int_max(ii, 0), nx - 1) + */ + __pyx_t_17 = __pyx_v_jjmax; + for (__pyx_t_18 = __pyx_v_jjmin; __pyx_t_18 < __pyx_t_17; __pyx_t_18+=1) { + __pyx_v_jj = __pyx_t_18; + + /* "astropy/convolution/boundary_extend.pyx":168 + * for ii in range(iimin, iimax): + * for jj in range(jjmin, jjmax): + * for kk in range(kkmin, kkmax): # <<<<<<<<<<<<<< + * iii = int_min(int_max(ii, 0), nx - 1) + * jjj = int_min(int_max(jj, 0), ny - 1) + */ + __pyx_t_19 = __pyx_v_kkmax; + for (__pyx_t_20 = __pyx_v_kkmin; __pyx_t_20 < __pyx_t_19; __pyx_t_20+=1) { + __pyx_v_kk = __pyx_t_20; + + /* "astropy/convolution/boundary_extend.pyx":169 + * for jj in range(jjmin, jjmax): + * for kk in range(kkmin, kkmax): + * iii = int_min(int_max(ii, 0), nx - 1) # <<<<<<<<<<<<<< + * jjj = int_min(int_max(jj, 0), ny - 1) + * kkk = int_min(int_max(kk, 0), nz - 1) + */ + __pyx_v_iii = __pyx_f_7astropy_11convolution_15boundary_extend_int_min(__pyx_f_7astropy_11convolution_15boundary_extend_int_max(__pyx_v_ii, 0), (__pyx_v_nx - 1)); + + /* "astropy/convolution/boundary_extend.pyx":170 + * for kk in range(kkmin, kkmax): + * iii = int_min(int_max(ii, 0), nx - 1) + * jjj = int_min(int_max(jj, 0), ny - 1) # <<<<<<<<<<<<<< + * kkk = int_min(int_max(kk, 0), nz - 1) + * val = f[iii, jjj, kkk] + */ + __pyx_v_jjj = __pyx_f_7astropy_11convolution_15boundary_extend_int_min(__pyx_f_7astropy_11convolution_15boundary_extend_int_max(__pyx_v_jj, 0), (__pyx_v_ny - 1)); + + /* "astropy/convolution/boundary_extend.pyx":171 + * iii = int_min(int_max(ii, 0), nx - 1) + * jjj = int_min(int_max(jj, 0), ny - 1) + * kkk = int_min(int_max(kk, 0), nz - 1) # <<<<<<<<<<<<<< + * val = f[iii, jjj, kkk] + * ker = g[(nkx - 1 - (wkx + ii - i)), + */ + __pyx_v_kkk = __pyx_f_7astropy_11convolution_15boundary_extend_int_min(__pyx_f_7astropy_11convolution_15boundary_extend_int_max(__pyx_v_kk, 0), 
(__pyx_v_nz - 1)); + + /* "astropy/convolution/boundary_extend.pyx":172 + * jjj = int_min(int_max(jj, 0), ny - 1) + * kkk = int_min(int_max(kk, 0), nz - 1) + * val = f[iii, jjj, kkk] # <<<<<<<<<<<<<< + * ker = g[(nkx - 1 - (wkx + ii - i)), + * (nky - 1 - (wky + jj - j)), + */ + __pyx_t_21 = __pyx_v_iii; + __pyx_t_22 = __pyx_v_jjj; + __pyx_t_23 = __pyx_v_kkk; + __pyx_v_val = (*__Pyx_BufPtrStrided3d(__pyx_t_7astropy_11convolution_15boundary_extend_DTYPE_t *, __pyx_pybuffernd_f.rcbuffer->pybuffer.buf, __pyx_t_21, __pyx_pybuffernd_f.diminfo[0].strides, __pyx_t_22, __pyx_pybuffernd_f.diminfo[1].strides, __pyx_t_23, __pyx_pybuffernd_f.diminfo[2].strides)); + + /* "astropy/convolution/boundary_extend.pyx":173 + * kkk = int_min(int_max(kk, 0), nz - 1) + * val = f[iii, jjj, kkk] + * ker = g[(nkx - 1 - (wkx + ii - i)), # <<<<<<<<<<<<<< + * (nky - 1 - (wky + jj - j)), + * (nkz - 1 - (wkz + kk - k))] + */ + __pyx_t_24 = ((unsigned int)((__pyx_v_nkx - 1) - ((__pyx_v_wkx + __pyx_v_ii) - __pyx_v_i))); + __pyx_t_25 = ((unsigned int)((__pyx_v_nky - 1) - ((__pyx_v_wky + __pyx_v_jj) - __pyx_v_j))); + __pyx_t_26 = ((unsigned int)((__pyx_v_nkz - 1) - ((__pyx_v_wkz + __pyx_v_kk) - __pyx_v_k))); + __pyx_v_ker = (*__Pyx_BufPtrStrided3d(__pyx_t_7astropy_11convolution_15boundary_extend_DTYPE_t *, __pyx_pybuffernd_g.rcbuffer->pybuffer.buf, __pyx_t_24, __pyx_pybuffernd_g.diminfo[0].strides, __pyx_t_25, __pyx_pybuffernd_g.diminfo[1].strides, __pyx_t_26, __pyx_pybuffernd_g.diminfo[2].strides)); + + /* "astropy/convolution/boundary_extend.pyx":176 + * (nky - 1 - (wky + jj - j)), + * (nkz - 1 - (wkz + kk - k))] + * if not npy_isnan(val): # <<<<<<<<<<<<<< + * top += val * ker + * bot += ker + */ + __pyx_t_1 = ((!(npy_isnan(__pyx_v_val) != 0)) != 0); + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_extend.pyx":177 + * (nkz - 1 - (wkz + kk - k))] + * if not npy_isnan(val): + * top += val * ker # <<<<<<<<<<<<<< + * bot += ker + * if normalize_by_kernel: + */ + __pyx_v_top = (__pyx_v_top + (__pyx_v_val * __pyx_v_ker)); + + /* "astropy/convolution/boundary_extend.pyx":178 + * if not npy_isnan(val): + * top += val * ker + * bot += ker # <<<<<<<<<<<<<< + * if normalize_by_kernel: + * if bot == 0: + */ + __pyx_v_bot = (__pyx_v_bot + __pyx_v_ker); + + /* "astropy/convolution/boundary_extend.pyx":176 + * (nky - 1 - (wky + jj - j)), + * (nkz - 1 - (wkz + kk - k))] + * if not npy_isnan(val): # <<<<<<<<<<<<<< + * top += val * ker + * bot += ker + */ + } + } + } + } + + /* "astropy/convolution/boundary_extend.pyx":179 + * top += val * ker + * bot += ker + * if normalize_by_kernel: # <<<<<<<<<<<<<< + * if bot == 0: + * conv[i, j, k] = f[i, j, k] + */ + __pyx_t_1 = (__pyx_v_normalize_by_kernel != 0); + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_extend.pyx":180 + * bot += ker + * if normalize_by_kernel: + * if bot == 0: # <<<<<<<<<<<<<< + * conv[i, j, k] = f[i, j, k] + * else: + */ + __pyx_t_1 = ((__pyx_v_bot == 0.0) != 0); + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_extend.pyx":181 + * if normalize_by_kernel: + * if bot == 0: + * conv[i, j, k] = f[i, j, k] # <<<<<<<<<<<<<< + * else: + * conv[i, j, k] = top / bot + */ + __pyx_t_27 = __pyx_v_i; + __pyx_t_28 = __pyx_v_j; + __pyx_t_29 = __pyx_v_k; + __pyx_t_30 = __pyx_v_i; + __pyx_t_31 = __pyx_v_j; + __pyx_t_32 = __pyx_v_k; + *__Pyx_BufPtrStrided3d(__pyx_t_7astropy_11convolution_15boundary_extend_DTYPE_t *, __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf, __pyx_t_30, __pyx_pybuffernd_conv.diminfo[0].strides, __pyx_t_31, __pyx_pybuffernd_conv.diminfo[1].strides, 
__pyx_t_32, __pyx_pybuffernd_conv.diminfo[2].strides) = (*__Pyx_BufPtrStrided3d(__pyx_t_7astropy_11convolution_15boundary_extend_DTYPE_t *, __pyx_pybuffernd_f.rcbuffer->pybuffer.buf, __pyx_t_27, __pyx_pybuffernd_f.diminfo[0].strides, __pyx_t_28, __pyx_pybuffernd_f.diminfo[1].strides, __pyx_t_29, __pyx_pybuffernd_f.diminfo[2].strides)); + + /* "astropy/convolution/boundary_extend.pyx":180 + * bot += ker + * if normalize_by_kernel: + * if bot == 0: # <<<<<<<<<<<<<< + * conv[i, j, k] = f[i, j, k] + * else: + */ + goto __pyx_L26; + } + + /* "astropy/convolution/boundary_extend.pyx":183 + * conv[i, j, k] = f[i, j, k] + * else: + * conv[i, j, k] = top / bot # <<<<<<<<<<<<<< + * else: + * conv[i, j, k] = top + */ + /*else*/ { + if (unlikely(__pyx_v_bot == 0)) { + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + PyErr_SetString(PyExc_ZeroDivisionError, "float division"); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + __PYX_ERR(0, 183, __pyx_L10_error) + } + __pyx_t_33 = __pyx_v_i; + __pyx_t_34 = __pyx_v_j; + __pyx_t_35 = __pyx_v_k; + *__Pyx_BufPtrStrided3d(__pyx_t_7astropy_11convolution_15boundary_extend_DTYPE_t *, __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf, __pyx_t_33, __pyx_pybuffernd_conv.diminfo[0].strides, __pyx_t_34, __pyx_pybuffernd_conv.diminfo[1].strides, __pyx_t_35, __pyx_pybuffernd_conv.diminfo[2].strides) = (__pyx_v_top / __pyx_v_bot); + } + __pyx_L26:; + + /* "astropy/convolution/boundary_extend.pyx":179 + * top += val * ker + * bot += ker + * if normalize_by_kernel: # <<<<<<<<<<<<<< + * if bot == 0: + * conv[i, j, k] = f[i, j, k] + */ + goto __pyx_L25; + } + + /* "astropy/convolution/boundary_extend.pyx":185 + * conv[i, j, k] = top / bot + * else: + * conv[i, j, k] = top # <<<<<<<<<<<<<< + * # GIL acquired again here + * return conv + */ + /*else*/ { + __pyx_t_36 = __pyx_v_i; + __pyx_t_37 = __pyx_v_j; + __pyx_t_38 = __pyx_v_k; + *__Pyx_BufPtrStrided3d(__pyx_t_7astropy_11convolution_15boundary_extend_DTYPE_t *, __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf, __pyx_t_36, __pyx_pybuffernd_conv.diminfo[0].strides, __pyx_t_37, __pyx_pybuffernd_conv.diminfo[1].strides, __pyx_t_38, __pyx_pybuffernd_conv.diminfo[2].strides) = __pyx_v_top; + } + __pyx_L25:; + } + } + } + } + + /* "astropy/convolution/boundary_extend.pyx":152 + * + * # release the GIL + * with nogil: # <<<<<<<<<<<<<< + * + * # Now run the proper convolution + */ + /*finally:*/ { + /*normal exit:*/{ + #ifdef WITH_THREAD + __Pyx_FastGIL_Forget(); + Py_BLOCK_THREADS + #endif + goto __pyx_L11; + } + __pyx_L10_error: { + #ifdef WITH_THREAD + __Pyx_FastGIL_Forget(); + Py_BLOCK_THREADS + #endif + goto __pyx_L1_error; + } + __pyx_L11:; + } + } + + /* "astropy/convolution/boundary_extend.pyx":187 + * conv[i, j, k] = top + * # GIL acquired again here + * return conv # <<<<<<<<<<<<<< + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject *)__pyx_v_conv)); + __pyx_r = ((PyObject *)__pyx_v_conv); + goto __pyx_L0; + + /* "astropy/convolution/boundary_extend.pyx":125 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve3d_boundary_extend(np.ndarray[DTYPE_t, ndim=3] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=3] g, + * bint normalize_by_kernel): + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; + 
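+ /* Note on the error path below: the pending exception is stashed with
+  * __Pyx_ErrFetch before the three buffer views are released and restored
+  * with __Pyx_ErrRestore afterwards, so releasing the buffers cannot
+  * clobber the exception being propagated. */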
__Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_conv.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_f.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_g.rcbuffer->pybuffer); + __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} + __Pyx_AddTraceback("astropy.convolution.boundary_extend.convolve3d_boundary_extend", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + goto __pyx_L2; + __pyx_L0:; + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_conv.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_f.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_g.rcbuffer->pybuffer); + __pyx_L2:; + __Pyx_XDECREF((PyObject *)__pyx_v_conv); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":214 + * # experimental exception made for __getbuffer__ and __releasebuffer__ + * # -- the details of this may change. + * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< + * # This implementation of getbuffer is geared towards Cython + * # requirements, and does not yet fullfill the PEP. + */ + +/* Python wrapper */ +static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ +static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); + __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { + int __pyx_v_copy_shape; + int __pyx_v_i; + int __pyx_v_ndim; + int __pyx_v_endian_detector; + int __pyx_v_little_endian; + int __pyx_v_t; + char *__pyx_v_f; + PyArray_Descr *__pyx_v_descr = 0; + int __pyx_v_offset; + int __pyx_v_hasfields; + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + int __pyx_t_5; + PyObject *__pyx_t_6 = NULL; + char *__pyx_t_7; + __Pyx_RefNannySetupContext("__getbuffer__", 0); + if (__pyx_v_info != NULL) { + __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); + __Pyx_GIVEREF(__pyx_v_info->obj); + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":220 + * # of flags + * + * if info == NULL: return # <<<<<<<<<<<<<< + * + * cdef int copy_shape, i, ndim + */ + __pyx_t_1 = ((__pyx_v_info == NULL) != 0); + if (__pyx_t_1) { + __pyx_r = 0; + goto __pyx_L0; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":223 + * + * cdef int copy_shape, i, ndim + * cdef int endian_detector = 1 # <<<<<<<<<<<<<< + * cdef bint little_endian = ((&endian_detector)[0] != 0) + * + */ + __pyx_v_endian_detector = 1; + + /* 
"../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":224 + * cdef int copy_shape, i, ndim + * cdef int endian_detector = 1 + * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< + * + * ndim = PyArray_NDIM(self) + */ + __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":226 + * cdef bint little_endian = ((&endian_detector)[0] != 0) + * + * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< + * + * if sizeof(npy_intp) != sizeof(Py_ssize_t): + */ + __pyx_v_ndim = PyArray_NDIM(__pyx_v_self); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":228 + * ndim = PyArray_NDIM(self) + * + * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< + * copy_shape = 1 + * else: + */ + __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); + if (__pyx_t_1) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":229 + * + * if sizeof(npy_intp) != sizeof(Py_ssize_t): + * copy_shape = 1 # <<<<<<<<<<<<<< + * else: + * copy_shape = 0 + */ + __pyx_v_copy_shape = 1; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":228 + * ndim = PyArray_NDIM(self) + * + * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< + * copy_shape = 1 + * else: + */ + goto __pyx_L4; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":231 + * copy_shape = 1 + * else: + * copy_shape = 0 # <<<<<<<<<<<<<< + * + * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) + */ + /*else*/ { + __pyx_v_copy_shape = 0; + } + __pyx_L4:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":233 + * copy_shape = 0 + * + * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< + * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): + * raise ValueError(u"ndarray is not C contiguous") + */ + __pyx_t_2 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0); + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L6_bool_binop_done; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":234 + * + * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) + * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< + * raise ValueError(u"ndarray is not C contiguous") + * + */ + __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS) != 0)) != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L6_bool_binop_done:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":233 + * copy_shape = 0 + * + * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< + * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): + * raise ValueError(u"ndarray is not C contiguous") + */ + if (__pyx_t_1) { + + /* 
"../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":235 + * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) + * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): + * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< + * + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 235, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 235, __pyx_L1_error) + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":233 + * copy_shape = 0 + * + * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< + * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): + * raise ValueError(u"ndarray is not C contiguous") + */ + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":237 + * raise ValueError(u"ndarray is not C contiguous") + * + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< + * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): + * raise ValueError(u"ndarray is not Fortran contiguous") + */ + __pyx_t_2 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0); + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L9_bool_binop_done; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":238 + * + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) + * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< + * raise ValueError(u"ndarray is not Fortran contiguous") + * + */ + __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS) != 0)) != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L9_bool_binop_done:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":237 + * raise ValueError(u"ndarray is not C contiguous") + * + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< + * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): + * raise ValueError(u"ndarray is not Fortran contiguous") + */ + if (__pyx_t_1) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":239 + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) + * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): + * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< + * + * info.buf = PyArray_DATA(self) + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 239, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 239, __pyx_L1_error) + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":237 + * raise ValueError(u"ndarray is not C contiguous") + * + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == 
pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< + * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): + * raise ValueError(u"ndarray is not Fortran contiguous") + */ + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":241 + * raise ValueError(u"ndarray is not Fortran contiguous") + * + * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< + * info.ndim = ndim + * if copy_shape: + */ + __pyx_v_info->buf = PyArray_DATA(__pyx_v_self); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":242 + * + * info.buf = PyArray_DATA(self) + * info.ndim = ndim # <<<<<<<<<<<<<< + * if copy_shape: + * # Allocate new buffer for strides and shape info. + */ + __pyx_v_info->ndim = __pyx_v_ndim; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":243 + * info.buf = PyArray_DATA(self) + * info.ndim = ndim + * if copy_shape: # <<<<<<<<<<<<<< + * # Allocate new buffer for strides and shape info. + * # This is allocated as one block, strides first. + */ + __pyx_t_1 = (__pyx_v_copy_shape != 0); + if (__pyx_t_1) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":246 + * # Allocate new buffer for strides and shape info. + * # This is allocated as one block, strides first. + * info.strides = PyObject_Malloc(sizeof(Py_ssize_t) * 2 * ndim) # <<<<<<<<<<<<<< + * info.shape = info.strides + ndim + * for i in range(ndim): + */ + __pyx_v_info->strides = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * 2) * ((size_t)__pyx_v_ndim)))); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":247 + * # This is allocated as one block, strides first. 
+ * info.strides = PyObject_Malloc(sizeof(Py_ssize_t) * 2 * ndim) + * info.shape = info.strides + ndim # <<<<<<<<<<<<<< + * for i in range(ndim): + * info.strides[i] = PyArray_STRIDES(self)[i] + */ + __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":248 + * info.strides = PyObject_Malloc(sizeof(Py_ssize_t) * 2 * ndim) + * info.shape = info.strides + ndim + * for i in range(ndim): # <<<<<<<<<<<<<< + * info.strides[i] = PyArray_STRIDES(self)[i] + * info.shape[i] = PyArray_DIMS(self)[i] + */ + __pyx_t_4 = __pyx_v_ndim; + for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { + __pyx_v_i = __pyx_t_5; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":249 + * info.shape = info.strides + ndim + * for i in range(ndim): + * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< + * info.shape[i] = PyArray_DIMS(self)[i] + * else: + */ + (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":250 + * for i in range(ndim): + * info.strides[i] = PyArray_STRIDES(self)[i] + * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< + * else: + * info.strides = PyArray_STRIDES(self) + */ + (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]); + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":243 + * info.buf = PyArray_DATA(self) + * info.ndim = ndim + * if copy_shape: # <<<<<<<<<<<<<< + * # Allocate new buffer for strides and shape info. + * # This is allocated as one block, strides first. 
+ */ + goto __pyx_L11; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":252 + * info.shape[i] = PyArray_DIMS(self)[i] + * else: + * info.strides = PyArray_STRIDES(self) # <<<<<<<<<<<<<< + * info.shape = PyArray_DIMS(self) + * info.suboffsets = NULL + */ + /*else*/ { + __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self)); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":253 + * else: + * info.strides = PyArray_STRIDES(self) + * info.shape = PyArray_DIMS(self) # <<<<<<<<<<<<<< + * info.suboffsets = NULL + * info.itemsize = PyArray_ITEMSIZE(self) + */ + __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self)); + } + __pyx_L11:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":254 + * info.strides = PyArray_STRIDES(self) + * info.shape = PyArray_DIMS(self) + * info.suboffsets = NULL # <<<<<<<<<<<<<< + * info.itemsize = PyArray_ITEMSIZE(self) + * info.readonly = not PyArray_ISWRITEABLE(self) + */ + __pyx_v_info->suboffsets = NULL; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":255 + * info.shape = PyArray_DIMS(self) + * info.suboffsets = NULL + * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< + * info.readonly = not PyArray_ISWRITEABLE(self) + * + */ + __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":256 + * info.suboffsets = NULL + * info.itemsize = PyArray_ITEMSIZE(self) + * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< + * + * cdef int t + */ + __pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0)); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":259 + * + * cdef int t + * cdef char* f = NULL # <<<<<<<<<<<<<< + * cdef dtype descr = self.descr + * cdef int offset + */ + __pyx_v_f = NULL; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":260 + * cdef int t + * cdef char* f = NULL + * cdef dtype descr = self.descr # <<<<<<<<<<<<<< + * cdef int offset + * + */ + __pyx_t_3 = ((PyObject *)__pyx_v_self->descr); + __Pyx_INCREF(__pyx_t_3); + __pyx_v_descr = ((PyArray_Descr *)__pyx_t_3); + __pyx_t_3 = 0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":263 + * cdef int offset + * + * cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<< + * + * if not hasfields and not copy_shape: + */ + __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":265 + * cdef bint hasfields = PyDataType_HASFIELDS(descr) + * + * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< + * # do not call releasebuffer + * info.obj = None + */ + __pyx_t_2 = ((!(__pyx_v_hasfields != 0)) != 0); + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L15_bool_binop_done; + } + 
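+ /* The short-circuit sequence around this point evaluates
+  * "not hasfields and not copy_shape": when both hold, info.shape and
+  * info.strides alias the ndarray's own fields, so info.obj is set to None
+  * and __releasebuffer__ is never called; otherwise info.obj references the
+  * array and __releasebuffer__ later frees the copied metadata. */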
__pyx_t_2 = ((!(__pyx_v_copy_shape != 0)) != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L15_bool_binop_done:; + if (__pyx_t_1) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":267 + * if not hasfields and not copy_shape: + * # do not call releasebuffer + * info.obj = None # <<<<<<<<<<<<<< + * else: + * # need to call releasebuffer + */ + __Pyx_INCREF(Py_None); + __Pyx_GIVEREF(Py_None); + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); + __pyx_v_info->obj = Py_None; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":265 + * cdef bint hasfields = PyDataType_HASFIELDS(descr) + * + * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< + * # do not call releasebuffer + * info.obj = None + */ + goto __pyx_L14; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":270 + * else: + * # need to call releasebuffer + * info.obj = self # <<<<<<<<<<<<<< + * + * if not hasfields: + */ + /*else*/ { + __Pyx_INCREF(((PyObject *)__pyx_v_self)); + __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); + __pyx_v_info->obj = ((PyObject *)__pyx_v_self); + } + __pyx_L14:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":272 + * info.obj = self + * + * if not hasfields: # <<<<<<<<<<<<<< + * t = descr.type_num + * if ((descr.byteorder == c'>' and little_endian) or + */ + __pyx_t_1 = ((!(__pyx_v_hasfields != 0)) != 0); + if (__pyx_t_1) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":273 + * + * if not hasfields: + * t = descr.type_num # <<<<<<<<<<<<<< + * if ((descr.byteorder == c'>' and little_endian) or + * (descr.byteorder == c'<' and not little_endian)): + */ + __pyx_t_4 = __pyx_v_descr->type_num; + __pyx_v_t = __pyx_t_4; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":274 + * if not hasfields: + * t = descr.type_num + * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< + * (descr.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + */ + __pyx_t_2 = ((__pyx_v_descr->byteorder == '>') != 0); + if (!__pyx_t_2) { + goto __pyx_L20_next_or; + } else { + } + __pyx_t_2 = (__pyx_v_little_endian != 0); + if (!__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L19_bool_binop_done; + } + __pyx_L20_next_or:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":275 + * t = descr.type_num + * if ((descr.byteorder == c'>' and little_endian) or + * (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< + * raise ValueError(u"Non-native byte order not supported") + * if t == NPY_BYTE: f = "b" + */ + __pyx_t_2 = ((__pyx_v_descr->byteorder == '<') != 0); + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L19_bool_binop_done; + } + __pyx_t_2 = ((!(__pyx_v_little_endian != 0)) != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L19_bool_binop_done:; + + /* 
"../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":274 + * if not hasfields: + * t = descr.type_num + * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< + * (descr.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + */ + if (__pyx_t_1) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":276 + * if ((descr.byteorder == c'>' and little_endian) or + * (descr.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< + * if t == NPY_BYTE: f = "b" + * elif t == NPY_UBYTE: f = "B" + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 276, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 276, __pyx_L1_error) + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":274 + * if not hasfields: + * t = descr.type_num + * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< + * (descr.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + */ + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":277 + * (descr.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< + * elif t == NPY_UBYTE: f = "B" + * elif t == NPY_SHORT: f = "h" + */ + switch (__pyx_v_t) { + case NPY_BYTE: + __pyx_v_f = ((char *)"b"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":278 + * raise ValueError(u"Non-native byte order not supported") + * if t == NPY_BYTE: f = "b" + * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< + * elif t == NPY_SHORT: f = "h" + * elif t == NPY_USHORT: f = "H" + */ + case NPY_UBYTE: + __pyx_v_f = ((char *)"B"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":279 + * if t == NPY_BYTE: f = "b" + * elif t == NPY_UBYTE: f = "B" + * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< + * elif t == NPY_USHORT: f = "H" + * elif t == NPY_INT: f = "i" + */ + case NPY_SHORT: + __pyx_v_f = ((char *)"h"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":280 + * elif t == NPY_UBYTE: f = "B" + * elif t == NPY_SHORT: f = "h" + * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< + * elif t == NPY_INT: f = "i" + * elif t == NPY_UINT: f = "I" + */ + case NPY_USHORT: + __pyx_v_f = ((char *)"H"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":281 + * elif t == NPY_SHORT: f = "h" + * elif t == NPY_USHORT: f = "H" + * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< + * elif t == NPY_UINT: f = "I" + * elif t == NPY_LONG: f = "l" + */ + case NPY_INT: + __pyx_v_f = ((char *)"i"); + break; + + /* 
"../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":282 + * elif t == NPY_USHORT: f = "H" + * elif t == NPY_INT: f = "i" + * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< + * elif t == NPY_LONG: f = "l" + * elif t == NPY_ULONG: f = "L" + */ + case NPY_UINT: + __pyx_v_f = ((char *)"I"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":283 + * elif t == NPY_INT: f = "i" + * elif t == NPY_UINT: f = "I" + * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< + * elif t == NPY_ULONG: f = "L" + * elif t == NPY_LONGLONG: f = "q" + */ + case NPY_LONG: + __pyx_v_f = ((char *)"l"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":284 + * elif t == NPY_UINT: f = "I" + * elif t == NPY_LONG: f = "l" + * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< + * elif t == NPY_LONGLONG: f = "q" + * elif t == NPY_ULONGLONG: f = "Q" + */ + case NPY_ULONG: + __pyx_v_f = ((char *)"L"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":285 + * elif t == NPY_LONG: f = "l" + * elif t == NPY_ULONG: f = "L" + * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< + * elif t == NPY_ULONGLONG: f = "Q" + * elif t == NPY_FLOAT: f = "f" + */ + case NPY_LONGLONG: + __pyx_v_f = ((char *)"q"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":286 + * elif t == NPY_ULONG: f = "L" + * elif t == NPY_LONGLONG: f = "q" + * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< + * elif t == NPY_FLOAT: f = "f" + * elif t == NPY_DOUBLE: f = "d" + */ + case NPY_ULONGLONG: + __pyx_v_f = ((char *)"Q"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":287 + * elif t == NPY_LONGLONG: f = "q" + * elif t == NPY_ULONGLONG: f = "Q" + * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< + * elif t == NPY_DOUBLE: f = "d" + * elif t == NPY_LONGDOUBLE: f = "g" + */ + case NPY_FLOAT: + __pyx_v_f = ((char *)"f"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":288 + * elif t == NPY_ULONGLONG: f = "Q" + * elif t == NPY_FLOAT: f = "f" + * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< + * elif t == NPY_LONGDOUBLE: f = "g" + * elif t == NPY_CFLOAT: f = "Zf" + */ + case NPY_DOUBLE: + __pyx_v_f = ((char *)"d"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":289 + * elif t == NPY_FLOAT: f = "f" + * elif t == NPY_DOUBLE: f = "d" + * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< + * elif t == NPY_CFLOAT: f = "Zf" + * elif t == NPY_CDOUBLE: f = "Zd" + */ + case NPY_LONGDOUBLE: + __pyx_v_f = ((char *)"g"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":290 + * elif t == NPY_DOUBLE: f = "d" + * elif t == NPY_LONGDOUBLE: f = "g" + * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< + * elif t == NPY_CDOUBLE: f = "Zd" + * elif t == NPY_CLONGDOUBLE: f = "Zg" + */ 
+ case NPY_CFLOAT: + __pyx_v_f = ((char *)"Zf"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":291 + * elif t == NPY_LONGDOUBLE: f = "g" + * elif t == NPY_CFLOAT: f = "Zf" + * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< + * elif t == NPY_CLONGDOUBLE: f = "Zg" + * elif t == NPY_OBJECT: f = "O" + */ + case NPY_CDOUBLE: + __pyx_v_f = ((char *)"Zd"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":292 + * elif t == NPY_CFLOAT: f = "Zf" + * elif t == NPY_CDOUBLE: f = "Zd" + * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< + * elif t == NPY_OBJECT: f = "O" + * else: + */ + case NPY_CLONGDOUBLE: + __pyx_v_f = ((char *)"Zg"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":293 + * elif t == NPY_CDOUBLE: f = "Zd" + * elif t == NPY_CLONGDOUBLE: f = "Zg" + * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< + * else: + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) + */ + case NPY_OBJECT: + __pyx_v_f = ((char *)"O"); + break; + default: + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":295 + * elif t == NPY_OBJECT: f = "O" + * else: + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< + * info.format = f + * return + */ + __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 295, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_6 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_3); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 295, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 295, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_GIVEREF(__pyx_t_6); + PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6); + __pyx_t_6 = 0; + __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 295, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_Raise(__pyx_t_6, 0, 0, 0); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __PYX_ERR(1, 295, __pyx_L1_error) + break; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":296 + * else: + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) + * info.format = f # <<<<<<<<<<<<<< + * return + * else: + */ + __pyx_v_info->format = __pyx_v_f; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":297 + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) + * info.format = f + * return # <<<<<<<<<<<<<< + * else: + * info.format = PyObject_Malloc(_buffer_format_string_len) + */ + __pyx_r = 0; + goto __pyx_L0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":272 + * info.obj = self + * + * if not hasfields: # <<<<<<<<<<<<<< + * t = descr.type_num + * if ((descr.byteorder == c'>' and little_endian) or + */ + } + + /* 
"../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":299 + * return + * else: + * info.format = PyObject_Malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< + * info.format[0] = c'^' # Native data types, manual alignment + * offset = 0 + */ + /*else*/ { + __pyx_v_info->format = ((char *)PyObject_Malloc(0xFF)); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":300 + * else: + * info.format = PyObject_Malloc(_buffer_format_string_len) + * info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<< + * offset = 0 + * f = _util_dtypestring(descr, info.format + 1, + */ + (__pyx_v_info->format[0]) = '^'; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":301 + * info.format = PyObject_Malloc(_buffer_format_string_len) + * info.format[0] = c'^' # Native data types, manual alignment + * offset = 0 # <<<<<<<<<<<<<< + * f = _util_dtypestring(descr, info.format + 1, + * info.format + _buffer_format_string_len, + */ + __pyx_v_offset = 0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":302 + * info.format[0] = c'^' # Native data types, manual alignment + * offset = 0 + * f = _util_dtypestring(descr, info.format + 1, # <<<<<<<<<<<<<< + * info.format + _buffer_format_string_len, + * &offset) + */ + __pyx_t_7 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 0xFF), (&__pyx_v_offset)); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 302, __pyx_L1_error) + __pyx_v_f = __pyx_t_7; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":305 + * info.format + _buffer_format_string_len, + * &offset) + * f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<< + * + * def __releasebuffer__(ndarray self, Py_buffer* info): + */ + (__pyx_v_f[0]) = '\x00'; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":214 + * # experimental exception made for __getbuffer__ and __releasebuffer__ + * # -- the details of this may change. + * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< + * # This implementation of getbuffer is geared towards Cython + * # requirements, and does not yet fullfill the PEP. 
+ */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; + } + goto __pyx_L2; + __pyx_L0:; + if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { + __Pyx_GOTREF(Py_None); + __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; + } + __pyx_L2:; + __Pyx_XDECREF((PyObject *)__pyx_v_descr); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":307 + * f[0] = c'\0' # Terminate format string + * + * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< + * if PyArray_HASFIELDS(self): + * PyObject_Free(info.format) + */ + +/* Python wrapper */ +static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/ +static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0); + __pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) { + __Pyx_RefNannyDeclarations + int __pyx_t_1; + __Pyx_RefNannySetupContext("__releasebuffer__", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":308 + * + * def __releasebuffer__(ndarray self, Py_buffer* info): + * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< + * PyObject_Free(info.format) + * if sizeof(npy_intp) != sizeof(Py_ssize_t): + */ + __pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0); + if (__pyx_t_1) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":309 + * def __releasebuffer__(ndarray self, Py_buffer* info): + * if PyArray_HASFIELDS(self): + * PyObject_Free(info.format) # <<<<<<<<<<<<<< + * if sizeof(npy_intp) != sizeof(Py_ssize_t): + * PyObject_Free(info.strides) + */ + PyObject_Free(__pyx_v_info->format); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":308 + * + * def __releasebuffer__(ndarray self, Py_buffer* info): + * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< + * PyObject_Free(info.format) + * if sizeof(npy_intp) != sizeof(Py_ssize_t): + */ + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":310 + * if PyArray_HASFIELDS(self): + * PyObject_Free(info.format) + * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< + * PyObject_Free(info.strides) + * # info.shape was stored after info.strides in the same block + */ + __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); + if (__pyx_t_1) { + + /* 
"../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":311 + * PyObject_Free(info.format) + * if sizeof(npy_intp) != sizeof(Py_ssize_t): + * PyObject_Free(info.strides) # <<<<<<<<<<<<<< + * # info.shape was stored after info.strides in the same block + * + */ + PyObject_Free(__pyx_v_info->strides); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":310 + * if PyArray_HASFIELDS(self): + * PyObject_Free(info.format) + * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< + * PyObject_Free(info.strides) + * # info.shape was stored after info.strides in the same block + */ + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":307 + * f[0] = c'\0' # Terminate format string + * + * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< + * if PyArray_HASFIELDS(self): + * PyObject_Free(info.format) + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":788 + * ctypedef npy_cdouble complex_t + * + * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(1, a) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":789 + * + * cdef inline object PyArray_MultiIterNew1(a): + * return PyArray_MultiIterNew(1, a) # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew2(a, b): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 789, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":788 + * ctypedef npy_cdouble complex_t + * + * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(1, a) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":791 + * return PyArray_MultiIterNew(1, a) + * + * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(2, a, b) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); + + /* 
"../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":792 + * + * cdef inline object PyArray_MultiIterNew2(a, b): + * return PyArray_MultiIterNew(2, a, b) # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew3(a, b, c): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 792, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":791 + * return PyArray_MultiIterNew(1, a) + * + * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(2, a, b) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":794 + * return PyArray_MultiIterNew(2, a, b) + * + * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(3, a, b, c) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":795 + * + * cdef inline object PyArray_MultiIterNew3(a, b, c): + * return PyArray_MultiIterNew(3, a, b, c) # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew4(a, b, c, d): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 795, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":794 + * return PyArray_MultiIterNew(2, a, b) + * + * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(3, a, b, c) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":797 + * return PyArray_MultiIterNew(3, a, b, c) + * + * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(4, a, b, c, d) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + 
__Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":798 + * + * cdef inline object PyArray_MultiIterNew4(a, b, c, d): + * return PyArray_MultiIterNew(4, a, b, c, d) # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 798, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":797 + * return PyArray_MultiIterNew(3, a, b, c) + * + * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(4, a, b, c, d) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":800 + * return PyArray_MultiIterNew(4, a, b, c, d) + * + * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(5, a, b, c, d, e) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":801 + * + * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): + * return PyArray_MultiIterNew(5, a, b, c, d, e) # <<<<<<<<<<<<<< + * + * cdef inline tuple PyDataType_SHAPE(dtype d): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 801, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":800 + * return PyArray_MultiIterNew(4, a, b, c, d) + * + * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(5, a, b, c, d, e) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":803 + * return PyArray_MultiIterNew(5, a, b, c, d, e) + * + * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<< + * if PyDataType_HASSUBARRAY(d): + * 
return d.subarray.shape + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyDataType_SHAPE(PyArray_Descr *__pyx_v_d) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + __Pyx_RefNannySetupContext("PyDataType_SHAPE", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":804 + * + * cdef inline tuple PyDataType_SHAPE(dtype d): + * if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<< + * return d.subarray.shape + * else: + */ + __pyx_t_1 = (PyDataType_HASSUBARRAY(__pyx_v_d) != 0); + if (__pyx_t_1) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":805 + * cdef inline tuple PyDataType_SHAPE(dtype d): + * if PyDataType_HASSUBARRAY(d): + * return d.subarray.shape # <<<<<<<<<<<<<< + * else: + * return () + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject*)__pyx_v_d->subarray->shape)); + __pyx_r = ((PyObject*)__pyx_v_d->subarray->shape); + goto __pyx_L0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":804 + * + * cdef inline tuple PyDataType_SHAPE(dtype d): + * if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<< + * return d.subarray.shape + * else: + */ + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":807 + * return d.subarray.shape + * else: + * return () # <<<<<<<<<<<<<< + * + * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: + */ + /*else*/ { + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_empty_tuple); + __pyx_r = __pyx_empty_tuple; + goto __pyx_L0; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":803 + * return PyArray_MultiIterNew(5, a, b, c, d, e) + * + * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<< + * if PyDataType_HASSUBARRAY(d): + * return d.subarray.shape + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":809 + * return () + * + * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< + * # Recursive utility function used in __getbuffer__ to get format + * # string. The new location in the format string is returned. 
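+ *
+ * (Annotation, assuming NumPy's packing rules, not generated output:
+ * for np.dtype([('a', np.uint8), ('b', np.float64)], align=True) the
+ * fields sit at offsets 0 and 8, so the loop below writes 'B', seven
+ * 'x' pad bytes, then 'd', and __getbuffer__ above publishes the
+ * format "^Bxxxxxxxd". The "(end - f) - ... < 15" check reserves
+ * headroom in the 255-byte buffer before each field is emitted.)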
+ */ + +static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { + PyArray_Descr *__pyx_v_child = 0; + int __pyx_v_endian_detector; + int __pyx_v_little_endian; + PyObject *__pyx_v_fields = 0; + PyObject *__pyx_v_childname = NULL; + PyObject *__pyx_v_new_offset = NULL; + PyObject *__pyx_v_t = NULL; + char *__pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + Py_ssize_t __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + int __pyx_t_5; + int __pyx_t_6; + int __pyx_t_7; + long __pyx_t_8; + char *__pyx_t_9; + __Pyx_RefNannySetupContext("_util_dtypestring", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":814 + * + * cdef dtype child + * cdef int endian_detector = 1 # <<<<<<<<<<<<<< + * cdef bint little_endian = ((&endian_detector)[0] != 0) + * cdef tuple fields + */ + __pyx_v_endian_detector = 1; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":815 + * cdef dtype child + * cdef int endian_detector = 1 + * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< + * cdef tuple fields + * + */ + __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":818 + * cdef tuple fields + * + * for childname in descr.names: # <<<<<<<<<<<<<< + * fields = descr.fields[childname] + * child, new_offset = fields + */ + if (unlikely(__pyx_v_descr->names == Py_None)) { + PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); + __PYX_ERR(1, 818, __pyx_L1_error) + } + __pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; + for (;;) { + if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) __PYX_ERR(1, 818, __pyx_L1_error) + #else + __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 818, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + #endif + __Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3); + __pyx_t_3 = 0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":819 + * + * for childname in descr.names: + * fields = descr.fields[childname] # <<<<<<<<<<<<<< + * child, new_offset = fields + * + */ + if (unlikely(__pyx_v_descr->fields == Py_None)) { + PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); + __PYX_ERR(1, 819, __pyx_L1_error) + } + __pyx_t_3 = __Pyx_PyDict_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 819, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) __PYX_ERR(1, 819, __pyx_L1_error) + __Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3)); + __pyx_t_3 = 0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":820 + * for childname 
in descr.names: + * fields = descr.fields[childname] + * child, new_offset = fields # <<<<<<<<<<<<<< + * + * if (end - f) - (new_offset - offset[0]) < 15: + */ + if (likely(__pyx_v_fields != Py_None)) { + PyObject* sequence = __pyx_v_fields; + #if !CYTHON_COMPILING_IN_PYPY + Py_ssize_t size = Py_SIZE(sequence); + #else + Py_ssize_t size = PySequence_Size(sequence); + #endif + if (unlikely(size != 2)) { + if (size > 2) __Pyx_RaiseTooManyValuesError(2); + else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); + __PYX_ERR(1, 820, __pyx_L1_error) + } + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); + __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); + __Pyx_INCREF(__pyx_t_3); + __Pyx_INCREF(__pyx_t_4); + #else + __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 820, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 820, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + #endif + } else { + __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 820, __pyx_L1_error) + } + if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) __PYX_ERR(1, 820, __pyx_L1_error) + __Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3)); + __pyx_t_3 = 0; + __Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4); + __pyx_t_4 = 0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":822 + * child, new_offset = fields + * + * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") + * + */ + __pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 822, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 822, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 822, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0); + if (__pyx_t_6) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":823 + * + * if (end - f) - (new_offset - offset[0]) < 15: + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< + * + * if ((child.byteorder == c'>' and little_endian) or + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 823, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 823, __pyx_L1_error) + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":822 + * child, new_offset = fields + * + * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") + * + */ + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":825 + * raise 
RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") + * + * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< + * (child.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + */ + __pyx_t_7 = ((__pyx_v_child->byteorder == '>') != 0); + if (!__pyx_t_7) { + goto __pyx_L8_next_or; + } else { + } + __pyx_t_7 = (__pyx_v_little_endian != 0); + if (!__pyx_t_7) { + } else { + __pyx_t_6 = __pyx_t_7; + goto __pyx_L7_bool_binop_done; + } + __pyx_L8_next_or:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":826 + * + * if ((child.byteorder == c'>' and little_endian) or + * (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< + * raise ValueError(u"Non-native byte order not supported") + * # One could encode it in the format string and have Cython + */ + __pyx_t_7 = ((__pyx_v_child->byteorder == '<') != 0); + if (__pyx_t_7) { + } else { + __pyx_t_6 = __pyx_t_7; + goto __pyx_L7_bool_binop_done; + } + __pyx_t_7 = ((!(__pyx_v_little_endian != 0)) != 0); + __pyx_t_6 = __pyx_t_7; + __pyx_L7_bool_binop_done:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":825 + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") + * + * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< + * (child.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + */ + if (__pyx_t_6) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":827 + * if ((child.byteorder == c'>' and little_endian) or + * (child.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< + * # One could encode it in the format string and have Cython + * # complain instead, BUT: < and > in format strings also imply + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 827, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 827, __pyx_L1_error) + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":825 + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") + * + * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< + * (child.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + */ + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":837 + * + * # Output padding bytes + * while offset[0] < new_offset: # <<<<<<<<<<<<<< + * f[0] = 120 # "x"; pad byte + * f += 1 + */ + while (1) { + __pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 837, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 837, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if 
(unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 837, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (!__pyx_t_6) break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":838 + * # Output padding bytes + * while offset[0] < new_offset: + * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< + * f += 1 + * offset[0] += 1 + */ + (__pyx_v_f[0]) = 0x78; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":839 + * while offset[0] < new_offset: + * f[0] = 120 # "x"; pad byte + * f += 1 # <<<<<<<<<<<<<< + * offset[0] += 1 + * + */ + __pyx_v_f = (__pyx_v_f + 1); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":840 + * f[0] = 120 # "x"; pad byte + * f += 1 + * offset[0] += 1 # <<<<<<<<<<<<<< + * + * offset[0] += child.itemsize + */ + __pyx_t_8 = 0; + (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + 1); + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":842 + * offset[0] += 1 + * + * offset[0] += child.itemsize # <<<<<<<<<<<<<< + * + * if not PyDataType_HASFIELDS(child): + */ + __pyx_t_8 = 0; + (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + __pyx_v_child->elsize); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":844 + * offset[0] += child.itemsize + * + * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< + * t = child.type_num + * if end - f < 5: + */ + __pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0); + if (__pyx_t_6) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":845 + * + * if not PyDataType_HASFIELDS(child): + * t = child.type_num # <<<<<<<<<<<<<< + * if end - f < 5: + * raise RuntimeError(u"Format string allocated too short.") + */ + __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 845, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4); + __pyx_t_4 = 0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":846 + * if not PyDataType_HASFIELDS(child): + * t = child.type_num + * if end - f < 5: # <<<<<<<<<<<<<< + * raise RuntimeError(u"Format string allocated too short.") + * + */ + __pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0); + if (__pyx_t_6) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":847 + * t = child.type_num + * if end - f < 5: + * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< + * + * # Until ticket #99 is fixed, use integers to avoid warnings + */ + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 847, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_Raise(__pyx_t_4, 0, 0, 0); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __PYX_ERR(1, 847, __pyx_L1_error) + + /* 
"../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":846 + * if not PyDataType_HASFIELDS(child): + * t = child.type_num + * if end - f < 5: # <<<<<<<<<<<<<< + * raise RuntimeError(u"Format string allocated too short.") + * + */ + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":850 + * + * # Until ticket #99 is fixed, use integers to avoid warnings + * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< + * elif t == NPY_UBYTE: f[0] = 66 #"B" + * elif t == NPY_SHORT: f[0] = 104 #"h" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_BYTE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 850, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 850, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 850, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 98; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":851 + * # Until ticket #99 is fixed, use integers to avoid warnings + * if t == NPY_BYTE: f[0] = 98 #"b" + * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< + * elif t == NPY_SHORT: f[0] = 104 #"h" + * elif t == NPY_USHORT: f[0] = 72 #"H" + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UBYTE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 851, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 851, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 851, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 66; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":852 + * if t == NPY_BYTE: f[0] = 98 #"b" + * elif t == NPY_UBYTE: f[0] = 66 #"B" + * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< + * elif t == NPY_USHORT: f[0] = 72 #"H" + * elif t == NPY_INT: f[0] = 105 #"i" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_SHORT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 852, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 852, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 852, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x68; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":853 + * elif t == NPY_UBYTE: f[0] = 66 #"B" + * elif t == NPY_SHORT: f[0] = 104 #"h" + * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< + * elif t == NPY_INT: f[0] = 105 #"i" + * elif t == NPY_UINT: f[0] = 73 #"I" + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_USHORT); if 
(unlikely(!__pyx_t_3)) __PYX_ERR(1, 853, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 853, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 853, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 72; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":854 + * elif t == NPY_SHORT: f[0] = 104 #"h" + * elif t == NPY_USHORT: f[0] = 72 #"H" + * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< + * elif t == NPY_UINT: f[0] = 73 #"I" + * elif t == NPY_LONG: f[0] = 108 #"l" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_INT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 854, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 854, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 854, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x69; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":855 + * elif t == NPY_USHORT: f[0] = 72 #"H" + * elif t == NPY_INT: f[0] = 105 #"i" + * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< + * elif t == NPY_LONG: f[0] = 108 #"l" + * elif t == NPY_ULONG: f[0] = 76 #"L" + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UINT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 855, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 855, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 855, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 73; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":856 + * elif t == NPY_INT: f[0] = 105 #"i" + * elif t == NPY_UINT: f[0] = 73 #"I" + * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< + * elif t == NPY_ULONG: f[0] = 76 #"L" + * elif t == NPY_LONGLONG: f[0] = 113 #"q" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 856, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 856, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 856, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x6C; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":857 + * elif t == NPY_UINT: f[0] = 73 #"I" + * elif t == NPY_LONG: f[0] = 108 #"l" + * elif t == NPY_ULONG: f[0] = 76 #"L" # 
<<<<<<<<<<<<<< + * elif t == NPY_LONGLONG: f[0] = 113 #"q" + * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 857, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 857, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 857, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 76; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":858 + * elif t == NPY_LONG: f[0] = 108 #"l" + * elif t == NPY_ULONG: f[0] = 76 #"L" + * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< + * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" + * elif t == NPY_FLOAT: f[0] = 102 #"f" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 858, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 858, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 858, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x71; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":859 + * elif t == NPY_ULONG: f[0] = 76 #"L" + * elif t == NPY_LONGLONG: f[0] = 113 #"q" + * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< + * elif t == NPY_FLOAT: f[0] = 102 #"f" + * elif t == NPY_DOUBLE: f[0] = 100 #"d" + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 859, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 859, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 859, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 81; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":860 + * elif t == NPY_LONGLONG: f[0] = 113 #"q" + * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" + * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< + * elif t == NPY_DOUBLE: f[0] = 100 #"d" + * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_FLOAT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 860, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 860, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 860, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x66; + goto __pyx_L15; + } + + /* 
"../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":861 + * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" + * elif t == NPY_FLOAT: f[0] = 102 #"f" + * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< + * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" + * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 861, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 861, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 861, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x64; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":862 + * elif t == NPY_FLOAT: f[0] = 102 #"f" + * elif t == NPY_DOUBLE: f[0] = 100 #"d" + * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< + * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf + * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 862, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 862, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 862, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x67; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":863 + * elif t == NPY_DOUBLE: f[0] = 100 #"d" + * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" + * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< + * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd + * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 863, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 863, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 863, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 90; + (__pyx_v_f[1]) = 0x66; + __pyx_v_f = (__pyx_v_f + 1); + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":864 + * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" + * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf + * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< + * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg + * elif t == NPY_OBJECT: f[0] = 79 #"O" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 
864, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 864, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 864, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 90; + (__pyx_v_f[1]) = 0x64; + __pyx_v_f = (__pyx_v_f + 1); + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":865 + * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf + * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd + * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< + * elif t == NPY_OBJECT: f[0] = 79 #"O" + * else: + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 865, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 865, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 865, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 90; + (__pyx_v_f[1]) = 0x67; + __pyx_v_f = (__pyx_v_f + 1); + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":866 + * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd + * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg + * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< + * else: + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_OBJECT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 866, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 866, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 866, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 79; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":868 + * elif t == NPY_OBJECT: f[0] = 79 #"O" + * else: + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< + * f += 1 + * else: + */ + /*else*/ { + __pyx_t_3 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 868, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 868, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_GIVEREF(__pyx_t_3); + PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); + __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 868, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 868, __pyx_L1_error) + } + 
__pyx_L15:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":869 + * else: + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) + * f += 1 # <<<<<<<<<<<<<< + * else: + * # Cython ignores struct boundary information ("T{...}"), + */ + __pyx_v_f = (__pyx_v_f + 1); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":844 + * offset[0] += child.itemsize + * + * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< + * t = child.type_num + * if end - f < 5: + */ + goto __pyx_L13; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":873 + * # Cython ignores struct boundary information ("T{...}"), + * # so don't output it + * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< + * return f + * + */ + /*else*/ { + __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_9 == ((char *)NULL))) __PYX_ERR(1, 873, __pyx_L1_error) + __pyx_v_f = __pyx_t_9; + } + __pyx_L13:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":818 + * cdef tuple fields + * + * for childname in descr.names: # <<<<<<<<<<<<<< + * fields = descr.fields[childname] + * child, new_offset = fields + */ + } + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":874 + * # so don't output it + * f = _util_dtypestring(child, f, end, offset) + * return f # <<<<<<<<<<<<<< + * + * + */ + __pyx_r = __pyx_v_f; + goto __pyx_L0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":809 + * return () + * + * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< + * # Recursive utility function used in __getbuffer__ to get format + * # string. The new location in the format string is returned. 
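+ *
+ * (Annotation, not generated output: the if/elif ladder above is a
+ * type_num to format-character table written with integer literals to
+ * avoid warnings, per the "ticket #99" comment; 98, 66, 104, 72, 105,
+ * 73, 108, 76, 113, 81, 102, 100, 103 and 79 are the ASCII codes of
+ * 'b', 'B', 'h', 'H', 'i', 'I', 'l', 'L', 'q', 'Q', 'f', 'd', 'g' and
+ * 'O', and the complex kinds emit a two-byte pair starting with
+ * 'Z' (90), which is why those branches advance f one extra position.
+ * Nested structured fields recurse through _util_dtypestring itself,
+ * dropping the "T{...}" struct-boundary notation that Cython ignores.)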
+ */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_child); + __Pyx_XDECREF(__pyx_v_fields); + __Pyx_XDECREF(__pyx_v_childname); + __Pyx_XDECREF(__pyx_v_new_offset); + __Pyx_XDECREF(__pyx_v_t); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":990 + * + * + * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< + * cdef PyObject* baseptr + * if base is None: + */ + +static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { + PyObject *__pyx_v_baseptr; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + __Pyx_RefNannySetupContext("set_array_base", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":992 + * cdef inline void set_array_base(ndarray arr, object base): + * cdef PyObject* baseptr + * if base is None: # <<<<<<<<<<<<<< + * baseptr = NULL + * else: + */ + __pyx_t_1 = (__pyx_v_base == Py_None); + __pyx_t_2 = (__pyx_t_1 != 0); + if (__pyx_t_2) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":993 + * cdef PyObject* baseptr + * if base is None: + * baseptr = NULL # <<<<<<<<<<<<<< + * else: + * Py_INCREF(base) # important to do this before decref below! + */ + __pyx_v_baseptr = NULL; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":992 + * cdef inline void set_array_base(ndarray arr, object base): + * cdef PyObject* baseptr + * if base is None: # <<<<<<<<<<<<<< + * baseptr = NULL + * else: + */ + goto __pyx_L3; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":995 + * baseptr = NULL + * else: + * Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<< + * baseptr = base + * Py_XDECREF(arr.base) + */ + /*else*/ { + Py_INCREF(__pyx_v_base); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":996 + * else: + * Py_INCREF(base) # important to do this before decref below! + * baseptr = base # <<<<<<<<<<<<<< + * Py_XDECREF(arr.base) + * arr.base = baseptr + */ + __pyx_v_baseptr = ((PyObject *)__pyx_v_base); + } + __pyx_L3:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":997 + * Py_INCREF(base) # important to do this before decref below! 
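+ *
+ * (Annotation: the ordering matters when the new base is the object
+ * already stored in arr.base; decrementing first could drop the last
+ * reference and free it before its pointer is written back.)
+ *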
+ * baseptr = base + * Py_XDECREF(arr.base) # <<<<<<<<<<<<<< + * arr.base = baseptr + * + */ + Py_XDECREF(__pyx_v_arr->base); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":998 + * baseptr = base + * Py_XDECREF(arr.base) + * arr.base = baseptr # <<<<<<<<<<<<<< + * + * cdef inline object get_array_base(ndarray arr): + */ + __pyx_v_arr->base = __pyx_v_baseptr; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":990 + * + * + * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< + * cdef PyObject* baseptr + * if base is None: + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1000 + * arr.base = baseptr + * + * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< + * if arr.base is NULL: + * return None + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + __Pyx_RefNannySetupContext("get_array_base", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1001 + * + * cdef inline object get_array_base(ndarray arr): + * if arr.base is NULL: # <<<<<<<<<<<<<< + * return None + * else: + */ + __pyx_t_1 = ((__pyx_v_arr->base == NULL) != 0); + if (__pyx_t_1) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1002 + * cdef inline object get_array_base(ndarray arr): + * if arr.base is NULL: + * return None # <<<<<<<<<<<<<< + * else: + * return arr.base + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(Py_None); + __pyx_r = Py_None; + goto __pyx_L0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1001 + * + * cdef inline object get_array_base(ndarray arr): + * if arr.base is NULL: # <<<<<<<<<<<<<< + * return None + * else: + */ + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1004 + * return None + * else: + * return arr.base # <<<<<<<<<<<<<< + * + * + */ + /*else*/ { + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); + __pyx_r = ((PyObject *)__pyx_v_arr->base); + goto __pyx_L0; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1000 + * arr.base = baseptr + * + * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< + * if arr.base is NULL: + * return None + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1009 + * # Versions of the import_* functions which are more suitable for + * # Cython code. 
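+ *
+ * (Annotation, not generated output: _import_array() fills in NumPy's
+ * C-API function-pointer table; this wrapper converts any failure into
+ * an ImportError so a module that cimports numpy fails cleanly at
+ * import time instead of crashing on its first C-API call. Callers
+ * are expected to invoke np.import_array() during module
+ * initialization.)
+ *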
+ * cdef inline int import_array() except -1: # <<<<<<<<<<<<<< + * try: + * _import_array() + */ + +static CYTHON_INLINE int __pyx_f_5numpy_import_array(void) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + __Pyx_RefNannySetupContext("import_array", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1010 + * # Cython code. + * cdef inline int import_array() except -1: + * try: # <<<<<<<<<<<<<< + * _import_array() + * except Exception: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_1); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + /*try:*/ { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1011 + * cdef inline int import_array() except -1: + * try: + * _import_array() # <<<<<<<<<<<<<< + * except Exception: + * raise ImportError("numpy.core.multiarray failed to import") + */ + __pyx_t_4 = _import_array(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1011, __pyx_L3_error) + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1010 + * # Cython code. + * cdef inline int import_array() except -1: + * try: # <<<<<<<<<<<<<< + * _import_array() + * except Exception: + */ + } + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + goto __pyx_L8_try_end; + __pyx_L3_error:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1012 + * try: + * _import_array() + * except Exception: # <<<<<<<<<<<<<< + * raise ImportError("numpy.core.multiarray failed to import") + * + */ + __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); + if (__pyx_t_4) { + __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1012, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GOTREF(__pyx_t_7); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1013 + * _import_array() + * except Exception: + * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<< + * + * cdef inline int import_umath() except -1: + */ + __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1013, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_Raise(__pyx_t_8, 0, 0, 0); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __PYX_ERR(1, 1013, __pyx_L5_except_error) + } + goto __pyx_L5_except_error; + __pyx_L5_except_error:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1010 + * # Cython code. 
+ * cdef inline int import_array() except -1: + * try: # <<<<<<<<<<<<<< + * _import_array() + * except Exception: + */ + __Pyx_XGIVEREF(__pyx_t_1); + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); + goto __pyx_L1_error; + __pyx_L8_try_end:; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1009 + * # Versions of the import_* functions which are more suitable for + * # Cython code. + * cdef inline int import_array() except -1: # <<<<<<<<<<<<<< + * try: + * _import_array() + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1015 + * raise ImportError("numpy.core.multiarray failed to import") + * + * cdef inline int import_umath() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + +static CYTHON_INLINE int __pyx_f_5numpy_import_umath(void) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + __Pyx_RefNannySetupContext("import_umath", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1016 + * + * cdef inline int import_umath() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_1); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + /*try:*/ { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1017 + * cdef inline int import_umath() except -1: + * try: + * _import_umath() # <<<<<<<<<<<<<< + * except Exception: + * raise ImportError("numpy.core.umath failed to import") + */ + __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1017, __pyx_L3_error) + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1016 + * + * cdef inline int import_umath() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + } + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + goto __pyx_L8_try_end; + __pyx_L3_error:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1018 + * try: + * _import_umath() + * except Exception: # <<<<<<<<<<<<<< + * raise ImportError("numpy.core.umath failed to import") + * + */ + __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); + if (__pyx_t_4) { + __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, 
__pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1018, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GOTREF(__pyx_t_7); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1019 + * _import_umath() + * except Exception: + * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< + * + * cdef inline int import_ufunc() except -1: + */ + __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1019, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_Raise(__pyx_t_8, 0, 0, 0); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __PYX_ERR(1, 1019, __pyx_L5_except_error) + } + goto __pyx_L5_except_error; + __pyx_L5_except_error:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1016 + * + * cdef inline int import_umath() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + __Pyx_XGIVEREF(__pyx_t_1); + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); + goto __pyx_L1_error; + __pyx_L8_try_end:; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1015 + * raise ImportError("numpy.core.multiarray failed to import") + * + * cdef inline int import_umath() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1021 + * raise ImportError("numpy.core.umath failed to import") + * + * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + +static CYTHON_INLINE int __pyx_f_5numpy_import_ufunc(void) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + __Pyx_RefNannySetupContext("import_ufunc", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1022 + * + * cdef inline int import_ufunc() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_1); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + /*try:*/ { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1023 + * cdef inline int import_ufunc() except -1: + * try: + * _import_umath() # <<<<<<<<<<<<<< + * except Exception: + * raise ImportError("numpy.core.umath failed to 
import") + */ + __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1023, __pyx_L3_error) + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1022 + * + * cdef inline int import_ufunc() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + } + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + goto __pyx_L8_try_end; + __pyx_L3_error:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1024 + * try: + * _import_umath() + * except Exception: # <<<<<<<<<<<<<< + * raise ImportError("numpy.core.umath failed to import") + */ + __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); + if (__pyx_t_4) { + __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1024, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GOTREF(__pyx_t_7); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1025 + * _import_umath() + * except Exception: + * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< + */ + __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1025, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_Raise(__pyx_t_8, 0, 0, 0); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __PYX_ERR(1, 1025, __pyx_L5_except_error) + } + goto __pyx_L5_except_error; + __pyx_L5_except_error:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1022 + * + * cdef inline int import_ufunc() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + __Pyx_XGIVEREF(__pyx_t_1); + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); + goto __pyx_L1_error; + __pyx_L8_try_end:; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1021 + * raise ImportError("numpy.core.umath failed to import") + * + * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyMethodDef __pyx_methods[] = { + {0, 0, 0, 0} +}; + +#if PY_MAJOR_VERSION >= 3 +#if CYTHON_PEP489_MULTI_PHASE_INIT +static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ +static int __pyx_pymod_exec_boundary_extend(PyObject* module); /*proto*/ +static PyModuleDef_Slot __pyx_moduledef_slots[] = { + {Py_mod_create, (void*)__pyx_pymod_create}, + {Py_mod_exec, (void*)__pyx_pymod_exec_boundary_extend}, + {0, NULL} +}; +#endif + +static struct PyModuleDef __pyx_moduledef = { + 
PyModuleDef_HEAD_INIT, + "boundary_extend", + 0, /* m_doc */ + #if CYTHON_PEP489_MULTI_PHASE_INIT + 0, /* m_size */ + #else + -1, /* m_size */ + #endif + __pyx_methods /* m_methods */, + #if CYTHON_PEP489_MULTI_PHASE_INIT + __pyx_moduledef_slots, /* m_slots */ + #else + NULL, /* m_reload */ + #endif + NULL, /* m_traverse */ + NULL, /* m_clear */ + NULL /* m_free */ +}; +#endif + +static __Pyx_StringTabEntry __pyx_string_tab[] = { + {&__pyx_kp_s_Convolution_kernel_must_have_odd, __pyx_k_Convolution_kernel_must_have_odd, sizeof(__pyx_k_Convolution_kernel_must_have_odd), 0, 0, 1, 0}, + {&__pyx_n_s_DTYPE, __pyx_k_DTYPE, sizeof(__pyx_k_DTYPE), 0, 0, 1, 1}, + {&__pyx_kp_u_Format_string_allocated_too_shor, __pyx_k_Format_string_allocated_too_shor, sizeof(__pyx_k_Format_string_allocated_too_shor), 0, 1, 0, 0}, + {&__pyx_kp_u_Format_string_allocated_too_shor_2, __pyx_k_Format_string_allocated_too_shor_2, sizeof(__pyx_k_Format_string_allocated_too_shor_2), 0, 1, 0, 0}, + {&__pyx_n_s_ImportError, __pyx_k_ImportError, sizeof(__pyx_k_ImportError), 0, 0, 1, 1}, + {&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0}, + {&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 0, 1, 1}, + {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, + {&__pyx_kp_s_astropy_convolution_boundary_ext, __pyx_k_astropy_convolution_boundary_ext, sizeof(__pyx_k_astropy_convolution_boundary_ext), 0, 0, 1, 0}, + {&__pyx_n_s_astropy_convolution_boundary_ext_2, __pyx_k_astropy_convolution_boundary_ext_2, sizeof(__pyx_k_astropy_convolution_boundary_ext_2), 0, 0, 1, 1}, + {&__pyx_n_s_bot, __pyx_k_bot, sizeof(__pyx_k_bot), 0, 0, 1, 1}, + {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, + {&__pyx_n_s_conv, __pyx_k_conv, sizeof(__pyx_k_conv), 0, 0, 1, 1}, + {&__pyx_n_s_convolve1d_boundary_extend, __pyx_k_convolve1d_boundary_extend, sizeof(__pyx_k_convolve1d_boundary_extend), 0, 0, 1, 1}, + {&__pyx_n_s_convolve2d_boundary_extend, __pyx_k_convolve2d_boundary_extend, sizeof(__pyx_k_convolve2d_boundary_extend), 0, 0, 1, 1}, + {&__pyx_n_s_convolve3d_boundary_extend, __pyx_k_convolve3d_boundary_extend, sizeof(__pyx_k_convolve3d_boundary_extend), 0, 0, 1, 1}, + {&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1}, + {&__pyx_n_s_empty, __pyx_k_empty, sizeof(__pyx_k_empty), 0, 0, 1, 1}, + {&__pyx_n_s_f, __pyx_k_f, sizeof(__pyx_k_f), 0, 0, 1, 1}, + {&__pyx_n_s_float, __pyx_k_float, sizeof(__pyx_k_float), 0, 0, 1, 1}, + {&__pyx_n_s_g, __pyx_k_g, sizeof(__pyx_k_g), 0, 0, 1, 1}, + {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1}, + {&__pyx_n_s_ii, __pyx_k_ii, sizeof(__pyx_k_ii), 0, 0, 1, 1}, + {&__pyx_n_s_iii, __pyx_k_iii, sizeof(__pyx_k_iii), 0, 0, 1, 1}, + {&__pyx_n_s_iimax, __pyx_k_iimax, sizeof(__pyx_k_iimax), 0, 0, 1, 1}, + {&__pyx_n_s_iimin, __pyx_k_iimin, sizeof(__pyx_k_iimin), 0, 0, 1, 1}, + {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, + {&__pyx_n_s_j, __pyx_k_j, sizeof(__pyx_k_j), 0, 0, 1, 1}, + {&__pyx_n_s_jj, __pyx_k_jj, sizeof(__pyx_k_jj), 0, 0, 1, 1}, + {&__pyx_n_s_jjj, __pyx_k_jjj, sizeof(__pyx_k_jjj), 0, 0, 1, 1}, + {&__pyx_n_s_jjmax, __pyx_k_jjmax, sizeof(__pyx_k_jjmax), 0, 0, 1, 1}, + {&__pyx_n_s_jjmin, __pyx_k_jjmin, sizeof(__pyx_k_jjmin), 0, 0, 1, 1}, + {&__pyx_n_s_k, __pyx_k_k, sizeof(__pyx_k_k), 0, 0, 1, 1}, + {&__pyx_n_s_ker, __pyx_k_ker, sizeof(__pyx_k_ker), 0, 
0, 1, 1}, + {&__pyx_n_s_kk, __pyx_k_kk, sizeof(__pyx_k_kk), 0, 0, 1, 1}, + {&__pyx_n_s_kkk, __pyx_k_kkk, sizeof(__pyx_k_kkk), 0, 0, 1, 1}, + {&__pyx_n_s_kkmax, __pyx_k_kkmax, sizeof(__pyx_k_kkmax), 0, 0, 1, 1}, + {&__pyx_n_s_kkmin, __pyx_k_kkmin, sizeof(__pyx_k_kkmin), 0, 0, 1, 1}, + {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, + {&__pyx_kp_u_ndarray_is_not_C_contiguous, __pyx_k_ndarray_is_not_C_contiguous, sizeof(__pyx_k_ndarray_is_not_C_contiguous), 0, 1, 0, 0}, + {&__pyx_kp_u_ndarray_is_not_Fortran_contiguou, __pyx_k_ndarray_is_not_Fortran_contiguou, sizeof(__pyx_k_ndarray_is_not_Fortran_contiguou), 0, 1, 0, 0}, + {&__pyx_n_s_nkx, __pyx_k_nkx, sizeof(__pyx_k_nkx), 0, 0, 1, 1}, + {&__pyx_n_s_nky, __pyx_k_nky, sizeof(__pyx_k_nky), 0, 0, 1, 1}, + {&__pyx_n_s_nkz, __pyx_k_nkz, sizeof(__pyx_k_nkz), 0, 0, 1, 1}, + {&__pyx_n_s_normalize_by_kernel, __pyx_k_normalize_by_kernel, sizeof(__pyx_k_normalize_by_kernel), 0, 0, 1, 1}, + {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1}, + {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, + {&__pyx_kp_s_numpy_core_multiarray_failed_to, __pyx_k_numpy_core_multiarray_failed_to, sizeof(__pyx_k_numpy_core_multiarray_failed_to), 0, 0, 1, 0}, + {&__pyx_kp_s_numpy_core_umath_failed_to_impor, __pyx_k_numpy_core_umath_failed_to_impor, sizeof(__pyx_k_numpy_core_umath_failed_to_impor), 0, 0, 1, 0}, + {&__pyx_n_s_nx, __pyx_k_nx, sizeof(__pyx_k_nx), 0, 0, 1, 1}, + {&__pyx_n_s_ny, __pyx_k_ny, sizeof(__pyx_k_ny), 0, 0, 1, 1}, + {&__pyx_n_s_nz, __pyx_k_nz, sizeof(__pyx_k_nz), 0, 0, 1, 1}, + {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, + {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, + {&__pyx_n_s_top, __pyx_k_top, sizeof(__pyx_k_top), 0, 0, 1, 1}, + {&__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_k_unknown_dtype_code_in_numpy_pxd, sizeof(__pyx_k_unknown_dtype_code_in_numpy_pxd), 0, 1, 0, 0}, + {&__pyx_n_s_val, __pyx_k_val, sizeof(__pyx_k_val), 0, 0, 1, 1}, + {&__pyx_n_s_wkx, __pyx_k_wkx, sizeof(__pyx_k_wkx), 0, 0, 1, 1}, + {&__pyx_n_s_wky, __pyx_k_wky, sizeof(__pyx_k_wky), 0, 0, 1, 1}, + {&__pyx_n_s_wkz, __pyx_k_wkz, sizeof(__pyx_k_wkz), 0, 0, 1, 1}, + {0, 0, 0, 0, 0, 0, 0} +}; +static int __Pyx_InitCachedBuiltins(void) { + __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(0, 25, __pyx_L1_error) + __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 44, __pyx_L1_error) + __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) __PYX_ERR(1, 823, __pyx_L1_error) + __pyx_builtin_ImportError = __Pyx_GetBuiltinName(__pyx_n_s_ImportError); if (!__pyx_builtin_ImportError) __PYX_ERR(1, 1013, __pyx_L1_error) + return 0; + __pyx_L1_error:; + return -1; +} + +static int __Pyx_InitCachedConstants(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); + + /* "astropy/convolution/boundary_extend.pyx":25 + * + * if g.shape[0] % 2 != 1: + * raise ValueError("Convolution kernel must have odd dimensions") # <<<<<<<<<<<<<< + * + * assert f.dtype == DTYPE and g.dtype == DTYPE + */ + __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_s_Convolution_kernel_must_have_odd); if (unlikely(!__pyx_tuple_)) __PYX_ERR(0, 25, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple_); + __Pyx_GIVEREF(__pyx_tuple_); + + /* "astropy/convolution/boundary_extend.pyx":73 + * + * if g.shape[0] % 2 != 1 or g.shape[1] % 2 != 1: + * 
raise ValueError("Convolution kernel must have odd dimensions") # <<<<<<<<<<<<<< + * + * assert f.dtype == DTYPE and g.dtype == DTYPE + */ + __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_Convolution_kernel_must_have_odd); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(0, 73, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__2); + __Pyx_GIVEREF(__pyx_tuple__2); + + /* "astropy/convolution/boundary_extend.pyx":130 + * + * if g.shape[0] % 2 != 1 or g.shape[1] % 2 != 1 or g.shape[2] % 2 != 1: + * raise ValueError("Convolution kernel must have odd dimensions") # <<<<<<<<<<<<<< + * + * assert f.dtype == DTYPE and g.dtype == DTYPE + */ + __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_s_Convolution_kernel_must_have_odd); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(0, 130, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__3); + __Pyx_GIVEREF(__pyx_tuple__3); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":235 + * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) + * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): + * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< + * + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) + */ + __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 235, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__4); + __Pyx_GIVEREF(__pyx_tuple__4); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":239 + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) + * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): + * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< + * + * info.buf = PyArray_DATA(self) + */ + __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 239, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__5); + __Pyx_GIVEREF(__pyx_tuple__5); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":276 + * if ((descr.byteorder == c'>' and little_endian) or + * (descr.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< + * if t == NPY_BYTE: f = "b" + * elif t == NPY_UBYTE: f = "B" + */ + __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 276, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__6); + __Pyx_GIVEREF(__pyx_tuple__6); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":823 + * + * if (end - f) - (new_offset - offset[0]) < 15: + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< + * + * if ((child.byteorder == c'>' and little_endian) or + */ + __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 823, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__7); + __Pyx_GIVEREF(__pyx_tuple__7); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":827 + * if ((child.byteorder == c'>' and little_endian) or + * (child.byteorder == c'<' and not little_endian)): + * raise 
ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< + * # One could encode it in the format string and have Cython + * # complain instead, BUT: < and > in format strings also imply + */ + __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 827, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__8); + __Pyx_GIVEREF(__pyx_tuple__8); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":847 + * t = child.type_num + * if end - f < 5: + * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< + * + * # Until ticket #99 is fixed, use integers to avoid warnings + */ + __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 847, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__9); + __Pyx_GIVEREF(__pyx_tuple__9); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1013 + * _import_array() + * except Exception: + * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<< + * + * cdef inline int import_umath() except -1: + */ + __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_multiarray_failed_to); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 1013, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__10); + __Pyx_GIVEREF(__pyx_tuple__10); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1019 + * _import_umath() + * except Exception: + * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< + * + * cdef inline int import_ufunc() except -1: + */ + __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 1019, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__11); + __Pyx_GIVEREF(__pyx_tuple__11); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1025 + * _import_umath() + * except Exception: + * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< + */ + __pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 1025, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__12); + __Pyx_GIVEREF(__pyx_tuple__12); + + /* "astropy/convolution/boundary_extend.pyx":19 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve1d_boundary_extend(np.ndarray[DTYPE_t, ndim=1] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=1] g, + * bint normalize_by_kernel + */ + __pyx_tuple__13 = PyTuple_Pack(16, __pyx_n_s_f, __pyx_n_s_g, __pyx_n_s_normalize_by_kernel, __pyx_n_s_nx, __pyx_n_s_nkx, __pyx_n_s_wkx, __pyx_n_s_conv, __pyx_n_s_i, __pyx_n_s_iii, __pyx_n_s_ii, __pyx_n_s_iimin, __pyx_n_s_iimax, __pyx_n_s_top, __pyx_n_s_bot, __pyx_n_s_ker, __pyx_n_s_val); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(0, 19, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__13); + __Pyx_GIVEREF(__pyx_tuple__13); + __pyx_codeobj__14 = (PyObject*)__Pyx_PyCode_New(3, 0, 16, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__13, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_astropy_convolution_boundary_ext, __pyx_n_s_convolve1d_boundary_extend, 19, 
__pyx_empty_bytes); if (unlikely(!__pyx_codeobj__14)) __PYX_ERR(0, 19, __pyx_L1_error) + + /* "astropy/convolution/boundary_extend.pyx":68 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve2d_boundary_extend(np.ndarray[DTYPE_t, ndim=2] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=2] g, + * bint normalize_by_kernel): + */ + __pyx_tuple__15 = PyTuple_Pack(24, __pyx_n_s_f, __pyx_n_s_g, __pyx_n_s_normalize_by_kernel, __pyx_n_s_nx, __pyx_n_s_ny, __pyx_n_s_nkx, __pyx_n_s_nky, __pyx_n_s_wkx, __pyx_n_s_wky, __pyx_n_s_conv, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_iii, __pyx_n_s_jjj, __pyx_n_s_ii, __pyx_n_s_jj, __pyx_n_s_iimin, __pyx_n_s_iimax, __pyx_n_s_jjmin, __pyx_n_s_jjmax, __pyx_n_s_top, __pyx_n_s_bot, __pyx_n_s_ker, __pyx_n_s_val); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(0, 68, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__15); + __Pyx_GIVEREF(__pyx_tuple__15); + __pyx_codeobj__16 = (PyObject*)__Pyx_PyCode_New(3, 0, 24, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__15, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_astropy_convolution_boundary_ext, __pyx_n_s_convolve2d_boundary_extend, 68, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__16)) __PYX_ERR(0, 68, __pyx_L1_error) + + /* "astropy/convolution/boundary_extend.pyx":125 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve3d_boundary_extend(np.ndarray[DTYPE_t, ndim=3] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=3] g, + * bint normalize_by_kernel): + */ + __pyx_tuple__17 = PyTuple_Pack(32, __pyx_n_s_f, __pyx_n_s_g, __pyx_n_s_normalize_by_kernel, __pyx_n_s_nx, __pyx_n_s_ny, __pyx_n_s_nz, __pyx_n_s_nkx, __pyx_n_s_nky, __pyx_n_s_nkz, __pyx_n_s_wkx, __pyx_n_s_wky, __pyx_n_s_wkz, __pyx_n_s_conv, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_k, __pyx_n_s_iii, __pyx_n_s_jjj, __pyx_n_s_kkk, __pyx_n_s_ii, __pyx_n_s_jj, __pyx_n_s_kk, __pyx_n_s_iimin, __pyx_n_s_iimax, __pyx_n_s_jjmin, __pyx_n_s_jjmax, __pyx_n_s_kkmin, __pyx_n_s_kkmax, __pyx_n_s_top, __pyx_n_s_bot, __pyx_n_s_ker, __pyx_n_s_val); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(0, 125, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__17); + __Pyx_GIVEREF(__pyx_tuple__17); + __pyx_codeobj__18 = (PyObject*)__Pyx_PyCode_New(3, 0, 32, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__17, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_astropy_convolution_boundary_ext, __pyx_n_s_convolve3d_boundary_extend, 125, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__18)) __PYX_ERR(0, 125, __pyx_L1_error) + __Pyx_RefNannyFinishContext(); + return 0; + __pyx_L1_error:; + __Pyx_RefNannyFinishContext(); + return -1; +} + +static int __Pyx_InitGlobals(void) { + if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); + return 0; + __pyx_L1_error:; + return -1; +} + +#if PY_MAJOR_VERSION < 3 +PyMODINIT_FUNC initboundary_extend(void); /*proto*/ +PyMODINIT_FUNC initboundary_extend(void) +#else +PyMODINIT_FUNC PyInit_boundary_extend(void); /*proto*/ +PyMODINIT_FUNC PyInit_boundary_extend(void) +#if CYTHON_PEP489_MULTI_PHASE_INIT +{ + return PyModuleDef_Init(&__pyx_moduledef); +} +static int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name) { + PyObject *value = PyObject_GetAttrString(spec, from_name); + int result = 0; + if (likely(value)) { + result = PyDict_SetItemString(moddict, to_name, value); + Py_DECREF(value); + } else if 
(PyErr_ExceptionMatches(PyExc_AttributeError)) { + PyErr_Clear(); + } else { + result = -1; + } + return result; +} +static PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { + PyObject *module = NULL, *moddict, *modname; + if (__pyx_m) + return __Pyx_NewRef(__pyx_m); + modname = PyObject_GetAttrString(spec, "name"); + if (unlikely(!modname)) goto bad; + module = PyModule_NewObject(modname); + Py_DECREF(modname); + if (unlikely(!module)) goto bad; + moddict = PyModule_GetDict(module); + if (unlikely(!moddict)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__") < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__") < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__") < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__") < 0)) goto bad; + return module; +bad: + Py_XDECREF(module); + return NULL; +} + + +static int __pyx_pymod_exec_boundary_extend(PyObject *__pyx_pyinit_module) +#endif +#endif +{ + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + __Pyx_RefNannyDeclarations + #if CYTHON_PEP489_MULTI_PHASE_INIT + if (__pyx_m && __pyx_m == __pyx_pyinit_module) return 0; + #endif + #if CYTHON_REFNANNY + __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); + if (!__Pyx_RefNanny) { + PyErr_Clear(); + __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); + if (!__Pyx_RefNanny) + Py_FatalError("failed to import 'refnanny' module"); + } + #endif + __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_boundary_extend(void)", 0); + if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) + #ifdef __Pyx_CyFunction_USED + if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_FusedFunction_USED + if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_Coroutine_USED + if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_Generator_USED + if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_AsyncGen_USED + if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_StopAsyncIteration_USED + if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + /*--- Library function declarations ---*/ + /*--- Threads initialization code ---*/ + #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS + #ifdef WITH_THREAD /* Python build with threading support? 
*/ + PyEval_InitThreads(); + #endif + #endif + /*--- Module creation code ---*/ + #if CYTHON_PEP489_MULTI_PHASE_INIT + __pyx_m = __pyx_pyinit_module; + Py_INCREF(__pyx_m); + #else + #if PY_MAJOR_VERSION < 3 + __pyx_m = Py_InitModule4("boundary_extend", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); + #else + __pyx_m = PyModule_Create(&__pyx_moduledef); + #endif + if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) + Py_INCREF(__pyx_d); + __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) + #if CYTHON_COMPILING_IN_PYPY + Py_INCREF(__pyx_b); + #endif + if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); + /*--- Initialize various global constants etc. ---*/ + if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) + if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + if (__pyx_module_is_main_astropy__convolution__boundary_extend) { + if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + } + #if PY_MAJOR_VERSION >= 3 + { + PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) + if (!PyDict_GetItemString(modules, "astropy.convolution.boundary_extend")) { + if (unlikely(PyDict_SetItemString(modules, "astropy.convolution.boundary_extend", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) + } + } + #endif + /*--- Builtin init code ---*/ + if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + /*--- Constants init code ---*/ + if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + /*--- Global init code ---*/ + /*--- Variable export code ---*/ + /*--- Function export code ---*/ + /*--- Type init code ---*/ + /*--- Type import code ---*/ + __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", + #if CYTHON_COMPILING_IN_PYPY + sizeof(PyTypeObject), + #else + sizeof(PyHeapTypeObject), + #endif + 0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) __PYX_ERR(2, 9, __pyx_L1_error) + __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) __PYX_ERR(1, 163, __pyx_L1_error) + __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) __PYX_ERR(1, 185, __pyx_L1_error) + __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) __PYX_ERR(1, 189, __pyx_L1_error) + __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) __PYX_ERR(1, 198, __pyx_L1_error) + __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) __PYX_ERR(1, 885, __pyx_L1_error) + /*--- Variable import code ---*/ + /*--- Function import code ---*/ + /*--- Execution code ---*/ + #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) + if (__Pyx_patch_abc() < 
0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + + /* "astropy/convolution/boundary_extend.pyx":3 + * # Licensed under a 3-clause BSD style license - see LICENSE.rst + * from __future__ import division + * import numpy as np # <<<<<<<<<<<<<< + * cimport numpy as np + * + */ + __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 3, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "astropy/convolution/boundary_extend.pyx":6 + * cimport numpy as np + * + * DTYPE = np.float # <<<<<<<<<<<<<< + * ctypedef np.float_t DTYPE_t + * + */ + __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_float); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + if (PyDict_SetItem(__pyx_d, __pyx_n_s_DTYPE, __pyx_t_2) < 0) __PYX_ERR(0, 6, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "astropy/convolution/boundary_extend.pyx":19 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve1d_boundary_extend(np.ndarray[DTYPE_t, ndim=1] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=1] g, + * bint normalize_by_kernel + */ + __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_7astropy_11convolution_15boundary_extend_1convolve1d_boundary_extend, NULL, __pyx_n_s_astropy_convolution_boundary_ext_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_convolve1d_boundary_extend, __pyx_t_2) < 0) __PYX_ERR(0, 19, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "astropy/convolution/boundary_extend.pyx":68 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve2d_boundary_extend(np.ndarray[DTYPE_t, ndim=2] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=2] g, + * bint normalize_by_kernel): + */ + __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_7astropy_11convolution_15boundary_extend_3convolve2d_boundary_extend, NULL, __pyx_n_s_astropy_convolution_boundary_ext_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 68, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_convolve2d_boundary_extend, __pyx_t_2) < 0) __PYX_ERR(0, 68, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "astropy/convolution/boundary_extend.pyx":125 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve3d_boundary_extend(np.ndarray[DTYPE_t, ndim=3] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=3] g, + * bint normalize_by_kernel): + */ + __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_7astropy_11convolution_15boundary_extend_5convolve3d_boundary_extend, NULL, __pyx_n_s_astropy_convolution_boundary_ext_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 125, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_convolve3d_boundary_extend, __pyx_t_2) < 0) __PYX_ERR(0, 125, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "astropy/convolution/boundary_extend.pyx":1 + * # Licensed under a 3-clause BSD style license - see LICENSE.rst # <<<<<<<<<<<<<< + * from __future__ import division + * import numpy as np + */ + __pyx_t_2 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_2)) 
__PYX_ERR(0, 1, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_2) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1021 + * raise ImportError("numpy.core.umath failed to import") + * + * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + + /*--- Wrapped vars code ---*/ + + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + if (__pyx_m) { + if (__pyx_d) { + __Pyx_AddTraceback("init astropy.convolution.boundary_extend", 0, __pyx_lineno, __pyx_filename); + } + Py_DECREF(__pyx_m); __pyx_m = 0; + } else if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_ImportError, "init astropy.convolution.boundary_extend"); + } + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + #if CYTHON_PEP489_MULTI_PHASE_INIT + return (__pyx_m != NULL) ? 0 : -1; + #elif PY_MAJOR_VERSION >= 3 + return __pyx_m; + #else + return; + #endif +} + +/* --- Runtime support code --- */ +/* Refnanny */ +#if CYTHON_REFNANNY +static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { + PyObject *m = NULL, *p = NULL; + void *r = NULL; + m = PyImport_ImportModule((char *)modname); + if (!m) goto end; + p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); + if (!p) goto end; + r = PyLong_AsVoidPtr(p); +end: + Py_XDECREF(p); + Py_XDECREF(m); + return (__Pyx_RefNannyAPIStruct *)r; +} +#endif + +/* GetBuiltinName */ +static PyObject *__Pyx_GetBuiltinName(PyObject *name) { + PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); + if (unlikely(!result)) { + PyErr_Format(PyExc_NameError, +#if PY_MAJOR_VERSION >= 3 + "name '%U' is not defined", name); +#else + "name '%.200s' is not defined", PyString_AS_STRING(name)); +#endif + } + return result; +} + +/* RaiseArgTupleInvalid */ +static void __Pyx_RaiseArgtupleInvalid( + const char* func_name, + int exact, + Py_ssize_t num_min, + Py_ssize_t num_max, + Py_ssize_t num_found) +{ + Py_ssize_t num_expected; + const char *more_or_less; + if (num_found < num_min) { + num_expected = num_min; + more_or_less = "at least"; + } else { + num_expected = num_max; + more_or_less = "at most"; + } + if (exact) { + more_or_less = "exactly"; + } + PyErr_Format(PyExc_TypeError, + "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", + func_name, more_or_less, num_expected, + (num_expected == 1) ? 
"" : "s", num_found); +} + +/* RaiseDoubleKeywords */ +static void __Pyx_RaiseDoubleKeywordsError( + const char* func_name, + PyObject* kw_name) +{ + PyErr_Format(PyExc_TypeError, + #if PY_MAJOR_VERSION >= 3 + "%s() got multiple values for keyword argument '%U'", func_name, kw_name); + #else + "%s() got multiple values for keyword argument '%s'", func_name, + PyString_AsString(kw_name)); + #endif +} + +/* ParseKeywords */ +static int __Pyx_ParseOptionalKeywords( + PyObject *kwds, + PyObject **argnames[], + PyObject *kwds2, + PyObject *values[], + Py_ssize_t num_pos_args, + const char* function_name) +{ + PyObject *key = 0, *value = 0; + Py_ssize_t pos = 0; + PyObject*** name; + PyObject*** first_kw_arg = argnames + num_pos_args; + while (PyDict_Next(kwds, &pos, &key, &value)) { + name = first_kw_arg; + while (*name && (**name != key)) name++; + if (*name) { + values[name-argnames] = value; + continue; + } + name = first_kw_arg; + #if PY_MAJOR_VERSION < 3 + if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { + while (*name) { + if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) + && _PyString_Eq(**name, key)) { + values[name-argnames] = value; + break; + } + name++; + } + if (*name) continue; + else { + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + if ((**argname == key) || ( + (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) + && _PyString_Eq(**argname, key))) { + goto arg_passed_twice; + } + argname++; + } + } + } else + #endif + if (likely(PyUnicode_Check(key))) { + while (*name) { + int cmp = (**name == key) ? 0 : + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : + #endif + PyUnicode_Compare(**name, key); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) { + values[name-argnames] = value; + break; + } + name++; + } + if (*name) continue; + else { + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + int cmp = (**argname == key) ? 0 : + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 
1 : + #endif + PyUnicode_Compare(**argname, key); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) goto arg_passed_twice; + argname++; + } + } + } else + goto invalid_keyword_type; + if (kwds2) { + if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; + } else { + goto invalid_keyword; + } + } + return 0; +arg_passed_twice: + __Pyx_RaiseDoubleKeywordsError(function_name, key); + goto bad; +invalid_keyword_type: + PyErr_Format(PyExc_TypeError, + "%.200s() keywords must be strings", function_name); + goto bad; +invalid_keyword: + PyErr_Format(PyExc_TypeError, + #if PY_MAJOR_VERSION < 3 + "%.200s() got an unexpected keyword argument '%.200s'", + function_name, PyString_AsString(key)); + #else + "%s() got an unexpected keyword argument '%U'", + function_name, key); + #endif +bad: + return -1; +} + +/* ArgTypeTest */ +static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) +{ + if (unlikely(!type)) { + PyErr_SetString(PyExc_SystemError, "Missing type object"); + return 0; + } + else if (exact) { + #if PY_MAJOR_VERSION == 2 + if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; + #endif + } + else { + if (likely(__Pyx_TypeCheck(obj, type))) return 1; + } + PyErr_Format(PyExc_TypeError, + "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", + name, type->tp_name, Py_TYPE(obj)->tp_name); + return 0; +} + +/* IsLittleEndian */ +static CYTHON_INLINE int __Pyx_Is_Little_Endian(void) +{ + union { + uint32_t u32; + uint8_t u8[4]; + } S; + S.u32 = 0x01020304; + return S.u8[0] == 4; +} + +/* BufferFormatCheck */ +static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, + __Pyx_BufFmt_StackElem* stack, + __Pyx_TypeInfo* type) { + stack[0].field = &ctx->root; + stack[0].parent_offset = 0; + ctx->root.type = type; + ctx->root.name = "buffer dtype"; + ctx->root.offset = 0; + ctx->head = stack; + ctx->head->field = &ctx->root; + ctx->fmt_offset = 0; + ctx->head->parent_offset = 0; + ctx->new_packmode = '@'; + ctx->enc_packmode = '@'; + ctx->new_count = 1; + ctx->enc_count = 0; + ctx->enc_type = 0; + ctx->is_complex = 0; + ctx->is_valid_array = 0; + ctx->struct_alignment = 0; + while (type->typegroup == 'S') { + ++ctx->head; + ctx->head->field = type->fields; + ctx->head->parent_offset = 0; + type = type->fields->type; + } +} +static int __Pyx_BufFmt_ParseNumber(const char** ts) { + int count; + const char* t = *ts; + if (*t < '0' || *t > '9') { + return -1; + } else { + count = *t++ - '0'; + while (*t >= '0' && *t <= '9') { + count *= 10; + count += *t++ - '0'; + } + } + *ts = t; + return count; +} +static int __Pyx_BufFmt_ExpectNumber(const char **ts) { + int number = __Pyx_BufFmt_ParseNumber(ts); + if (number == -1) + PyErr_Format(PyExc_ValueError,\ + "Does not understand character buffer dtype format string ('%c')", **ts); + return number; +} +static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { + PyErr_Format(PyExc_ValueError, + "Unexpected format string character: '%c'", ch); +} +static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { + switch (ch) { + case 'c': return "'char'"; + case 'b': return "'signed char'"; + case 'B': return "'unsigned char'"; + case 'h': return "'short'"; + case 'H': return "'unsigned short'"; + case 'i': return "'int'"; + case 'I': return "'unsigned int'"; + case 'l': return "'long'"; + case 'L': return "'unsigned long'"; + case 'q': return "'long long'"; + case 'Q': return "'unsigned long long'"; + case 'f': return (is_complex ?
"'complex float'" : "'float'"); + case 'd': return (is_complex ? "'complex double'" : "'double'"); + case 'g': return (is_complex ? "'complex long double'" : "'long double'"); + case 'T': return "a struct"; + case 'O': return "Python object"; + case 'P': return "a pointer"; + case 's': case 'p': return "a string"; + case 0: return "end"; + default: return "unparseable format string"; + } +} +static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { + switch (ch) { + case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return 2; + case 'i': case 'I': case 'l': case 'L': return 4; + case 'q': case 'Q': return 8; + case 'f': return (is_complex ? 8 : 4); + case 'd': return (is_complex ? 16 : 8); + case 'g': { + PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); + return 0; + } + case 'O': case 'P': return sizeof(void*); + default: + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } +} +static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { + switch (ch) { + case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return sizeof(short); + case 'i': case 'I': return sizeof(int); + case 'l': case 'L': return sizeof(long); + #ifdef HAVE_LONG_LONG + case 'q': case 'Q': return sizeof(PY_LONG_LONG); + #endif + case 'f': return sizeof(float) * (is_complex ? 2 : 1); + case 'd': return sizeof(double) * (is_complex ? 2 : 1); + case 'g': return sizeof(long double) * (is_complex ? 2 : 1); + case 'O': case 'P': return sizeof(void*); + default: { + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } + } +} +typedef struct { char c; short x; } __Pyx_st_short; +typedef struct { char c; int x; } __Pyx_st_int; +typedef struct { char c; long x; } __Pyx_st_long; +typedef struct { char c; float x; } __Pyx_st_float; +typedef struct { char c; double x; } __Pyx_st_double; +typedef struct { char c; long double x; } __Pyx_st_longdouble; +typedef struct { char c; void *x; } __Pyx_st_void_p; +#ifdef HAVE_LONG_LONG +typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; +#endif +static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { + switch (ch) { + case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); + case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); + case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); +#ifdef HAVE_LONG_LONG + case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); +#endif + case 'f': return sizeof(__Pyx_st_float) - sizeof(float); + case 'd': return sizeof(__Pyx_st_double) - sizeof(double); + case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); + case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); + default: + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } +} +/* These are for computing the padding at the end of the struct to align + on the first member of the struct. This will probably the same as above, + but we don't have any guarantees. 
+ */ +typedef struct { short x; char c; } __Pyx_pad_short; +typedef struct { int x; char c; } __Pyx_pad_int; +typedef struct { long x; char c; } __Pyx_pad_long; +typedef struct { float x; char c; } __Pyx_pad_float; +typedef struct { double x; char c; } __Pyx_pad_double; +typedef struct { long double x; char c; } __Pyx_pad_longdouble; +typedef struct { void *x; char c; } __Pyx_pad_void_p; +#ifdef HAVE_LONG_LONG +typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; +#endif +static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { + switch (ch) { + case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); + case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); + case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); +#ifdef HAVE_LONG_LONG + case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); +#endif + case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); + case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); + case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); + case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); + default: + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } +} +static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { + switch (ch) { + case 'c': + return 'H'; + case 'b': case 'h': case 'i': + case 'l': case 'q': case 's': case 'p': + return 'I'; + case 'B': case 'H': case 'I': case 'L': case 'Q': + return 'U'; + case 'f': case 'd': case 'g': + return (is_complex ? 'C' : 'R'); + case 'O': + return 'O'; + case 'P': + return 'P'; + default: { + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } + } +} +static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { + if (ctx->head == NULL || ctx->head->field == &ctx->root) { + const char* expected; + const char* quote; + if (ctx->head == NULL) { + expected = "end"; + quote = ""; + } else { + expected = ctx->head->field->type->name; + quote = "'"; + } + PyErr_Format(PyExc_ValueError, + "Buffer dtype mismatch, expected %s%s%s but got %s", + quote, expected, quote, + __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); + } else { + __Pyx_StructField* field = ctx->head->field; + __Pyx_StructField* parent = (ctx->head - 1)->field; + PyErr_Format(PyExc_ValueError, + "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", + field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), + parent->type->name, field->name); + } +} +static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { + char group; + size_t size, offset, arraysize = 1; + if (ctx->enc_type == 0) return 0; + if (ctx->head->field->type->arraysize[0]) { + int i, ndim = 0; + if (ctx->enc_type == 's' || ctx->enc_type == 'p') { + ctx->is_valid_array = ctx->head->field->type->ndim == 1; + ndim = 1; + if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { + PyErr_Format(PyExc_ValueError, + "Expected a dimension of size %zu, got %zu", + ctx->head->field->type->arraysize[0], ctx->enc_count); + return -1; + } + } + if (!ctx->is_valid_array) { + PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", + ctx->head->field->type->ndim, ndim); + return -1; + } + for (i = 0; i < ctx->head->field->type->ndim; i++) { + arraysize *= ctx->head->field->type->arraysize[i]; + } + ctx->is_valid_array = 0; + ctx->enc_count = 1; + } + group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, 
ctx->is_complex);
+ do {
+ __Pyx_StructField* field = ctx->head->field;
+ __Pyx_TypeInfo* type = field->type;
+ if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') {
+ size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex);
+ } else {
+ size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex);
+ }
+ if (ctx->enc_packmode == '@') {
+ size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex);
+ size_t align_mod_offset;
+ if (align_at == 0) return -1;
+ align_mod_offset = ctx->fmt_offset % align_at;
+ if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset;
+ if (ctx->struct_alignment == 0)
+ ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type,
+ ctx->is_complex);
+ }
+ if (type->size != size || type->typegroup != group) {
+ if (type->typegroup == 'C' && type->fields != NULL) {
+ size_t parent_offset = ctx->head->parent_offset + field->offset;
+ ++ctx->head;
+ ctx->head->field = type->fields;
+ ctx->head->parent_offset = parent_offset;
+ continue;
+ }
+ if ((type->typegroup == 'H' || group == 'H') && type->size == size) {
+ } else {
+ __Pyx_BufFmt_RaiseExpected(ctx);
+ return -1;
+ }
+ }
+ offset = ctx->head->parent_offset + field->offset;
+ if (ctx->fmt_offset != offset) {
+ PyErr_Format(PyExc_ValueError,
+ "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected",
+ (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset);
+ return -1;
+ }
+ ctx->fmt_offset += size;
+ if (arraysize)
+ ctx->fmt_offset += (arraysize - 1) * size;
+ --ctx->enc_count;
+ while (1) {
+ if (field == &ctx->root) {
+ ctx->head = NULL;
+ if (ctx->enc_count != 0) {
+ __Pyx_BufFmt_RaiseExpected(ctx);
+ return -1;
+ }
+ break;
+ }
+ ctx->head->field = ++field;
+ if (field->type == NULL) {
+ --ctx->head;
+ field = ctx->head->field;
+ continue;
+ } else if (field->type->typegroup == 'S') {
+ size_t parent_offset = ctx->head->parent_offset + field->offset;
+ if (field->type->fields->type == NULL) continue;
+ field = field->type->fields;
+ ++ctx->head;
+ ctx->head->field = field;
+ ctx->head->parent_offset = parent_offset;
+ break;
+ } else {
+ break;
+ }
+ }
+ } while (ctx->enc_count);
+ ctx->enc_type = 0;
+ ctx->is_complex = 0;
+ return 0;
+}
+static PyObject *
+__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp)
+{
+ const char *ts = *tsp;
+ int i = 0, number;
+ int ndim = ctx->head->field->type->ndim;
+ ++ts;
+ if (ctx->new_count != 1) {
+ PyErr_SetString(PyExc_ValueError,
+ "Cannot handle repeated arrays in format string");
+ return NULL;
+ }
+ if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
+ while (*ts && *ts != ')') {
+ switch (*ts) {
+ /* skip whitespace, advancing ts so a bare 'continue' cannot loop forever */
+ case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': ++ts; continue;
+ default: break;
+ }
+ number = __Pyx_BufFmt_ExpectNumber(&ts);
+ if (number == -1) return NULL;
+ if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i])
+ return PyErr_Format(PyExc_ValueError,
+ "Expected a dimension of size %zu, got %d",
+ ctx->head->field->type->arraysize[i], number);
+ if (*ts != ',' && *ts != ')')
+ return PyErr_Format(PyExc_ValueError,
+ "Expected a comma in format string, got '%c'", *ts);
+ if (*ts == ',') ts++;
+ i++;
+ }
+ if (i != ndim)
+ return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d",
+ ctx->head->field->type->ndim, i);
+ if (!*ts) {
+ PyErr_SetString(PyExc_ValueError,
+ "Unexpected end of format string, expected ')'");
+ return NULL;
+ }
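+ /* All dimensions in the "(d1,d2,...)" array spec matched the declared
+    arraysize: mark the spec as validated and step past the closing ')'. */
+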
ctx->is_valid_array = 1; + ctx->new_count = 1; + *tsp = ++ts; + return Py_None; +} +static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { + int got_Z = 0; + while (1) { + switch(*ts) { + case 0: + if (ctx->enc_type != 0 && ctx->head == NULL) { + __Pyx_BufFmt_RaiseExpected(ctx); + return NULL; + } + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + if (ctx->head != NULL) { + __Pyx_BufFmt_RaiseExpected(ctx); + return NULL; + } + return ts; + case ' ': + case '\r': + case '\n': + ++ts; + break; + case '<': + if (!__Pyx_Is_Little_Endian()) { + PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); + return NULL; + } + ctx->new_packmode = '='; + ++ts; + break; + case '>': + case '!': + if (__Pyx_Is_Little_Endian()) { + PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); + return NULL; + } + ctx->new_packmode = '='; + ++ts; + break; + case '=': + case '@': + case '^': + ctx->new_packmode = *ts++; + break; + case 'T': + { + const char* ts_after_sub; + size_t i, struct_count = ctx->new_count; + size_t struct_alignment = ctx->struct_alignment; + ctx->new_count = 1; + ++ts; + if (*ts != '{') { + PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); + return NULL; + } + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->enc_type = 0; + ctx->enc_count = 0; + ctx->struct_alignment = 0; + ++ts; + ts_after_sub = ts; + for (i = 0; i != struct_count; ++i) { + ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); + if (!ts_after_sub) return NULL; + } + ts = ts_after_sub; + if (struct_alignment) ctx->struct_alignment = struct_alignment; + } + break; + case '}': + { + size_t alignment = ctx->struct_alignment; + ++ts; + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->enc_type = 0; + if (alignment && ctx->fmt_offset % alignment) { + ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); + } + } + return ts; + case 'x': + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->fmt_offset += ctx->new_count; + ctx->new_count = 1; + ctx->enc_count = 0; + ctx->enc_type = 0; + ctx->enc_packmode = ctx->new_packmode; + ++ts; + break; + case 'Z': + got_Z = 1; + ++ts; + if (*ts != 'f' && *ts != 'd' && *ts != 'g') { + __Pyx_BufFmt_RaiseUnexpectedChar('Z'); + return NULL; + } + case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': + case 'l': case 'L': case 'q': case 'Q': + case 'f': case 'd': case 'g': + case 'O': case 'p': + if (ctx->enc_type == *ts && got_Z == ctx->is_complex && + ctx->enc_packmode == ctx->new_packmode) { + ctx->enc_count += ctx->new_count; + ctx->new_count = 1; + got_Z = 0; + ++ts; + break; + } + case 's': + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->enc_count = ctx->new_count; + ctx->enc_packmode = ctx->new_packmode; + ctx->enc_type = *ts; + ctx->is_complex = got_Z; + ++ts; + ctx->new_count = 1; + got_Z = 0; + break; + case ':': + ++ts; + while(*ts != ':') ++ts; + ++ts; + break; + case '(': + if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; + break; + default: + { + int number = __Pyx_BufFmt_ExpectNumber(&ts); + if (number == -1) return NULL; + ctx->new_count = (size_t)number; + } + } + } +} + +/* BufferGetAndValidate */ + static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { + if (unlikely(info->buf == NULL)) return; + if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL; + __Pyx_ReleaseBuffer(info); +} +static void 
__Pyx_ZeroBuffer(Py_buffer* buf) { + buf->buf = NULL; + buf->obj = NULL; + buf->strides = __Pyx_zeros; + buf->shape = __Pyx_zeros; + buf->suboffsets = __Pyx_minusones; +} +static int __Pyx__GetBufferAndValidate( + Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, + int nd, int cast, __Pyx_BufFmt_StackElem* stack) +{ + buf->buf = NULL; + if (unlikely(__Pyx_GetBuffer(obj, buf, flags) == -1)) { + __Pyx_ZeroBuffer(buf); + return -1; + } + if (unlikely(buf->ndim != nd)) { + PyErr_Format(PyExc_ValueError, + "Buffer has wrong number of dimensions (expected %d, got %d)", + nd, buf->ndim); + goto fail; + } + if (!cast) { + __Pyx_BufFmt_Context ctx; + __Pyx_BufFmt_Init(&ctx, stack, dtype); + if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; + } + if (unlikely((unsigned)buf->itemsize != dtype->size)) { + PyErr_Format(PyExc_ValueError, + "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)", + buf->itemsize, (buf->itemsize > 1) ? "s" : "", + dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : ""); + goto fail; + } + if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; + return 0; +fail:; + __Pyx_SafeReleaseBuffer(buf); + return -1; +} + +/* None */ + static CYTHON_INLINE long __Pyx_mod_long(long a, long b) { + long r = a % b; + r += ((r != 0) & ((r ^ b) < 0)) * b; + return r; +} + +/* PyObjectCall */ + #if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { + PyObject *result; + ternaryfunc call = func->ob_type->tp_call; + if (unlikely(!call)) + return PyObject_Call(func, arg, kw); + if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) + return NULL; + result = (*call)(func, arg, kw); + Py_LeaveRecursiveCall(); + if (unlikely(!result) && unlikely(!PyErr_Occurred())) { + PyErr_SetString( + PyExc_SystemError, + "NULL result without error in PyObject_Call"); + } + return result; +} +#endif + +/* PyErrFetchRestore */ + #if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + tmp_type = tstate->curexc_type; + tmp_value = tstate->curexc_value; + tmp_tb = tstate->curexc_traceback; + tstate->curexc_type = type; + tstate->curexc_value = value; + tstate->curexc_traceback = tb; + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +} +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { + *type = tstate->curexc_type; + *value = tstate->curexc_value; + *tb = tstate->curexc_traceback; + tstate->curexc_type = 0; + tstate->curexc_value = 0; + tstate->curexc_traceback = 0; +} +#endif + +/* RaiseException */ + #if PY_MAJOR_VERSION < 3 +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, + CYTHON_UNUSED PyObject *cause) { + __Pyx_PyThreadState_declare + Py_XINCREF(type); + if (!value || value == Py_None) + value = NULL; + else + Py_INCREF(value); + if (!tb || tb == Py_None) + tb = NULL; + else { + Py_INCREF(tb); + if (!PyTraceBack_Check(tb)) { + PyErr_SetString(PyExc_TypeError, + "raise: arg 3 must be a traceback or None"); + goto raise_error; + } + } + if (PyType_Check(type)) { +#if CYTHON_COMPILING_IN_PYPY + if (!value) { + Py_INCREF(Py_None); + value = Py_None; + } +#endif + PyErr_NormalizeException(&type, &value, &tb); + } else { + if (value) { + 
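/* `type` is an already-constructed exception instance here (the
+    PyType_Check branch above was not taken), so a separate value
+    argument is not allowed. */
+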
PyErr_SetString(PyExc_TypeError, + "instance exception may not have a separate value"); + goto raise_error; + } + value = type; + type = (PyObject*) Py_TYPE(type); + Py_INCREF(type); + if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { + PyErr_SetString(PyExc_TypeError, + "raise: exception class must be a subclass of BaseException"); + goto raise_error; + } + } + __Pyx_PyThreadState_assign + __Pyx_ErrRestore(type, value, tb); + return; +raise_error: + Py_XDECREF(value); + Py_XDECREF(type); + Py_XDECREF(tb); + return; +} +#else +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { + PyObject* owned_instance = NULL; + if (tb == Py_None) { + tb = 0; + } else if (tb && !PyTraceBack_Check(tb)) { + PyErr_SetString(PyExc_TypeError, + "raise: arg 3 must be a traceback or None"); + goto bad; + } + if (value == Py_None) + value = 0; + if (PyExceptionInstance_Check(type)) { + if (value) { + PyErr_SetString(PyExc_TypeError, + "instance exception may not have a separate value"); + goto bad; + } + value = type; + type = (PyObject*) Py_TYPE(value); + } else if (PyExceptionClass_Check(type)) { + PyObject *instance_class = NULL; + if (value && PyExceptionInstance_Check(value)) { + instance_class = (PyObject*) Py_TYPE(value); + if (instance_class != type) { + int is_subclass = PyObject_IsSubclass(instance_class, type); + if (!is_subclass) { + instance_class = NULL; + } else if (unlikely(is_subclass == -1)) { + goto bad; + } else { + type = instance_class; + } + } + } + if (!instance_class) { + PyObject *args; + if (!value) + args = PyTuple_New(0); + else if (PyTuple_Check(value)) { + Py_INCREF(value); + args = value; + } else + args = PyTuple_Pack(1, value); + if (!args) + goto bad; + owned_instance = PyObject_Call(type, args, NULL); + Py_DECREF(args); + if (!owned_instance) + goto bad; + value = owned_instance; + if (!PyExceptionInstance_Check(value)) { + PyErr_Format(PyExc_TypeError, + "calling %R should have returned an instance of " + "BaseException, not %R", + type, Py_TYPE(value)); + goto bad; + } + } + } else { + PyErr_SetString(PyExc_TypeError, + "raise: exception class must be a subclass of BaseException"); + goto bad; + } + if (cause) { + PyObject *fixed_cause; + if (cause == Py_None) { + fixed_cause = NULL; + } else if (PyExceptionClass_Check(cause)) { + fixed_cause = PyObject_CallObject(cause, NULL); + if (fixed_cause == NULL) + goto bad; + } else if (PyExceptionInstance_Check(cause)) { + fixed_cause = cause; + Py_INCREF(fixed_cause); + } else { + PyErr_SetString(PyExc_TypeError, + "exception causes must derive from " + "BaseException"); + goto bad; + } + PyException_SetCause(value, fixed_cause); + } + PyErr_SetObject(type, value); + if (tb) { +#if CYTHON_COMPILING_IN_PYPY + PyObject *tmp_type, *tmp_value, *tmp_tb; + PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); + Py_INCREF(tb); + PyErr_Restore(tmp_type, tmp_value, tb); + Py_XDECREF(tmp_tb); +#else + PyThreadState *tstate = __Pyx_PyThreadState_Current; + PyObject* tmp_tb = tstate->curexc_traceback; + if (tb != tmp_tb) { + Py_INCREF(tb); + tstate->curexc_traceback = tb; + Py_XDECREF(tmp_tb); + } +#endif + } +bad: + Py_XDECREF(owned_instance); + return; +} +#endif + +/* GetModuleGlobalName */ + static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) { + PyObject *result; +#if !CYTHON_AVOID_BORROWED_REFS + result = PyDict_GetItem(__pyx_d, name); + if (likely(result)) { + Py_INCREF(result); + } else { +#else + result = PyObject_GetItem(__pyx_d, name); + if 
(!result) {
+ PyErr_Clear();
+#endif
+ result = __Pyx_GetBuiltinName(name);
+ }
+ return result;
+}
+
+/* None */
+ static CYTHON_INLINE long __Pyx_div_long(long a, long b) {
+ long q = a / b;
+ long r = a - q*b;
+ q -= ((r != 0) & ((r ^ b) < 0));
+ return q;
+}
+
+/* ExtTypeTest */
+ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
+ if (unlikely(!type)) {
+ PyErr_SetString(PyExc_SystemError, "Missing type object");
+ return 0;
+ }
+ if (likely(__Pyx_TypeCheck(obj, type)))
+ return 1;
+ PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
+ Py_TYPE(obj)->tp_name, type->tp_name);
+ return 0;
+}
+
+/* RaiseTooManyValuesToUnpack */
+ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
+ PyErr_Format(PyExc_ValueError,
+ "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected);
+}
+
+/* RaiseNeedMoreValuesToUnpack */
+ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
+ PyErr_Format(PyExc_ValueError,
+ "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack",
+ index, (index == 1) ? "" : "s");
+}
+
+/* RaiseNoneIterError */
+ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
+}
+
+/* SaveResetException */
+ #if CYTHON_FAST_THREAD_STATE
+static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
+ #if PY_VERSION_HEX >= 0x030700A2
+ *type = tstate->exc_state.exc_type;
+ *value = tstate->exc_state.exc_value;
+ *tb = tstate->exc_state.exc_traceback;
+ #else
+ *type = tstate->exc_type;
+ *value = tstate->exc_value;
+ *tb = tstate->exc_traceback;
+ #endif
+ Py_XINCREF(*type);
+ Py_XINCREF(*value);
+ Py_XINCREF(*tb);
+}
+static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
+ PyObject *tmp_type, *tmp_value, *tmp_tb;
+ #if PY_VERSION_HEX >= 0x030700A2
+ tmp_type = tstate->exc_state.exc_type;
+ tmp_value = tstate->exc_state.exc_value;
+ tmp_tb = tstate->exc_state.exc_traceback;
+ tstate->exc_state.exc_type = type;
+ tstate->exc_state.exc_value = value;
+ tstate->exc_state.exc_traceback = tb;
+ #else
+ tmp_type = tstate->exc_type;
+ tmp_value = tstate->exc_value;
+ tmp_tb = tstate->exc_traceback;
+ tstate->exc_type = type;
+ tstate->exc_value = value;
+ tstate->exc_traceback = tb;
+ #endif
+ Py_XDECREF(tmp_type);
+ Py_XDECREF(tmp_value);
+ Py_XDECREF(tmp_tb);
+}
+#endif
+
+/* PyErrExceptionMatches */
+ #if CYTHON_FAST_THREAD_STATE
+static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
+ Py_ssize_t i, n;
+ n = PyTuple_GET_SIZE(tuple);
+#if PY_MAJOR_VERSION >= 3
+ for (i=0; i<n; i++) {
+ if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
+ }
+#endif
+ for (i=0; i<n; i++) {
+ if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1;
+ }
+ return 0;
+}
+static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) {
+ PyObject *exc_type = tstate->curexc_type;
+ if (exc_type == err) return 1;
+ if (unlikely(!exc_type)) return 0;
+ if (unlikely(PyTuple_Check(err)))
+ return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err);
+ return __Pyx_PyErr_GivenExceptionMatches(exc_type, err);
+}
+#endif
+
+/* GetException */
+ #if CYTHON_FAST_THREAD_STATE
+static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
+#else
+static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) {
+#endif
+ PyObject *local_type, *local_value, *local_tb;
+#if CYTHON_FAST_THREAD_STATE
+ PyObject *tmp_type, *tmp_value, *tmp_tb;
+ local_type = tstate->curexc_type;
+ local_value = tstate->curexc_value;
+ local_tb = tstate->curexc_traceback;
+ tstate->curexc_type = 0;
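+ /* Manual PyErr_Fetch: the pending exception has been moved into locals,
+    and clearing the curexc slots marks it as "being handled" before it is
+    normalized below. */
+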
tstate->curexc_value = 0; + tstate->curexc_traceback = 0; +#else + PyErr_Fetch(&local_type, &local_value, &local_tb); +#endif + PyErr_NormalizeException(&local_type, &local_value, &local_tb); +#if CYTHON_FAST_THREAD_STATE + if (unlikely(tstate->curexc_type)) +#else + if (unlikely(PyErr_Occurred())) +#endif + goto bad; + #if PY_MAJOR_VERSION >= 3 + if (local_tb) { + if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) + goto bad; + } + #endif + Py_XINCREF(local_tb); + Py_XINCREF(local_type); + Py_XINCREF(local_value); + *type = local_type; + *value = local_value; + *tb = local_tb; +#if CYTHON_FAST_THREAD_STATE + #if PY_VERSION_HEX >= 0x030700A2 + tmp_type = tstate->exc_state.exc_type; + tmp_value = tstate->exc_state.exc_value; + tmp_tb = tstate->exc_state.exc_traceback; + tstate->exc_state.exc_type = local_type; + tstate->exc_state.exc_value = local_value; + tstate->exc_state.exc_traceback = local_tb; + #else + tmp_type = tstate->exc_type; + tmp_value = tstate->exc_value; + tmp_tb = tstate->exc_traceback; + tstate->exc_type = local_type; + tstate->exc_value = local_value; + tstate->exc_traceback = local_tb; + #endif + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +#else + PyErr_SetExcInfo(local_type, local_value, local_tb); +#endif + return 0; +bad: + *type = 0; + *value = 0; + *tb = 0; + Py_XDECREF(local_type); + Py_XDECREF(local_value); + Py_XDECREF(local_tb); + return -1; +} + +/* Import */ + static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { + PyObject *empty_list = 0; + PyObject *module = 0; + PyObject *global_dict = 0; + PyObject *empty_dict = 0; + PyObject *list; + #if PY_MAJOR_VERSION < 3 + PyObject *py_import; + py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); + if (!py_import) + goto bad; + #endif + if (from_list) + list = from_list; + else { + empty_list = PyList_New(0); + if (!empty_list) + goto bad; + list = empty_list; + } + global_dict = PyModule_GetDict(__pyx_m); + if (!global_dict) + goto bad; + empty_dict = PyDict_New(); + if (!empty_dict) + goto bad; + { + #if PY_MAJOR_VERSION >= 3 + if (level == -1) { + if (strchr(__Pyx_MODULE_NAME, '.')) { + module = PyImport_ImportModuleLevelObject( + name, global_dict, empty_dict, list, 1); + if (!module) { + if (!PyErr_ExceptionMatches(PyExc_ImportError)) + goto bad; + PyErr_Clear(); + } + } + level = 0; + } + #endif + if (!module) { + #if PY_MAJOR_VERSION < 3 + PyObject *py_level = PyInt_FromLong(level); + if (!py_level) + goto bad; + module = PyObject_CallFunctionObjArgs(py_import, + name, global_dict, empty_dict, list, py_level, NULL); + Py_DECREF(py_level); + #else + module = PyImport_ImportModuleLevelObject( + name, global_dict, empty_dict, list, level); + #endif + } + } +bad: + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(py_import); + #endif + Py_XDECREF(empty_list); + Py_XDECREF(empty_dict); + return module; +} + +/* CLineInTraceback */ + #ifndef CYTHON_CLINE_IN_TRACEBACK +static int __Pyx_CLineForTraceback(CYTHON_UNUSED PyThreadState *tstate, int c_line) { + PyObject *use_cline; + PyObject *ptype, *pvalue, *ptraceback; +#if CYTHON_COMPILING_IN_CPYTHON + PyObject **cython_runtime_dict; +#endif + __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); +#if CYTHON_COMPILING_IN_CPYTHON + cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); + if (likely(cython_runtime_dict)) { + use_cline = PyDict_GetItem(*cython_runtime_dict, __pyx_n_s_cline_in_traceback); + } else +#endif + { + PyObject *use_cline_obj = 
__Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); + if (use_cline_obj) { + use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True; + Py_DECREF(use_cline_obj); + } else { + PyErr_Clear(); + use_cline = NULL; + } + } + if (!use_cline) { + c_line = 0; + PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); + } + else if (PyObject_Not(use_cline) != 0) { + c_line = 0; + } + __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); + return c_line; +} +#endif + +/* CodeObjectCache */ + static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { + int start = 0, mid = 0, end = count - 1; + if (end >= 0 && code_line > entries[end].code_line) { + return count; + } + while (start < end) { + mid = start + (end - start) / 2; + if (code_line < entries[mid].code_line) { + end = mid; + } else if (code_line > entries[mid].code_line) { + start = mid + 1; + } else { + return mid; + } + } + if (code_line <= entries[mid].code_line) { + return mid; + } else { + return mid + 1; + } +} +static PyCodeObject *__pyx_find_code_object(int code_line) { + PyCodeObject* code_object; + int pos; + if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { + return NULL; + } + pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); + if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { + return NULL; + } + code_object = __pyx_code_cache.entries[pos].code_object; + Py_INCREF(code_object); + return code_object; +} +static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { + int pos, i; + __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; + if (unlikely(!code_line)) { + return; + } + if (unlikely(!entries)) { + entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); + if (likely(entries)) { + __pyx_code_cache.entries = entries; + __pyx_code_cache.max_count = 64; + __pyx_code_cache.count = 1; + entries[0].code_line = code_line; + entries[0].code_object = code_object; + Py_INCREF(code_object); + } + return; + } + pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); + if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { + PyCodeObject* tmp = entries[pos].code_object; + entries[pos].code_object = code_object; + Py_DECREF(tmp); + return; + } + if (__pyx_code_cache.count == __pyx_code_cache.max_count) { + int new_max = __pyx_code_cache.max_count + 64; + entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( + __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry)); + if (unlikely(!entries)) { + return; + } + __pyx_code_cache.entries = entries; + __pyx_code_cache.max_count = new_max; + } + for (i=__pyx_code_cache.count; i>pos; i--) { + entries[i] = entries[i-1]; + } + entries[pos].code_line = code_line; + entries[pos].code_object = code_object; + __pyx_code_cache.count++; + Py_INCREF(code_object); +} + +/* AddTraceback */ + #include "compile.h" +#include "frameobject.h" +#include "traceback.h" +static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( + const char *funcname, int c_line, + int py_line, const char *filename) { + PyCodeObject *py_code = 0; + PyObject *py_srcfile = 0; + PyObject *py_funcname = 0; + #if PY_MAJOR_VERSION < 3 + py_srcfile = PyString_FromString(filename); + #else + py_srcfile = PyUnicode_FromString(filename); + #endif + 
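/* Build a synthetic code object whose filename, function name and line
+    number point back at the Cython source, so that this C-level frame can
+    be reported in Python tracebacks by __Pyx_AddTraceback below. */
+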
if (!py_srcfile) goto bad; + if (c_line) { + #if PY_MAJOR_VERSION < 3 + py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); + #else + py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); + #endif + } + else { + #if PY_MAJOR_VERSION < 3 + py_funcname = PyString_FromString(funcname); + #else + py_funcname = PyUnicode_FromString(funcname); + #endif + } + if (!py_funcname) goto bad; + py_code = __Pyx_PyCode_New( + 0, + 0, + 0, + 0, + 0, + __pyx_empty_bytes, /*PyObject *code,*/ + __pyx_empty_tuple, /*PyObject *consts,*/ + __pyx_empty_tuple, /*PyObject *names,*/ + __pyx_empty_tuple, /*PyObject *varnames,*/ + __pyx_empty_tuple, /*PyObject *freevars,*/ + __pyx_empty_tuple, /*PyObject *cellvars,*/ + py_srcfile, /*PyObject *filename,*/ + py_funcname, /*PyObject *name,*/ + py_line, + __pyx_empty_bytes /*PyObject *lnotab*/ + ); + Py_DECREF(py_srcfile); + Py_DECREF(py_funcname); + return py_code; +bad: + Py_XDECREF(py_srcfile); + Py_XDECREF(py_funcname); + return NULL; +} +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename) { + PyCodeObject *py_code = 0; + PyFrameObject *py_frame = 0; + PyThreadState *tstate = __Pyx_PyThreadState_Current; + if (c_line) { + c_line = __Pyx_CLineForTraceback(tstate, c_line); + } + py_code = __pyx_find_code_object(c_line ? -c_line : py_line); + if (!py_code) { + py_code = __Pyx_CreateCodeObjectForTraceback( + funcname, c_line, py_line, filename); + if (!py_code) goto bad; + __pyx_insert_code_object(c_line ? -c_line : py_line, py_code); + } + py_frame = PyFrame_New( + tstate, /*PyThreadState *tstate,*/ + py_code, /*PyCodeObject *code,*/ + __pyx_d, /*PyObject *globals,*/ + 0 /*PyObject *locals*/ + ); + if (!py_frame) goto bad; + __Pyx_PyFrame_SetLineNumber(py_frame, py_line); + PyTraceBack_Here(py_frame); +bad: + Py_XDECREF(py_code); + Py_XDECREF(py_frame); +} + +#if PY_MAJOR_VERSION < 3 +static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { + if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); + if (__Pyx_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags); + PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); + return -1; +} +static void __Pyx_ReleaseBuffer(Py_buffer *view) { + PyObject *obj = view->obj; + if (!obj) return; + if (PyObject_CheckBuffer(obj)) { + PyBuffer_Release(view); + return; + } + if ((0)) {} + else if (__Pyx_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) __pyx_pw_5numpy_7ndarray_3__releasebuffer__(obj, view); + view->obj = NULL; + Py_DECREF(obj); +} +#endif + + + /* CIntToPy */ + static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { + const int neg_one = (int) -1, const_zero = (int) 0; + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(int) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(int) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(int) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + 
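/* Endianness probe: the first byte of (int)1 is 1 on little-endian targets
+    (bytes 01 00 00 00) and 0 on big-endian ones (00 00 00 01); `little` is
+    then passed straight to _PyLong_FromByteArray below. */
+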
unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(int), + little, !is_unsigned); + } +} + +/* CIntFromPyVerify */ + #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) +#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) +#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ + {\ + func_type value = func_value;\ + if (sizeof(target_type) < sizeof(func_type)) {\ + if (unlikely(value != (func_type) (target_type) value)) {\ + func_type zero = 0;\ + if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ + return (target_type) -1;\ + if (is_unsigned && unlikely(value < zero))\ + goto raise_neg_overflow;\ + else\ + goto raise_overflow;\ + }\ + }\ + return (target_type) value;\ + } + +/* Declarations */ + #if CYTHON_CCOMPLEX + #ifdef __cplusplus + static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { + return ::std::complex< float >(x, y); + } + #else + static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { + return x + y*(__pyx_t_float_complex)_Complex_I; + } + #endif +#else + static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { + __pyx_t_float_complex z; + z.real = x; + z.imag = y; + return z; + } +#endif + +/* Arithmetic */ + #if CYTHON_CCOMPLEX +#else + static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + return (a.real == b.real) && (a.imag == b.imag); + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + __pyx_t_float_complex z; + z.real = a.real + b.real; + z.imag = a.imag + b.imag; + return z; + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + __pyx_t_float_complex z; + z.real = a.real - b.real; + z.imag = a.imag - b.imag; + return z; + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + __pyx_t_float_complex z; + z.real = a.real * b.real - a.imag * b.imag; + z.imag = a.real * b.imag + a.imag * b.real; + return z; + } + #if 1 + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + if (b.imag == 0) { + return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real); + } else if (fabsf(b.real) >= fabsf(b.imag)) { + if (b.real == 0 && b.imag == 0) { + return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.imag); + } else { + float r = b.imag / b.real; + float s = 1.0 / (b.real + b.imag * r); + return __pyx_t_float_complex_from_parts( + (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); + } + } else { + float r = b.real / b.imag; + float s = 1.0 / (b.imag + b.real * r); + return __pyx_t_float_complex_from_parts( + (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); + } + } + #else + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + if (b.imag == 0) { + return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real); + } else { + float denom = b.real * b.real + b.imag * b.imag; + return __pyx_t_float_complex_from_parts( + (a.real * b.real + a.imag * b.imag) / denom, + (a.imag * b.real - a.real * b.imag) / denom); + } + } 
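+ /* The #if 1 branch above implements Smith's algorithm: scaling by
+    r = b.imag/b.real (or its reciprocal) keeps the intermediates in range,
+    whereas the naive formula in this #else branch overflows the denominator
+    for inputs as modest as b = 1e30f + 1e30f*i in single precision. */
+#if 0
+ /* Illustration only, never compiled: a hypothetical driver comparing the
+    two variants on such an input; __pyx_demo_quot is not part of the
+    generated module. */
+ static void __pyx_demo_quot(void) {
+     __pyx_t_float_complex a = __pyx_t_float_complex_from_parts(1.0f, 0.0f);
+     __pyx_t_float_complex b = __pyx_t_float_complex_from_parts(1e30f, 1e30f);
+     __pyx_t_float_complex q = __Pyx_c_quot_float(a, b);
+     /* Smith: q ~= 5e-31 - 5e-31i; the naive denominator
+        b.real*b.real + b.imag*b.imag would already be +inf in float. */
+     (void)q;
+ }
+#endif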
+ #endif + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex a) { + __pyx_t_float_complex z; + z.real = -a.real; + z.imag = -a.imag; + return z; + } + static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex a) { + return (a.real == 0) && (a.imag == 0); + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex a) { + __pyx_t_float_complex z; + z.real = a.real; + z.imag = -a.imag; + return z; + } + #if 1 + static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex z) { + #if !defined(HAVE_HYPOT) || defined(_MSC_VER) + return sqrtf(z.real*z.real + z.imag*z.imag); + #else + return hypotf(z.real, z.imag); + #endif + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + __pyx_t_float_complex z; + float r, lnr, theta, z_r, z_theta; + if (b.imag == 0 && b.real == (int)b.real) { + if (b.real < 0) { + float denom = a.real * a.real + a.imag * a.imag; + a.real = a.real / denom; + a.imag = -a.imag / denom; + b.real = -b.real; + } + switch ((int)b.real) { + case 0: + z.real = 1; + z.imag = 0; + return z; + case 1: + return a; + case 2: + z = __Pyx_c_prod_float(a, a); + return __Pyx_c_prod_float(a, a); + case 3: + z = __Pyx_c_prod_float(a, a); + return __Pyx_c_prod_float(z, a); + case 4: + z = __Pyx_c_prod_float(a, a); + return __Pyx_c_prod_float(z, z); + } + } + if (a.imag == 0) { + if (a.real == 0) { + return a; + } else if (b.imag == 0) { + z.real = powf(a.real, b.real); + z.imag = 0; + return z; + } else if (a.real > 0) { + r = a.real; + theta = 0; + } else { + r = -a.real; + theta = atan2f(0, -1); + } + } else { + r = __Pyx_c_abs_float(a); + theta = atan2f(a.imag, a.real); + } + lnr = logf(r); + z_r = expf(lnr * b.real - theta * b.imag); + z_theta = theta * b.real + lnr * b.imag; + z.real = z_r * cosf(z_theta); + z.imag = z_r * sinf(z_theta); + return z; + } + #endif +#endif + +/* Declarations */ + #if CYTHON_CCOMPLEX + #ifdef __cplusplus + static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { + return ::std::complex< double >(x, y); + } + #else + static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { + return x + y*(__pyx_t_double_complex)_Complex_I; + } + #endif +#else + static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { + __pyx_t_double_complex z; + z.real = x; + z.imag = y; + return z; + } +#endif + +/* Arithmetic */ + #if CYTHON_CCOMPLEX +#else + static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + return (a.real == b.real) && (a.imag == b.imag); + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + __pyx_t_double_complex z; + z.real = a.real + b.real; + z.imag = a.imag + b.imag; + return z; + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + __pyx_t_double_complex z; + z.real = a.real - b.real; + z.imag = a.imag - b.imag; + return z; + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + __pyx_t_double_complex z; + z.real = a.real * b.real - a.imag * b.imag; + z.imag = a.real * b.imag + a.imag * b.real; + return z; + } + #if 1 + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + if 
(b.imag == 0) { + return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real); + } else if (fabs(b.real) >= fabs(b.imag)) { + if (b.real == 0 && b.imag == 0) { + return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.imag); + } else { + double r = b.imag / b.real; + double s = 1.0 / (b.real + b.imag * r); + return __pyx_t_double_complex_from_parts( + (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); + } + } else { + double r = b.real / b.imag; + double s = 1.0 / (b.imag + b.real * r); + return __pyx_t_double_complex_from_parts( + (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); + } + } + #else + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + if (b.imag == 0) { + return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real); + } else { + double denom = b.real * b.real + b.imag * b.imag; + return __pyx_t_double_complex_from_parts( + (a.real * b.real + a.imag * b.imag) / denom, + (a.imag * b.real - a.real * b.imag) / denom); + } + } + #endif + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex a) { + __pyx_t_double_complex z; + z.real = -a.real; + z.imag = -a.imag; + return z; + } + static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex a) { + return (a.real == 0) && (a.imag == 0); + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex a) { + __pyx_t_double_complex z; + z.real = a.real; + z.imag = -a.imag; + return z; + } + #if 1 + static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex z) { + #if !defined(HAVE_HYPOT) || defined(_MSC_VER) + return sqrt(z.real*z.real + z.imag*z.imag); + #else + return hypot(z.real, z.imag); + #endif + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + __pyx_t_double_complex z; + double r, lnr, theta, z_r, z_theta; + if (b.imag == 0 && b.real == (int)b.real) { + if (b.real < 0) { + double denom = a.real * a.real + a.imag * a.imag; + a.real = a.real / denom; + a.imag = -a.imag / denom; + b.real = -b.real; + } + switch ((int)b.real) { + case 0: + z.real = 1; + z.imag = 0; + return z; + case 1: + return a; + case 2: + z = __Pyx_c_prod_double(a, a); + return __Pyx_c_prod_double(a, a); + case 3: + z = __Pyx_c_prod_double(a, a); + return __Pyx_c_prod_double(z, a); + case 4: + z = __Pyx_c_prod_double(a, a); + return __Pyx_c_prod_double(z, z); + } + } + if (a.imag == 0) { + if (a.real == 0) { + return a; + } else if (b.imag == 0) { + z.real = pow(a.real, b.real); + z.imag = 0; + return z; + } else if (a.real > 0) { + r = a.real; + theta = 0; + } else { + r = -a.real; + theta = atan2(0, -1); + } + } else { + r = __Pyx_c_abs_double(a); + theta = atan2(a.imag, a.real); + } + lnr = log(r); + z_r = exp(lnr * b.real - theta * b.imag); + z_theta = theta * b.real + lnr * b.imag; + z.real = z_r * cos(z_theta); + z.imag = z_r * sin(z_theta); + return z; + } + #endif +#endif + +/* CIntToPy */ + static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value) { + const enum NPY_TYPES neg_one = (enum NPY_TYPES) -1, const_zero = (enum NPY_TYPES) 0; + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(enum NPY_TYPES) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(enum 
NPY_TYPES) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(enum NPY_TYPES) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(enum NPY_TYPES) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(enum NPY_TYPES), + little, !is_unsigned); + } +} + +/* CIntFromPy */ + static CYTHON_INLINE unsigned int __Pyx_PyInt_As_unsigned_int(PyObject *x) { + const unsigned int neg_one = (unsigned int) -1, const_zero = (unsigned int) 0; + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(unsigned int) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(unsigned int, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (unsigned int) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (unsigned int) 0; + case 1: __PYX_VERIFY_RETURN_INT(unsigned int, digit, digits[0]) + case 2: + if (8 * sizeof(unsigned int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) >= 2 * PyLong_SHIFT) { + return (unsigned int) (((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(unsigned int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) >= 3 * PyLong_SHIFT) { + return (unsigned int) (((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(unsigned int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) >= 4 * PyLong_SHIFT) { + return (unsigned int) (((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (unsigned int) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(unsigned int) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(unsigned int, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(unsigned int) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(unsigned int, 
unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (unsigned int) 0; + case -1: __PYX_VERIFY_RETURN_INT(unsigned int, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(unsigned int, digit, +digits[0]) + case -2: + if (8 * sizeof(unsigned int) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT) { + return (unsigned int) (((unsigned int)-1)*(((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(unsigned int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT) { + return (unsigned int) ((((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT) { + return (unsigned int) (((unsigned int)-1)*(((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(unsigned int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT) { + return (unsigned int) ((((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 4 * PyLong_SHIFT) { + return (unsigned int) (((unsigned int)-1)*(((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(unsigned int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 4 * PyLong_SHIFT) { + return (unsigned int) ((((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned 
int)digits[0]))); + } + } + break; + } +#endif + if (sizeof(unsigned int) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(unsigned int, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(unsigned int) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(unsigned int, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + unsigned int val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (unsigned int) -1; + } + } else { + unsigned int val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (unsigned int) -1; + val = __Pyx_PyInt_As_unsigned_int(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to unsigned int"); + return (unsigned int) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to unsigned int"); + return (unsigned int) -1; +} + +/* CIntFromPy */ + static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { + const int neg_one = (int) -1, const_zero = (int) 0; + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(int) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (int) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (int) 0; + case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) + case 2: + if (8 * sizeof(int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { + return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { + return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { + return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << 
PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (int) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(int) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (int) 0; + case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) + case -2: + if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned 
long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { + return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + } +#endif + if (sizeof(int) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + int val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (int) -1; + } + } else { + int val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (int) -1; + val = __Pyx_PyInt_As_int(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to int"); + return (int) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to int"); + return (int) -1; +} + +/* CIntToPy */ + static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { + const long neg_one = (long) -1, const_zero = (long) 0; + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(long) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(long) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(long) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(long), + little, !is_unsigned); + } +} + +/* CIntFromPy */ + static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { + const long neg_one = (long) -1, const_zero = (long) 0; + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(long) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (long) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (long) 0; + case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) + case 2: + if (8 * sizeof(long) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << 
PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { + return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(long) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { + return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(long) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { + return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (long) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(long) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (long) 0; + case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) + case -2: + if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(long) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(long) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << 
PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(long) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + } +#endif + if (sizeof(long) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + long val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (long) -1; + } + } else { + long val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (long) -1; + val = __Pyx_PyInt_As_long(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to long"); + return (long) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to long"); + return (long) -1; +} + +/* FastTypeChecks */ + #if CYTHON_COMPILING_IN_CPYTHON +static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { + while (a) { + a = a->tp_base; + if (a == b) + return 1; + } + return b == &PyBaseObject_Type; +} +static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { + PyObject *mro; + if (a == b) return 1; + mro = a->tp_mro; + if (likely(mro)) { + Py_ssize_t i, n; + n = PyTuple_GET_SIZE(mro); + for (i = 0; i < n; i++) { + if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) + return 1; + } + return 0; + } + return __Pyx_InBases(a, b); +} +#if PY_MAJOR_VERSION == 2 +static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { + PyObject *exception, *value, *tb; + int res; + 
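+    /* Note (descriptive comment, not in the generated source): on Python 2, PyObject_IsSubclass() can run arbitrary __subclasscheck__ code and may itself raise, so the live exception state is fetched here and restored before returning. */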
__Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ErrFetch(&exception, &value, &tb); + res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; + if (unlikely(res == -1)) { + PyErr_WriteUnraisable(err); + res = 0; + } + if (!res) { + res = PyObject_IsSubclass(err, exc_type2); + if (unlikely(res == -1)) { + PyErr_WriteUnraisable(err); + res = 0; + } + } + __Pyx_ErrRestore(exception, value, tb); + return res; +} +#else +static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { + int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; + if (!res) { + res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); + } + return res; +} +#endif +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { + if (likely(err == exc_type)) return 1; + if (likely(PyExceptionClass_Check(err))) { + return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); + } + return PyErr_GivenExceptionMatches(err, exc_type); +} +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { + if (likely(err == exc_type1 || err == exc_type2)) return 1; + if (likely(PyExceptionClass_Check(err))) { + return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); + } + return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); +} +#endif + +/* CheckBinaryVersion */ + static int __Pyx_check_binary_version(void) { + char ctversion[4], rtversion[4]; + PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); + PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); + if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { + char message[200]; + PyOS_snprintf(message, sizeof(message), + "compiletime version %s of module '%.100s' " + "does not match runtime version %s", + ctversion, __Pyx_MODULE_NAME, rtversion); + return PyErr_WarnEx(NULL, message, 1); + } + return 0; +} + +/* ModuleImport */ + #ifndef __PYX_HAVE_RT_ImportModule +#define __PYX_HAVE_RT_ImportModule +static PyObject *__Pyx_ImportModule(const char *name) { + PyObject *py_name = 0; + PyObject *py_module = 0; + py_name = __Pyx_PyIdentifier_FromString(name); + if (!py_name) + goto bad; + py_module = PyImport_Import(py_name); + Py_DECREF(py_name); + return py_module; +bad: + Py_XDECREF(py_name); + return 0; +} +#endif + +/* TypeImport */ + #ifndef __PYX_HAVE_RT_ImportType +#define __PYX_HAVE_RT_ImportType +static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, + size_t size, int strict) +{ + PyObject *py_module = 0; + PyObject *result = 0; + PyObject *py_name = 0; + char warning[200]; + Py_ssize_t basicsize; +#ifdef Py_LIMITED_API + PyObject *py_basicsize; +#endif + py_module = __Pyx_ImportModule(module_name); + if (!py_module) + goto bad; + py_name = __Pyx_PyIdentifier_FromString(class_name); + if (!py_name) + goto bad; + result = PyObject_GetAttr(py_module, py_name); + Py_DECREF(py_name); + py_name = 0; + Py_DECREF(py_module); + py_module = 0; + if (!result) + goto bad; + if (!PyType_Check(result)) { + PyErr_Format(PyExc_TypeError, + "%.200s.%.200s is not a type object", + module_name, class_name); + goto bad; + } +#ifndef Py_LIMITED_API + basicsize = ((PyTypeObject *)result)->tp_basicsize; +#else + py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); + if (!py_basicsize) + goto bad; + basicsize = 
PyLong_AsSsize_t(py_basicsize); + Py_DECREF(py_basicsize); + py_basicsize = 0; + if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) + goto bad; +#endif + if (!strict && (size_t)basicsize > size) { + PyOS_snprintf(warning, sizeof(warning), + "%s.%s size changed, may indicate binary incompatibility. Expected %zd, got %zd", + module_name, class_name, basicsize, size); + if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; + } + else if ((size_t)basicsize != size) { + PyErr_Format(PyExc_ValueError, + "%.200s.%.200s has the wrong size, try recompiling. Expected %zd, got %zd", + module_name, class_name, basicsize, size); + goto bad; + } + return (PyTypeObject *)result; +bad: + Py_XDECREF(py_module); + Py_XDECREF(result); + return NULL; +} +#endif + +/* InitStrings */ + static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { + while (t->p) { + #if PY_MAJOR_VERSION < 3 + if (t->is_unicode) { + *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); + } else if (t->intern) { + *t->p = PyString_InternFromString(t->s); + } else { + *t->p = PyString_FromStringAndSize(t->s, t->n - 1); + } + #else + if (t->is_unicode | t->is_str) { + if (t->intern) { + *t->p = PyUnicode_InternFromString(t->s); + } else if (t->encoding) { + *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); + } else { + *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); + } + } else { + *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); + } + #endif + if (!*t->p) + return -1; + if (PyObject_Hash(*t->p) == -1) + PyErr_Clear(); + ++t; + } + return 0; +} + +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { + return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); +} +static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { + Py_ssize_t ignore; + return __Pyx_PyObject_AsStringAndSize(o, &ignore); +} +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT +#if !CYTHON_PEP393_ENABLED +static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { + char* defenc_c; + PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); + if (!defenc) return NULL; + defenc_c = PyBytes_AS_STRING(defenc); +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + { + char* end = defenc_c + PyBytes_GET_SIZE(defenc); + char* c; + for (c = defenc_c; c < end; c++) { + if ((unsigned char) (*c) >= 128) { + PyUnicode_AsASCIIString(o); + return NULL; + } + } + } +#endif + *length = PyBytes_GET_SIZE(defenc); + return defenc_c; +} +#else +static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { + if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + if (likely(PyUnicode_IS_ASCII(o))) { + *length = PyUnicode_GET_LENGTH(o); + return PyUnicode_AsUTF8(o); + } else { + PyUnicode_AsASCIIString(o); + return NULL; + } +#else + return PyUnicode_AsUTF8AndSize(o, length); +#endif +} +#endif +#endif +static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT + if ( +#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + __Pyx_sys_getdefaultencoding_not_ascii && +#endif + PyUnicode_Check(o)) { + return __Pyx_PyUnicode_AsStringAndSize(o, length); + } else +#endif +#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) + if (PyByteArray_Check(o)) { + *length = PyByteArray_GET_SIZE(o); + 
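+        /* Descriptive note: bytearray objects expose their internal buffer directly, so this branch needs no copy or encoding step. */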
return PyByteArray_AS_STRING(o); + } else +#endif + { + char* result; + int r = PyBytes_AsStringAndSize(o, &result, length); + if (unlikely(r < 0)) { + return NULL; + } else { + return result; + } + } +} +static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { + int is_true = x == Py_True; + if (is_true | (x == Py_False) | (x == Py_None)) return is_true; + else return PyObject_IsTrue(x); +} +static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { +#if PY_MAJOR_VERSION >= 3 + if (PyLong_Check(result)) { + if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, + "__int__ returned non-int (type %.200s). " + "The ability to return an instance of a strict subclass of int " + "is deprecated, and may be removed in a future version of Python.", + Py_TYPE(result)->tp_name)) { + Py_DECREF(result); + return NULL; + } + return result; + } +#endif + PyErr_Format(PyExc_TypeError, + "__%.4s__ returned non-%.4s (type %.200s)", + type_name, type_name, Py_TYPE(result)->tp_name); + Py_DECREF(result); + return NULL; +} +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { +#if CYTHON_USE_TYPE_SLOTS + PyNumberMethods *m; +#endif + const char *name = NULL; + PyObject *res = NULL; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x) || PyLong_Check(x))) +#else + if (likely(PyLong_Check(x))) +#endif + return __Pyx_NewRef(x); +#if CYTHON_USE_TYPE_SLOTS + m = Py_TYPE(x)->tp_as_number; + #if PY_MAJOR_VERSION < 3 + if (m && m->nb_int) { + name = "int"; + res = m->nb_int(x); + } + else if (m && m->nb_long) { + name = "long"; + res = m->nb_long(x); + } + #else + if (likely(m && m->nb_int)) { + name = "int"; + res = m->nb_int(x); + } + #endif +#else + if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { + res = PyNumber_Int(x); + } +#endif + if (likely(res)) { +#if PY_MAJOR_VERSION < 3 + if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { +#else + if (unlikely(!PyLong_CheckExact(res))) { +#endif + return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); + } + } + else if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_TypeError, + "an integer is required"); + } + return res; +} +static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { + Py_ssize_t ival; + PyObject *x; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_CheckExact(b))) { + if (sizeof(Py_ssize_t) >= sizeof(long)) + return PyInt_AS_LONG(b); + else + return PyInt_AsSsize_t(b); + } +#endif + if (likely(PyLong_CheckExact(b))) { + #if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)b)->ob_digit; + const Py_ssize_t size = Py_SIZE(b); + if (likely(__Pyx_sst_abs(size) <= 1)) { + ival = likely(size) ?
digits[0] : 0; + if (size == -1) ival = -ival; + return ival; + } else { + switch (size) { + case 2: + if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { + return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -2: + if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case 3: + if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { + return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -3: + if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case 4: + if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { + return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -4: + if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + } + } + #endif + return PyLong_AsSsize_t(b); + } + x = PyNumber_Index(b); + if (!x) return -1; + ival = PyInt_AsSsize_t(x); + Py_DECREF(x); + return ival; +} +static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { + return PyInt_FromSize_t(ival); +} + + +#endif /* Py_PYTHON_H */ diff --git a/astropy/convolution/boundary_extend.pyx b/astropy/convolution/boundary_extend.pyx new file mode 100644 index 0000000..6cf2cc4 --- /dev/null +++ b/astropy/convolution/boundary_extend.pyx @@ -0,0 +1,187 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import division +import numpy as np +cimport numpy as np + +DTYPE = np.float +ctypedef np.float_t DTYPE_t + +cdef inline int int_max(int a, int b) nogil: return a if a >= b else b +cdef inline int int_min(int a, int b) nogil: return a if a <= b else b + +cdef extern from "numpy/npy_math.h" nogil: + bint npy_isnan(double x) + +cimport cython + + +@cython.boundscheck(False) # turn off bounds-checking for entire function +def convolve1d_boundary_extend(np.ndarray[DTYPE_t, ndim=1] f, + np.ndarray[DTYPE_t, ndim=1] g, + bint normalize_by_kernel + ): + + if g.shape[0] % 2 != 1: + raise ValueError("Convolution kernel must have odd dimensions") + + assert f.dtype == DTYPE and g.dtype == DTYPE + + cdef int nx = f.shape[0] + cdef int nkx = g.shape[0] + cdef int wkx = nkx // 2 + cdef np.ndarray[DTYPE_t, ndim=1] conv = np.empty([nx], dtype=DTYPE) + cdef unsigned int i, iii + cdef int ii + + cdef int iimin, iimax + + cdef DTYPE_t top, bot, ker, val + + # release the GIL + with nogil: + + # Now run the proper convolution + for i in range(nx): + top = 0. + bot = 0. 
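+            # Boundary 'extend': sample positions outside the array are clamped to the nearest edge pixel below, and the kernel index is reversed so this computes a true convolution rather than a correlation. Illustrative NumPy equivalent of the clamping (not part of the build): iii = np.clip(ii, 0, nx - 1)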
+ iimin = i - wkx + iimax = i + wkx + 1 + for ii in range(iimin, iimax): + iii = int_min(int_max(ii, 0), nx - 1) + val = f[iii] + ker = g[(nkx - 1 - (wkx + ii - i))] + if not npy_isnan(val): + top += val * ker + bot += ker + if normalize_by_kernel: + if bot == 0: + conv[i] = f[i] + else: + conv[i] = top / bot + else: + conv[i] = top + # GIL acquired again here + return conv + + +@cython.boundscheck(False) # turn off bounds-checking for entire function +def convolve2d_boundary_extend(np.ndarray[DTYPE_t, ndim=2] f, + np.ndarray[DTYPE_t, ndim=2] g, + bint normalize_by_kernel): + + if g.shape[0] % 2 != 1 or g.shape[1] % 2 != 1: + raise ValueError("Convolution kernel must have odd dimensions") + + assert f.dtype == DTYPE and g.dtype == DTYPE + + cdef int nx = f.shape[0] + cdef int ny = f.shape[1] + cdef int nkx = g.shape[0] + cdef int nky = g.shape[1] + cdef int wkx = nkx // 2 + cdef int wky = nky // 2 + cdef np.ndarray[DTYPE_t, ndim=2] conv = np.empty([nx, ny], dtype=DTYPE) + cdef unsigned int i, j, iii, jjj + cdef int ii, jj + + cdef int iimin, iimax, jjmin, jjmax + + cdef DTYPE_t top, bot, ker, val + + # release the GIL + with nogil: + + # Now run the proper convolution + for i in range(nx): + for j in range(ny): + top = 0. + bot = 0. + iimin = i - wkx + iimax = i + wkx + 1 + jjmin = j - wky + jjmax = j + wky + 1 + for ii in range(iimin, iimax): + for jj in range(jjmin, jjmax): + iii = int_min(int_max(ii, 0), nx - 1) + jjj = int_min(int_max(jj, 0), ny - 1) + val = f[iii, jjj] + ker = g[(nkx - 1 - (wkx + ii - i)), + (nky - 1 - (wky + jj - j))] + if not npy_isnan(val): + top += val * ker + bot += ker + if normalize_by_kernel: + if bot == 0: + conv[i, j] = f[i, j] + else: + conv[i, j] = top / bot + else: + conv[i, j] = top + # GIL acquired again here + return conv + + +@cython.boundscheck(False) # turn off bounds-checking for entire function +def convolve3d_boundary_extend(np.ndarray[DTYPE_t, ndim=3] f, + np.ndarray[DTYPE_t, ndim=3] g, + bint normalize_by_kernel): + + if g.shape[0] % 2 != 1 or g.shape[1] % 2 != 1 or g.shape[2] % 2 != 1: + raise ValueError("Convolution kernel must have odd dimensions") + + assert f.dtype == DTYPE and g.dtype == DTYPE + + cdef int nx = f.shape[0] + cdef int ny = f.shape[1] + cdef int nz = f.shape[2] + cdef int nkx = g.shape[0] + cdef int nky = g.shape[1] + cdef int nkz = g.shape[2] + cdef int wkx = nkx // 2 + cdef int wky = nky // 2 + cdef int wkz = nkz // 2 + cdef np.ndarray[DTYPE_t, ndim=3] conv = np.empty([nx, ny, nz], dtype=DTYPE) + cdef unsigned int i, j, k, iii, jjj, kkk + cdef int ii, jj, kk + + cdef int iimin, iimax, jjmin, jjmax, kkmin, kkmax + + cdef DTYPE_t top, bot, ker, val + + # release the GIL + with nogil: + + # Now run the proper convolution + for i in range(nx): + for j in range(ny): + for k in range(nz): + top = 0. + bot = 0. 
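+                        # Same edge-value clamping as in the 1-D case above, applied independently along each of the three axes.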
+ iimin = i - wkx + iimax = i + wkx + 1 + jjmin = j - wky + jjmax = j + wky + 1 + kkmin = k - wkz + kkmax = k + wkz + 1 + for ii in range(iimin, iimax): + for jj in range(jjmin, jjmax): + for kk in range(kkmin, kkmax): + iii = int_min(int_max(ii, 0), nx - 1) + jjj = int_min(int_max(jj, 0), ny - 1) + kkk = int_min(int_max(kk, 0), nz - 1) + val = f[iii, jjj, kkk] + ker = g[(nkx - 1 - (wkx + ii - i)), + (nky - 1 - (wky + jj - j)), + (nkz - 1 - (wkz + kk - k))] + if not npy_isnan(val): + top += val * ker + bot += ker + if normalize_by_kernel: + if bot == 0: + conv[i, j, k] = f[i, j, k] + else: + conv[i, j, k] = top / bot + else: + conv[i, j, k] = top + # GIL acquired again here + return conv diff --git a/astropy/convolution/boundary_fill.c b/astropy/convolution/boundary_fill.c new file mode 100644 index 0000000..b33e8cc --- /dev/null +++ b/astropy/convolution/boundary_fill.c @@ -0,0 +1,9971 @@ +/* Generated by Cython 0.27.3 */ + +#define PY_SSIZE_T_CLEAN +#include "Python.h" +#ifndef Py_PYTHON_H + #error Python headers needed to compile C extensions, please install development version of Python. +#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) + #error Cython requires Python 2.6+ or Python 3.3+. +#else +#define CYTHON_ABI "0_27_3" +#define CYTHON_FUTURE_DIVISION 1 +#include <stddef.h> +#ifndef offsetof + #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) +#endif +#if !defined(WIN32) && !defined(MS_WINDOWS) + #ifndef __stdcall + #define __stdcall + #endif + #ifndef __cdecl + #define __cdecl + #endif + #ifndef __fastcall + #define __fastcall + #endif +#endif +#ifndef DL_IMPORT + #define DL_IMPORT(t) t +#endif +#ifndef DL_EXPORT + #define DL_EXPORT(t) t +#endif +#define __PYX_COMMA , +#ifndef HAVE_LONG_LONG + #if PY_VERSION_HEX >= 0x02070000 + #define HAVE_LONG_LONG + #endif +#endif +#ifndef PY_LONG_LONG + #define PY_LONG_LONG LONG_LONG +#endif +#ifndef Py_HUGE_VAL + #define Py_HUGE_VAL HUGE_VAL +#endif +#ifdef PYPY_VERSION + #define CYTHON_COMPILING_IN_PYPY 1 + #define CYTHON_COMPILING_IN_PYSTON 0 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #undef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 0 + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #if PY_VERSION_HEX < 0x03050000 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #undef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 0 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #undef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 1 + #undef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 0 + #undef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 0 + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 +#elif defined(PYSTON_VERSION) + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_PYSTON 1 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #undef
CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #ifndef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 +#else + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_PYSTON 0 + #define CYTHON_COMPILING_IN_CPYTHON 1 + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif + #if PY_VERSION_HEX < 0x02070000 + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) + #define CYTHON_USE_PYTYPE_LOOKUP 1 + #endif + #if PY_MAJOR_VERSION < 3 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #if PY_VERSION_HEX < 0x02070000 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #elif !defined(CYTHON_USE_PYLONG_INTERNALS) + #define CYTHON_USE_PYLONG_INTERNALS 1 + #endif + #ifndef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 1 + #endif + #ifndef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif + #if PY_VERSION_HEX < 0x030300F0 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #elif !defined(CYTHON_USE_UNICODE_WRITER) + #define CYTHON_USE_UNICODE_WRITER 1 + #endif + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #ifndef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 1 + #endif + #ifndef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 1 + #endif + #ifndef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT (0 && PY_VERSION_HEX >= 0x03050000) + #endif + #ifndef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) + #endif +#endif +#if !defined(CYTHON_FAST_PYCCALL) +#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) +#endif +#if CYTHON_USE_PYLONG_INTERNALS + #include "longintrepr.h" + #undef SHIFT + #undef BASE + #undef MASK +#endif +#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) + #define Py_OptimizeFlag 0 +#endif +#define __PYX_BUILD_PY_SSIZE_T "n" +#define CYTHON_FORMAT_SSIZE_T "z" +#if PY_MAJOR_VERSION < 3 + #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) + #define __Pyx_DefaultClassType PyClass_Type +#else + #define __Pyx_BUILTIN_MODULE_NAME "builtins" + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + 
PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) + #define __Pyx_DefaultClassType PyType_Type +#endif +#ifndef Py_TPFLAGS_CHECKTYPES + #define Py_TPFLAGS_CHECKTYPES 0 +#endif +#ifndef Py_TPFLAGS_HAVE_INDEX + #define Py_TPFLAGS_HAVE_INDEX 0 +#endif +#ifndef Py_TPFLAGS_HAVE_NEWBUFFER + #define Py_TPFLAGS_HAVE_NEWBUFFER 0 +#endif +#ifndef Py_TPFLAGS_HAVE_FINALIZE + #define Py_TPFLAGS_HAVE_FINALIZE 0 +#endif +#if PY_VERSION_HEX < 0x030700A0 || !defined(METH_FASTCALL) + #ifndef METH_FASTCALL + #define METH_FASTCALL 0x80 + #endif + typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject **args, Py_ssize_t nargs); + typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject **args, + Py_ssize_t nargs, PyObject *kwnames); +#else + #define __Pyx_PyCFunctionFast _PyCFunctionFast + #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords +#endif +#if CYTHON_FAST_PYCCALL +#define __Pyx_PyFastCFunction_Check(func)\ + ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS))))) +#else +#define __Pyx_PyFastCFunction_Check(func) 0 +#endif +#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 + #define __Pyx_PyThreadState_Current PyThreadState_GET() +#elif PY_VERSION_HEX >= 0x03060000 + #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() +#elif PY_VERSION_HEX >= 0x03000000 + #define __Pyx_PyThreadState_Current PyThreadState_GET() +#else + #define __Pyx_PyThreadState_Current _PyThreadState_Current +#endif +#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) +#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n)) +#else +#define __Pyx_PyDict_NewPresized(n) PyDict_New() +#endif +#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION + #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) +#else + #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) +#endif +#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) + #define CYTHON_PEP393_ENABLED 1 + #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ + 0 : _PyUnicode_Ready((PyObject *)(op))) + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) + #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) + #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) + #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) +#else + #define CYTHON_PEP393_ENABLED 0 + #define PyUnicode_1BYTE_KIND 1 + #define PyUnicode_2BYTE_KIND 2 + #define PyUnicode_4BYTE_KIND 4 + #define __Pyx_PyUnicode_READY(op) (0) + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 
65535 : 1114111) + #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) + #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) + #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) +#endif +#if CYTHON_COMPILING_IN_PYPY + #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) + #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) +#else + #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) + #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ + PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) + #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) + #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) + #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) + #define PyObject_Malloc(s) PyMem_Malloc(s) + #define PyObject_Free(p) PyMem_Free(p) + #define PyObject_Realloc(p) PyMem_Realloc(p) +#endif +#if CYTHON_COMPILING_IN_PYSTON + #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) + #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) +#else + #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) + #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) +#endif +#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) +#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) +#else + #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) +#endif +#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) + #define PyObject_ASCII(o) PyObject_Repr(o) +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyBaseString_Type PyUnicode_Type + #define PyStringObject PyUnicodeObject + #define PyString_Type PyUnicode_Type + #define PyString_Check PyUnicode_Check + #define PyString_CheckExact PyUnicode_CheckExact +#endif +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) + #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) +#else + #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) + #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) +#endif +#ifndef PySet_CheckExact + #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) +#endif +#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) +#if PY_MAJOR_VERSION >= 3 + #define PyIntObject PyLongObject + #define PyInt_Type PyLong_Type + #define PyInt_Check(op) PyLong_Check(op) + #define PyInt_CheckExact(op) PyLong_CheckExact(op) + #define PyInt_FromString PyLong_FromString + #define PyInt_FromUnicode PyLong_FromUnicode + #define PyInt_FromLong PyLong_FromLong + #define PyInt_FromSize_t PyLong_FromSize_t + #define PyInt_FromSsize_t PyLong_FromSsize_t + #define PyInt_AsLong PyLong_AsLong + #define PyInt_AS_LONG PyLong_AS_LONG + #define PyInt_AsSsize_t PyLong_AsSsize_t + #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask + #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask + #define PyNumber_Int PyNumber_Long +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyBoolObject PyLongObject +#endif +#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY + #ifndef PyUnicode_InternFromString + #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) + #endif +#endif +#if PY_VERSION_HEX < 0x030200A4 + typedef long Py_hash_t; + #define __Pyx_PyInt_FromHash_t PyInt_FromLong + #define __Pyx_PyInt_AsHash_t PyInt_AsLong +#else + #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t + #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t +#endif +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyMethod_New(func, self, klass) ((self) ? 
PyMethod_New(func, self) : PyInstanceMethod_New(func)) +#else + #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) +#endif +#ifndef __has_attribute + #define __has_attribute(x) 0 +#endif +#ifndef __has_cpp_attribute + #define __has_cpp_attribute(x) 0 +#endif +#if CYTHON_USE_ASYNC_SLOTS + #if PY_VERSION_HEX >= 0x030500B1 + #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods + #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) + #else + #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) + #endif +#else + #define __Pyx_PyType_AsAsync(obj) NULL +#endif +#ifndef __Pyx_PyAsyncMethodsStruct + typedef struct { + unaryfunc am_await; + unaryfunc am_aiter; + unaryfunc am_anext; + } __Pyx_PyAsyncMethodsStruct; +#endif +#ifndef CYTHON_RESTRICT + #if defined(__GNUC__) + #define CYTHON_RESTRICT __restrict__ + #elif defined(_MSC_VER) && _MSC_VER >= 1400 + #define CYTHON_RESTRICT __restrict + #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define CYTHON_RESTRICT restrict + #else + #define CYTHON_RESTRICT + #endif +#endif +#ifndef CYTHON_UNUSED +# if defined(__GNUC__) +# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) +# define CYTHON_UNUSED __attribute__ ((__unused__)) +# else +# define CYTHON_UNUSED +# endif +# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) +# define CYTHON_UNUSED __attribute__ ((__unused__)) +# else +# define CYTHON_UNUSED +# endif +#endif +#ifndef CYTHON_MAYBE_UNUSED_VAR +# if defined(__cplusplus) + template<typename T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } +# else +# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) +# endif +#endif +#ifndef CYTHON_NCP_UNUSED +# if CYTHON_COMPILING_IN_CPYTHON +# define CYTHON_NCP_UNUSED +# else +# define CYTHON_NCP_UNUSED CYTHON_UNUSED +# endif +#endif +#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) +#ifdef _MSC_VER + #ifndef _MSC_STDINT_H_ + #if _MSC_VER < 1300 + typedef unsigned char uint8_t; + typedef unsigned int uint32_t; + #else + typedef unsigned __int8 uint8_t; + typedef unsigned __int32 uint32_t; + #endif + #endif +#else + #include <stdint.h> +#endif +#ifndef CYTHON_FALLTHROUGH + #if defined(__cplusplus) && __cplusplus >= 201103L + #if __has_cpp_attribute(fallthrough) + #define CYTHON_FALLTHROUGH [[fallthrough]] + #elif __has_cpp_attribute(clang::fallthrough) + #define CYTHON_FALLTHROUGH [[clang::fallthrough]] + #elif __has_cpp_attribute(gnu::fallthrough) + #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] + #endif + #endif + #ifndef CYTHON_FALLTHROUGH + #if __has_attribute(fallthrough) + #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) + #else + #define CYTHON_FALLTHROUGH + #endif + #endif + #if defined(__clang__ ) && defined(__apple_build_version__) + #if __apple_build_version__ < 7000000 + #undef CYTHON_FALLTHROUGH + #define CYTHON_FALLTHROUGH + #endif + #endif +#endif + +#ifndef CYTHON_INLINE + #if defined(__clang__) + #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) + #elif defined(__GNUC__) + #define CYTHON_INLINE __inline__ + #elif defined(_MSC_VER) + #define CYTHON_INLINE __inline + #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define CYTHON_INLINE inline + #else + #define CYTHON_INLINE + #endif +#endif + +#if defined(WIN32) || defined(MS_WINDOWS) + #define _USE_MATH_DEFINES +#endif +#include <math.h> +#ifdef NAN +#define __PYX_NAN() ((float) NAN) +#else +static CYTHON_INLINE float __PYX_NAN() { + float value; +
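+  /* With every bit set, an IEEE-754 float has an all-ones exponent and a nonzero mantissa, i.e. it is a (quiet) NaN. */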
memset(&value, 0xFF, sizeof(value)); + return value; +} +#endif +#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) +#define __Pyx_truncl trunc +#else +#define __Pyx_truncl truncl +#endif + + +#define __PYX_ERR(f_index, lineno, Ln_error) \ +{ \ + __pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \ +} + +#ifndef __PYX_EXTERN_C + #ifdef __cplusplus + #define __PYX_EXTERN_C extern "C" + #else + #define __PYX_EXTERN_C extern + #endif +#endif + +#define __PYX_HAVE__astropy__convolution__boundary_fill +#define __PYX_HAVE_API__astropy__convolution__boundary_fill +#include <string.h> +#include <stdio.h> +#include "numpy/arrayobject.h" +#include "numpy/ufuncobject.h" +#include "numpy/npy_math.h" +#ifdef _OPENMP +#include <omp.h> +#endif /* _OPENMP */ + +#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) +#define CYTHON_WITHOUT_ASSERTIONS +#endif + +typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; + const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; + +#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 +#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0 +#define __PYX_DEFAULT_STRING_ENCODING "" +#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString +#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize +#define __Pyx_uchar_cast(c) ((unsigned char)c) +#define __Pyx_long_cast(x) ((long)x) +#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ + (sizeof(type) < sizeof(Py_ssize_t)) ||\ + (sizeof(type) > sizeof(Py_ssize_t) &&\ + likely(v < (type)PY_SSIZE_T_MAX ||\ + v == (type)PY_SSIZE_T_MAX) &&\ + (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ + v == (type)PY_SSIZE_T_MIN))) ||\ + (sizeof(type) == sizeof(Py_ssize_t) &&\ + (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ + v == (type)PY_SSIZE_T_MAX))) ) +#if defined (__cplusplus) && __cplusplus >= 201103L + #include <cstdlib> + #define __Pyx_sst_abs(value) std::abs(value) +#elif SIZEOF_INT >= SIZEOF_SIZE_T + #define __Pyx_sst_abs(value) abs(value) +#elif SIZEOF_LONG >= SIZEOF_SIZE_T + #define __Pyx_sst_abs(value) labs(value) +#elif defined (_MSC_VER) + #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) +#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define __Pyx_sst_abs(value) llabs(value) +#elif defined (__GNUC__) + #define __Pyx_sst_abs(value) __builtin_llabs(value) +#else + #define __Pyx_sst_abs(value) ((value<0) ?
-value : value) +#endif +static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); +static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); +#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) +#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) +#define __Pyx_PyBytes_FromString PyBytes_FromString +#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); +#if PY_MAJOR_VERSION < 3 + #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString + #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize +#else + #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString + #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize +#endif +#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) +#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) +#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) +#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) +#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) +static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { + const Py_UNICODE *u_end = u; + while (*u_end++) ; + return (size_t)(u_end - u - 1); +} +#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) +#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode +#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode +#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) +#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) +#define __Pyx_PyBool_FromLong(b) ((b) ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False)) +static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); +#define __Pyx_PySequence_Tuple(obj)\ + (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) +static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); +static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); +#if CYTHON_ASSUME_SAFE_MACROS +#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) +#else +#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) +#endif +#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) +#if PY_MAJOR_VERSION >= 3 +#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? 
__Pyx_NewRef(x) : PyNumber_Long(x)) +#else +#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) +#endif +#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) +#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII +static int __Pyx_sys_getdefaultencoding_not_ascii; +static int __Pyx_init_sys_getdefaultencoding_params(void) { + PyObject* sys; + PyObject* default_encoding = NULL; + PyObject* ascii_chars_u = NULL; + PyObject* ascii_chars_b = NULL; + const char* default_encoding_c; + sys = PyImport_ImportModule("sys"); + if (!sys) goto bad; + default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); + Py_DECREF(sys); + if (!default_encoding) goto bad; + default_encoding_c = PyBytes_AsString(default_encoding); + if (!default_encoding_c) goto bad; + if (strcmp(default_encoding_c, "ascii") == 0) { + __Pyx_sys_getdefaultencoding_not_ascii = 0; + } else { + char ascii_chars[128]; + int c; + for (c = 0; c < 128; c++) { + ascii_chars[c] = c; + } + __Pyx_sys_getdefaultencoding_not_ascii = 1; + ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); + if (!ascii_chars_u) goto bad; + ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); + if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { + PyErr_Format( + PyExc_ValueError, + "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", + default_encoding_c); + goto bad; + } + Py_DECREF(ascii_chars_u); + Py_DECREF(ascii_chars_b); + } + Py_DECREF(default_encoding); + return 0; +bad: + Py_XDECREF(default_encoding); + Py_XDECREF(ascii_chars_u); + Py_XDECREF(ascii_chars_b); + return -1; +} +#endif +#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 +#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) +#else +#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) +#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT +static char* __PYX_DEFAULT_STRING_ENCODING; +static int __Pyx_init_sys_getdefaultencoding_params(void) { + PyObject* sys; + PyObject* default_encoding = NULL; + char* default_encoding_c; + sys = PyImport_ImportModule("sys"); + if (!sys) goto bad; + default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); + Py_DECREF(sys); + if (!default_encoding) goto bad; + default_encoding_c = PyBytes_AsString(default_encoding); + if (!default_encoding_c) goto bad; + __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); + if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; + strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); + Py_DECREF(default_encoding); + return 0; +bad: + Py_XDECREF(default_encoding); + return -1; +} +#endif +#endif + + +/* Test for GCC > 2.95 */ +#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) + #define likely(x) __builtin_expect(!!(x), 1) + #define unlikely(x) __builtin_expect(!!(x), 0) +#else /* !__GNUC__ or GCC < 2.95 */ + #define likely(x) (x) + #define unlikely(x) (x) +#endif /* __GNUC__ */ +static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } + +static PyObject *__pyx_m = NULL; +static PyObject *__pyx_d; +static PyObject *__pyx_b; +static PyObject *__pyx_cython_runtime; +static PyObject *__pyx_empty_tuple; +static PyObject
*__pyx_empty_bytes; +static PyObject *__pyx_empty_unicode; +static int __pyx_lineno; +static int __pyx_clineno = 0; +static const char * __pyx_cfilenm= __FILE__; +static const char *__pyx_filename; + +/* Header.proto */ +#if !defined(CYTHON_CCOMPLEX) + #if defined(__cplusplus) + #define CYTHON_CCOMPLEX 1 + #elif defined(_Complex_I) + #define CYTHON_CCOMPLEX 1 + #else + #define CYTHON_CCOMPLEX 0 + #endif +#endif +#if CYTHON_CCOMPLEX + #ifdef __cplusplus + #include <complex> + #else + #include <complex.h> + #endif +#endif +#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) + #undef _Complex_I + #define _Complex_I 1.0fj +#endif + + +static const char *__pyx_f[] = { + "astropy/convolution/boundary_fill.pyx", + "__init__.pxd", + "type.pxd", +}; +/* BufferFormatStructs.proto */ +#define IS_UNSIGNED(type) (((type) -1) > 0) +struct __Pyx_StructField_; +#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) +typedef struct { + const char* name; + struct __Pyx_StructField_* fields; + size_t size; + size_t arraysize[8]; + int ndim; + char typegroup; + char is_unsigned; + int flags; +} __Pyx_TypeInfo; +typedef struct __Pyx_StructField_ { + __Pyx_TypeInfo* type; + const char* name; + size_t offset; +} __Pyx_StructField; +typedef struct { + __Pyx_StructField* field; + size_t parent_offset; +} __Pyx_BufFmt_StackElem; +typedef struct { + __Pyx_StructField root; + __Pyx_BufFmt_StackElem* head; + size_t fmt_offset; + size_t new_count, enc_count; + size_t struct_alignment; + int is_complex; + char enc_type; + char new_packmode; + char enc_packmode; + char is_valid_array; +} __Pyx_BufFmt_Context; + +/* NoFastGil.proto */ +#define __Pyx_PyGILState_Ensure PyGILState_Ensure +#define __Pyx_PyGILState_Release PyGILState_Release +#define __Pyx_FastGIL_Remember() +#define __Pyx_FastGIL_Forget() +#define __Pyx_FastGilFuncInit() + +/* ForceInitThreads.proto */ +#ifndef __PYX_FORCE_INIT_THREADS + #define __PYX_FORCE_INIT_THREADS 0 +#endif + + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":743 + * # in Cython to enable them only on the right systems.
+ * + * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< + * ctypedef npy_int16 int16_t + * ctypedef npy_int32 int32_t + */ +typedef npy_int8 __pyx_t_5numpy_int8_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":744 + * + * ctypedef npy_int8 int8_t + * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< + * ctypedef npy_int32 int32_t + * ctypedef npy_int64 int64_t + */ +typedef npy_int16 __pyx_t_5numpy_int16_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":745 + * ctypedef npy_int8 int8_t + * ctypedef npy_int16 int16_t + * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< + * ctypedef npy_int64 int64_t + * #ctypedef npy_int96 int96_t + */ +typedef npy_int32 __pyx_t_5numpy_int32_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":746 + * ctypedef npy_int16 int16_t + * ctypedef npy_int32 int32_t + * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< + * #ctypedef npy_int96 int96_t + * #ctypedef npy_int128 int128_t + */ +typedef npy_int64 __pyx_t_5numpy_int64_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":750 + * #ctypedef npy_int128 int128_t + * + * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< + * ctypedef npy_uint16 uint16_t + * ctypedef npy_uint32 uint32_t + */ +typedef npy_uint8 __pyx_t_5numpy_uint8_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":751 + * + * ctypedef npy_uint8 uint8_t + * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< + * ctypedef npy_uint32 uint32_t + * ctypedef npy_uint64 uint64_t + */ +typedef npy_uint16 __pyx_t_5numpy_uint16_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":752 + * ctypedef npy_uint8 uint8_t + * ctypedef npy_uint16 uint16_t + * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< + * ctypedef npy_uint64 uint64_t + * #ctypedef npy_uint96 uint96_t + */ +typedef npy_uint32 __pyx_t_5numpy_uint32_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":753 + * ctypedef npy_uint16 uint16_t + * ctypedef npy_uint32 uint32_t + * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< + * #ctypedef npy_uint96 uint96_t + * #ctypedef npy_uint128 uint128_t + */ +typedef npy_uint64 __pyx_t_5numpy_uint64_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":757 + * #ctypedef npy_uint128 uint128_t + * + * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< + * ctypedef npy_float64 float64_t + * #ctypedef npy_float80 float80_t + */ +typedef npy_float32 __pyx_t_5numpy_float32_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":758 + * + * ctypedef npy_float32 float32_t + * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< + * #ctypedef npy_float80 float80_t + * #ctypedef npy_float128 float128_t + */ +typedef npy_float64 __pyx_t_5numpy_float64_t; + +/* 
"../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":767 + * # The int types are mapped a bit surprising -- + * # numpy.int corresponds to 'l' and numpy.long to 'q' + * ctypedef npy_long int_t # <<<<<<<<<<<<<< + * ctypedef npy_longlong long_t + * ctypedef npy_longlong longlong_t + */ +typedef npy_long __pyx_t_5numpy_int_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":768 + * # numpy.int corresponds to 'l' and numpy.long to 'q' + * ctypedef npy_long int_t + * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< + * ctypedef npy_longlong longlong_t + * + */ +typedef npy_longlong __pyx_t_5numpy_long_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":769 + * ctypedef npy_long int_t + * ctypedef npy_longlong long_t + * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< + * + * ctypedef npy_ulong uint_t + */ +typedef npy_longlong __pyx_t_5numpy_longlong_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":771 + * ctypedef npy_longlong longlong_t + * + * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< + * ctypedef npy_ulonglong ulong_t + * ctypedef npy_ulonglong ulonglong_t + */ +typedef npy_ulong __pyx_t_5numpy_uint_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":772 + * + * ctypedef npy_ulong uint_t + * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< + * ctypedef npy_ulonglong ulonglong_t + * + */ +typedef npy_ulonglong __pyx_t_5numpy_ulong_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":773 + * ctypedef npy_ulong uint_t + * ctypedef npy_ulonglong ulong_t + * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< + * + * ctypedef npy_intp intp_t + */ +typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":775 + * ctypedef npy_ulonglong ulonglong_t + * + * ctypedef npy_intp intp_t # <<<<<<<<<<<<<< + * ctypedef npy_uintp uintp_t + * + */ +typedef npy_intp __pyx_t_5numpy_intp_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":776 + * + * ctypedef npy_intp intp_t + * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< + * + * ctypedef npy_double float_t + */ +typedef npy_uintp __pyx_t_5numpy_uintp_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":778 + * ctypedef npy_uintp uintp_t + * + * ctypedef npy_double float_t # <<<<<<<<<<<<<< + * ctypedef npy_double double_t + * ctypedef npy_longdouble longdouble_t + */ +typedef npy_double __pyx_t_5numpy_float_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":779 + * + * ctypedef npy_double float_t + * ctypedef npy_double double_t # <<<<<<<<<<<<<< + * ctypedef npy_longdouble longdouble_t + * + */ +typedef npy_double __pyx_t_5numpy_double_t; + +/* 
"../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":780 + * ctypedef npy_double float_t + * ctypedef npy_double double_t + * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< + * + * ctypedef npy_cfloat cfloat_t + */ +typedef npy_longdouble __pyx_t_5numpy_longdouble_t; + +/* "astropy/convolution/boundary_fill.pyx":8 + * + * DTYPE = np.float + * ctypedef np.float_t DTYPE_t # <<<<<<<<<<<<<< + * + * cdef extern from "numpy/npy_math.h" nogil: + */ +typedef __pyx_t_5numpy_float_t __pyx_t_7astropy_11convolution_13boundary_fill_DTYPE_t; +/* Declarations.proto */ +#if CYTHON_CCOMPLEX + #ifdef __cplusplus + typedef ::std::complex< float > __pyx_t_float_complex; + #else + typedef float _Complex __pyx_t_float_complex; + #endif +#else + typedef struct { float real, imag; } __pyx_t_float_complex; +#endif +static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); + +/* Declarations.proto */ +#if CYTHON_CCOMPLEX + #ifdef __cplusplus + typedef ::std::complex< double > __pyx_t_double_complex; + #else + typedef double _Complex __pyx_t_double_complex; + #endif +#else + typedef struct { double real, imag; } __pyx_t_double_complex; +#endif +static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); + + +/*--- Type declarations ---*/ + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":782 + * ctypedef npy_longdouble longdouble_t + * + * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< + * ctypedef npy_cdouble cdouble_t + * ctypedef npy_clongdouble clongdouble_t + */ +typedef npy_cfloat __pyx_t_5numpy_cfloat_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":783 + * + * ctypedef npy_cfloat cfloat_t + * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< + * ctypedef npy_clongdouble clongdouble_t + * + */ +typedef npy_cdouble __pyx_t_5numpy_cdouble_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":784 + * ctypedef npy_cfloat cfloat_t + * ctypedef npy_cdouble cdouble_t + * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< + * + * ctypedef npy_cdouble complex_t + */ +typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":786 + * ctypedef npy_clongdouble clongdouble_t + * + * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew1(a): + */ +typedef npy_cdouble __pyx_t_5numpy_complex_t; + +/* --- Runtime support code (head) --- */ +/* Refnanny.proto */ +#ifndef CYTHON_REFNANNY + #define CYTHON_REFNANNY 0 +#endif +#if CYTHON_REFNANNY + typedef struct { + void (*INCREF)(void*, PyObject*, int); + void (*DECREF)(void*, PyObject*, int); + void (*GOTREF)(void*, PyObject*, int); + void (*GIVEREF)(void*, PyObject*, int); + void* (*SetupContext)(const char*, int, const char*); + void (*FinishContext)(void**); + } __Pyx_RefNannyAPIStruct; + static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; + static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); + #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; +#ifdef WITH_THREAD + #define __Pyx_RefNannySetupContext(name, 
acquire_gil)\ + if (acquire_gil) {\ + PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ + PyGILState_Release(__pyx_gilstate_save);\ + } else {\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ + } +#else + #define __Pyx_RefNannySetupContext(name, acquire_gil)\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) +#endif + #define __Pyx_RefNannyFinishContext()\ + __Pyx_RefNanny->FinishContext(&__pyx_refnanny) + #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) + #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) + #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) + #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) +#else + #define __Pyx_RefNannyDeclarations + #define __Pyx_RefNannySetupContext(name, acquire_gil) + #define __Pyx_RefNannyFinishContext() + #define __Pyx_INCREF(r) Py_INCREF(r) + #define __Pyx_DECREF(r) Py_DECREF(r) + #define __Pyx_GOTREF(r) + #define __Pyx_GIVEREF(r) + #define __Pyx_XINCREF(r) Py_XINCREF(r) + #define __Pyx_XDECREF(r) Py_XDECREF(r) + #define __Pyx_XGOTREF(r) + #define __Pyx_XGIVEREF(r) +#endif +#define __Pyx_XDECREF_SET(r, v) do {\ + PyObject *tmp = (PyObject *) r;\ + r = v; __Pyx_XDECREF(tmp);\ + } while (0) +#define __Pyx_DECREF_SET(r, v) do {\ + PyObject *tmp = (PyObject *) r;\ + r = v; __Pyx_DECREF(tmp);\ + } while (0) +#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) +#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) + +/* PyObjectGetAttrStr.proto */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { + PyTypeObject* tp = Py_TYPE(obj); + if (likely(tp->tp_getattro)) + return tp->tp_getattro(obj, attr_name); +#if PY_MAJOR_VERSION < 3 + if (likely(tp->tp_getattr)) + return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); +#endif + return PyObject_GetAttr(obj, attr_name); +} +#else +#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) +#endif + +/* GetBuiltinName.proto */ +static PyObject *__Pyx_GetBuiltinName(PyObject *name); + +/* RaiseArgTupleInvalid.proto */ +static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, + Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); + +/* RaiseDoubleKeywords.proto */ +static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); + +/* ParseKeywords.proto */ +static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ + PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ + const char* function_name); + +/* ArgTypeTest.proto */ +#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\ + ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 
1 :\ + __Pyx__ArgTypeTest(obj, type, name, exact)) +static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); + +/* IsLittleEndian.proto */ +static CYTHON_INLINE int __Pyx_Is_Little_Endian(void); + +/* BufferFormatCheck.proto */ +static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); +static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, + __Pyx_BufFmt_StackElem* stack, + __Pyx_TypeInfo* type); + +/* BufferGetAndValidate.proto */ +#define __Pyx_GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack)\ + ((obj == Py_None || obj == NULL) ?\ + (__Pyx_ZeroBuffer(buf), 0) :\ + __Pyx__GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack)) +static int __Pyx__GetBufferAndValidate(Py_buffer* buf, PyObject* obj, + __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); +static void __Pyx_ZeroBuffer(Py_buffer* buf); +static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); +static Py_ssize_t __Pyx_minusones[] = { -1, -1, -1, -1, -1, -1, -1, -1 }; +static Py_ssize_t __Pyx_zeros[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; + +/* None.proto */ +static CYTHON_INLINE long __Pyx_mod_long(long, long); + +/* PyObjectCall.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); +#else +#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) +#endif + +/* PyThreadStateGet.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; +#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; +#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type +#else +#define __Pyx_PyThreadState_declare +#define __Pyx_PyThreadState_assign +#define __Pyx_PyErr_Occurred() PyErr_Occurred() +#endif + +/* PyErrFetchRestore.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) +#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) +#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) +#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) +#else +#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) +#endif +#else +#define __Pyx_PyErr_Clear() PyErr_Clear() +#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) +#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) +#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) +#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) +#endif + +/* RaiseException.proto */ +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); + 
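+/* Usage note: with CYTHON_FAST_THREAD_STATE, __Pyx_ErrFetch/__Pyx_ErrRestore
+ * expand to the *InState variants bound to the cached __pyx_tstate, so no
+ * PyThreadState lookup is paid per call. The generated error paths further
+ * down in this file rely on this pair to stash a pending exception, release
+ * the Py_buffer views via __Pyx_SafeReleaseBuffer, and then restore the
+ * exception, so the original error and traceback survive buffer cleanup. */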
+/* GetModuleGlobalName.proto */ +static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name); + +/* None.proto */ +static CYTHON_INLINE long __Pyx_div_long(long, long); + +/* ExtTypeTest.proto */ +static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); + +#define __Pyx_BufPtrStrided1d(type, buf, i0, s0) (type)((char*)buf + i0 * s0) +#define __Pyx_BufPtrStrided2d(type, buf, i0, s0, i1, s1) (type)((char*)buf + i0 * s0 + i1 * s1) +#define __Pyx_BufPtrStrided3d(type, buf, i0, s0, i1, s1, i2, s2) (type)((char*)buf + i0 * s0 + i1 * s1 + i2 * s2) +/* DictGetItem.proto */ +#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY +static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) { + PyObject *value; + value = PyDict_GetItemWithError(d, key); + if (unlikely(!value)) { + if (!PyErr_Occurred()) { + PyObject* args = PyTuple_Pack(1, key); + if (likely(args)) + PyErr_SetObject(PyExc_KeyError, args); + Py_XDECREF(args); + } + return NULL; + } + Py_INCREF(value); + return value; +} +#else + #define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key) +#endif + +/* RaiseTooManyValuesToUnpack.proto */ +static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); + +/* RaiseNeedMoreValuesToUnpack.proto */ +static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); + +/* RaiseNoneIterError.proto */ +static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); + +/* SaveResetException.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); +#else +#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) +#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) +#endif + +/* PyErrExceptionMatches.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) +static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); +#else +#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) +#endif + +/* GetException.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) +static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#else +static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); +#endif + +/* Import.proto */ +static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); + +/* CLineInTraceback.proto */ +#ifdef CYTHON_CLINE_IN_TRACEBACK +#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? 
c_line : 0) +#else +static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); +#endif + +/* CodeObjectCache.proto */ +typedef struct { + PyCodeObject* code_object; + int code_line; +} __Pyx_CodeObjectCacheEntry; +struct __Pyx_CodeObjectCache { + int count; + int max_count; + __Pyx_CodeObjectCacheEntry* entries; +}; +static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; +static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); +static PyCodeObject *__pyx_find_code_object(int code_line); +static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); + +/* AddTraceback.proto */ +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename); + +/* BufferStructDeclare.proto */ +typedef struct { + Py_ssize_t shape, strides, suboffsets; +} __Pyx_Buf_DimInfo; +typedef struct { + size_t refcount; + Py_buffer pybuffer; +} __Pyx_Buffer; +typedef struct { + __Pyx_Buffer *rcbuffer; + char *data; + __Pyx_Buf_DimInfo diminfo[8]; +} __Pyx_LocalBuf_ND; + +#if PY_MAJOR_VERSION < 3 + static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); + static void __Pyx_ReleaseBuffer(Py_buffer *view); +#else + #define __Pyx_GetBuffer PyObject_GetBuffer + #define __Pyx_ReleaseBuffer PyBuffer_Release +#endif + + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); + +/* RealImag.proto */ +#if CYTHON_CCOMPLEX + #ifdef __cplusplus + #define __Pyx_CREAL(z) ((z).real()) + #define __Pyx_CIMAG(z) ((z).imag()) + #else + #define __Pyx_CREAL(z) (__real__(z)) + #define __Pyx_CIMAG(z) (__imag__(z)) + #endif +#else + #define __Pyx_CREAL(z) ((z).real) + #define __Pyx_CIMAG(z) ((z).imag) +#endif +#if defined(__cplusplus) && CYTHON_CCOMPLEX\ + && (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103) + #define __Pyx_SET_CREAL(z,x) ((z).real(x)) + #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) +#else + #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) + #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) +#endif + +/* Arithmetic.proto */ +#if CYTHON_CCOMPLEX + #define __Pyx_c_eq_float(a, b) ((a)==(b)) + #define __Pyx_c_sum_float(a, b) ((a)+(b)) + #define __Pyx_c_diff_float(a, b) ((a)-(b)) + #define __Pyx_c_prod_float(a, b) ((a)*(b)) + #define __Pyx_c_quot_float(a, b) ((a)/(b)) + #define __Pyx_c_neg_float(a) (-(a)) + #ifdef __cplusplus + #define __Pyx_c_is_zero_float(z) ((z)==(float)0) + #define __Pyx_c_conj_float(z) (::std::conj(z)) + #if 1 + #define __Pyx_c_abs_float(z) (::std::abs(z)) + #define __Pyx_c_pow_float(a, b) (::std::pow(a, b)) + #endif + #else + #define __Pyx_c_is_zero_float(z) ((z)==0) + #define __Pyx_c_conj_float(z) (conjf(z)) + #if 1 + #define __Pyx_c_abs_float(z) (cabsf(z)) + #define __Pyx_c_pow_float(a, b) (cpowf(a, b)) + #endif + #endif +#else + static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex); + static 
CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex); + #if 1 + static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex, __pyx_t_float_complex); + #endif +#endif + +/* Arithmetic.proto */ +#if CYTHON_CCOMPLEX + #define __Pyx_c_eq_double(a, b) ((a)==(b)) + #define __Pyx_c_sum_double(a, b) ((a)+(b)) + #define __Pyx_c_diff_double(a, b) ((a)-(b)) + #define __Pyx_c_prod_double(a, b) ((a)*(b)) + #define __Pyx_c_quot_double(a, b) ((a)/(b)) + #define __Pyx_c_neg_double(a) (-(a)) + #ifdef __cplusplus + #define __Pyx_c_is_zero_double(z) ((z)==(double)0) + #define __Pyx_c_conj_double(z) (::std::conj(z)) + #if 1 + #define __Pyx_c_abs_double(z) (::std::abs(z)) + #define __Pyx_c_pow_double(a, b) (::std::pow(a, b)) + #endif + #else + #define __Pyx_c_is_zero_double(z) ((z)==0) + #define __Pyx_c_conj_double(z) (conj(z)) + #if 1 + #define __Pyx_c_abs_double(z) (cabs(z)) + #define __Pyx_c_pow_double(a, b) (cpow(a, b)) + #endif + #endif +#else + static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex); + static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex); + #if 1 + static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex, __pyx_t_double_complex); + #endif +#endif + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value); + +/* CIntFromPy.proto */ +static CYTHON_INLINE unsigned int __Pyx_PyInt_As_unsigned_int(PyObject *); + +/* CIntFromPy.proto */ +static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); + +/* CIntFromPy.proto */ +static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); + +/* FastTypeChecks.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) +static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); +#else +#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) +#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) +#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) +#endif + +/* CheckBinaryVersion.proto */ +static int __Pyx_check_binary_version(void); + +/* PyIdentifierFromString.proto */ +#if 
!defined(__Pyx_PyIdentifier_FromString) +#if PY_MAJOR_VERSION < 3 + #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s) +#else + #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s) +#endif +#endif + +/* ModuleImport.proto */ +static PyObject *__Pyx_ImportModule(const char *name); + +/* TypeImport.proto */ +static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); + +/* InitStrings.proto */ +static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); + + +/* Module declarations from 'cpython.buffer' */ + +/* Module declarations from 'libc.string' */ + +/* Module declarations from 'libc.stdio' */ + +/* Module declarations from '__builtin__' */ + +/* Module declarations from 'cpython.type' */ +static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; + +/* Module declarations from 'cpython' */ + +/* Module declarations from 'cpython.object' */ + +/* Module declarations from 'cpython.ref' */ + +/* Module declarations from 'cpython.mem' */ + +/* Module declarations from 'numpy' */ + +/* Module declarations from 'numpy' */ +static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; +static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; +static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; +static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; +static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; +static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ + +/* Module declarations from 'cython' */ + +/* Module declarations from 'astropy.convolution.boundary_fill' */ +static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_7astropy_11convolution_13boundary_fill_DTYPE_t = { "DTYPE_t", NULL, sizeof(__pyx_t_7astropy_11convolution_13boundary_fill_DTYPE_t), { 0 }, 0, 'R', 0, 0 }; +#define __Pyx_MODULE_NAME "astropy.convolution.boundary_fill" +extern int __pyx_module_is_main_astropy__convolution__boundary_fill; +int __pyx_module_is_main_astropy__convolution__boundary_fill = 0; + +/* Implementation of 'astropy.convolution.boundary_fill' */ +static PyObject *__pyx_builtin_ValueError; +static PyObject *__pyx_builtin_range; +static PyObject *__pyx_builtin_RuntimeError; +static PyObject *__pyx_builtin_ImportError; +static const char __pyx_k_f[] = "f"; +static const char __pyx_k_g[] = "g"; +static const char __pyx_k_i[] = "i"; +static const char __pyx_k_j[] = "j"; +static const char __pyx_k_k[] = "k"; +static const char __pyx_k_ii[] = "ii"; +static const char __pyx_k_jj[] = "jj"; +static const char __pyx_k_kk[] = "kk"; +static const char __pyx_k_np[] = "np"; +static const char __pyx_k_nx[] = "nx"; +static const char __pyx_k_ny[] = "ny"; +static const char __pyx_k_nz[] = "nz"; +static const char __pyx_k_bot[] = "bot"; +static const char __pyx_k_iii[] = "iii"; +static const char __pyx_k_jjj[] = "jjj"; +static const char __pyx_k_ker[] = "ker"; +static const char __pyx_k_kkk[] = "kkk"; +static const char __pyx_k_nkx[] = "nkx"; +static const char __pyx_k_nky[] = "nky"; +static const char __pyx_k_nkz[] = "nkz"; +static const char __pyx_k_top[] = "top"; +static const char __pyx_k_val[] = "val"; +static const char __pyx_k_wkx[] = "wkx"; +static const char __pyx_k_wky[] = "wky"; +static const char __pyx_k_wkz[] = "wkz"; +static const char __pyx_k_conv[] = "conv"; +static const char __pyx_k_main[] = "__main__"; +static const char __pyx_k_test[] = "__test__"; +static const char __pyx_k_DTYPE[] = "DTYPE"; +static const char __pyx_k_dtype[] = "dtype"; +static const char __pyx_k_empty[] = "empty"; +static const 
char __pyx_k_float[] = "float"; +static const char __pyx_k_iimax[] = "iimax"; +static const char __pyx_k_iimin[] = "iimin"; +static const char __pyx_k_jjmax[] = "jjmax"; +static const char __pyx_k_jjmin[] = "jjmin"; +static const char __pyx_k_kkmax[] = "kkmax"; +static const char __pyx_k_kkmin[] = "kkmin"; +static const char __pyx_k_numpy[] = "numpy"; +static const char __pyx_k_range[] = "range"; +static const char __pyx_k_import[] = "__import__"; +static const char __pyx_k_ValueError[] = "ValueError"; +static const char __pyx_k_fill_value[] = "fill_value"; +static const char __pyx_k_ImportError[] = "ImportError"; +static const char __pyx_k_RuntimeError[] = "RuntimeError"; +static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; +static const char __pyx_k_normalize_by_kernel[] = "normalize_by_kernel"; +static const char __pyx_k_convolve1d_boundary_fill[] = "convolve1d_boundary_fill"; +static const char __pyx_k_convolve2d_boundary_fill[] = "convolve2d_boundary_fill"; +static const char __pyx_k_convolve3d_boundary_fill[] = "convolve3d_boundary_fill"; +static const char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous"; +static const char __pyx_k_numpy_core_multiarray_failed_to[] = "numpy.core.multiarray failed to import"; +static const char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)"; +static const char __pyx_k_Convolution_kernel_must_have_odd[] = "Convolution kernel must have odd dimensions"; +static const char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd"; +static const char __pyx_k_Non_native_byte_order_not_suppor[] = "Non-native byte order not supported"; +static const char __pyx_k_astropy_convolution_boundary_fil[] = "astropy/convolution/boundary_fill.pyx"; +static const char __pyx_k_ndarray_is_not_Fortran_contiguou[] = "ndarray is not Fortran contiguous"; +static const char __pyx_k_numpy_core_umath_failed_to_impor[] = "numpy.core.umath failed to import"; +static const char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short."; +static const char __pyx_k_astropy_convolution_boundary_fil_2[] = "astropy.convolution.boundary_fill"; +static PyObject *__pyx_kp_s_Convolution_kernel_must_have_odd; +static PyObject *__pyx_n_s_DTYPE; +static PyObject *__pyx_kp_u_Format_string_allocated_too_shor; +static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2; +static PyObject *__pyx_n_s_ImportError; +static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor; +static PyObject *__pyx_n_s_RuntimeError; +static PyObject *__pyx_n_s_ValueError; +static PyObject *__pyx_kp_s_astropy_convolution_boundary_fil; +static PyObject *__pyx_n_s_astropy_convolution_boundary_fil_2; +static PyObject *__pyx_n_s_bot; +static PyObject *__pyx_n_s_cline_in_traceback; +static PyObject *__pyx_n_s_conv; +static PyObject *__pyx_n_s_convolve1d_boundary_fill; +static PyObject *__pyx_n_s_convolve2d_boundary_fill; +static PyObject *__pyx_n_s_convolve3d_boundary_fill; +static PyObject *__pyx_n_s_dtype; +static PyObject *__pyx_n_s_empty; +static PyObject *__pyx_n_s_f; +static PyObject *__pyx_n_s_fill_value; +static PyObject *__pyx_n_s_float; +static PyObject *__pyx_n_s_g; +static PyObject *__pyx_n_s_i; +static PyObject *__pyx_n_s_ii; +static PyObject *__pyx_n_s_iii; +static PyObject *__pyx_n_s_iimax; +static PyObject *__pyx_n_s_iimin; +static PyObject *__pyx_n_s_import; +static PyObject *__pyx_n_s_j; +static PyObject *__pyx_n_s_jj; +static PyObject *__pyx_n_s_jjj; 
+static PyObject *__pyx_n_s_jjmax; +static PyObject *__pyx_n_s_jjmin; +static PyObject *__pyx_n_s_k; +static PyObject *__pyx_n_s_ker; +static PyObject *__pyx_n_s_kk; +static PyObject *__pyx_n_s_kkk; +static PyObject *__pyx_n_s_kkmax; +static PyObject *__pyx_n_s_kkmin; +static PyObject *__pyx_n_s_main; +static PyObject *__pyx_kp_u_ndarray_is_not_C_contiguous; +static PyObject *__pyx_kp_u_ndarray_is_not_Fortran_contiguou; +static PyObject *__pyx_n_s_nkx; +static PyObject *__pyx_n_s_nky; +static PyObject *__pyx_n_s_nkz; +static PyObject *__pyx_n_s_normalize_by_kernel; +static PyObject *__pyx_n_s_np; +static PyObject *__pyx_n_s_numpy; +static PyObject *__pyx_kp_s_numpy_core_multiarray_failed_to; +static PyObject *__pyx_kp_s_numpy_core_umath_failed_to_impor; +static PyObject *__pyx_n_s_nx; +static PyObject *__pyx_n_s_ny; +static PyObject *__pyx_n_s_nz; +static PyObject *__pyx_n_s_range; +static PyObject *__pyx_n_s_test; +static PyObject *__pyx_n_s_top; +static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd; +static PyObject *__pyx_n_s_val; +static PyObject *__pyx_n_s_wkx; +static PyObject *__pyx_n_s_wky; +static PyObject *__pyx_n_s_wkz; +static PyObject *__pyx_pf_7astropy_11convolution_13boundary_fill_convolve1d_boundary_fill(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_f, PyArrayObject *__pyx_v_g, float __pyx_v_fill_value, int __pyx_v_normalize_by_kernel); /* proto */ +static PyObject *__pyx_pf_7astropy_11convolution_13boundary_fill_2convolve2d_boundary_fill(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_f, PyArrayObject *__pyx_v_g, float __pyx_v_fill_value, int __pyx_v_normalize_by_kernel); /* proto */ +static PyObject *__pyx_pf_7astropy_11convolution_13boundary_fill_4convolve3d_boundary_fill(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_f, PyArrayObject *__pyx_v_g, float __pyx_v_fill_value, int __pyx_v_normalize_by_kernel); /* proto */ +static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ +static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */ +static PyObject *__pyx_tuple_; +static PyObject *__pyx_tuple__2; +static PyObject *__pyx_tuple__3; +static PyObject *__pyx_tuple__4; +static PyObject *__pyx_tuple__5; +static PyObject *__pyx_tuple__6; +static PyObject *__pyx_tuple__7; +static PyObject *__pyx_tuple__8; +static PyObject *__pyx_tuple__9; +static PyObject *__pyx_tuple__10; +static PyObject *__pyx_tuple__11; +static PyObject *__pyx_tuple__12; +static PyObject *__pyx_tuple__13; +static PyObject *__pyx_tuple__15; +static PyObject *__pyx_tuple__17; +static PyObject *__pyx_codeobj__14; +static PyObject *__pyx_codeobj__16; +static PyObject *__pyx_codeobj__18; + +/* "astropy/convolution/boundary_fill.pyx":17 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve1d_boundary_fill(np.ndarray[DTYPE_t, ndim=1] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=1] g, + * float fill_value, + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7astropy_11convolution_13boundary_fill_1convolve1d_boundary_fill(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyMethodDef __pyx_mdef_7astropy_11convolution_13boundary_fill_1convolve1d_boundary_fill = {"convolve1d_boundary_fill", (PyCFunction)__pyx_pw_7astropy_11convolution_13boundary_fill_1convolve1d_boundary_fill, METH_VARARGS|METH_KEYWORDS, 0}; +static PyObject 
*__pyx_pw_7astropy_11convolution_13boundary_fill_1convolve1d_boundary_fill(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyArrayObject *__pyx_v_f = 0; + PyArrayObject *__pyx_v_g = 0; + float __pyx_v_fill_value; + int __pyx_v_normalize_by_kernel; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("convolve1d_boundary_fill (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_f,&__pyx_n_s_g,&__pyx_n_s_fill_value,&__pyx_n_s_normalize_by_kernel,0}; + PyObject* values[4] = {0,0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_f)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_g)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("convolve1d_boundary_fill", 1, 4, 4, 1); __PYX_ERR(0, 17, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_fill_value)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("convolve1d_boundary_fill", 1, 4, 4, 2); __PYX_ERR(0, 17, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 3: + if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_normalize_by_kernel)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("convolve1d_boundary_fill", 1, 4, 4, 3); __PYX_ERR(0, 17, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "convolve1d_boundary_fill") < 0)) __PYX_ERR(0, 17, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 4) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + } + __pyx_v_f = ((PyArrayObject *)values[0]); + __pyx_v_g = ((PyArrayObject *)values[1]); + __pyx_v_fill_value = __pyx_PyFloat_AsFloat(values[2]); if (unlikely((__pyx_v_fill_value == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 19, __pyx_L3_error) + __pyx_v_normalize_by_kernel = __Pyx_PyObject_IsTrue(values[3]); if (unlikely((__pyx_v_normalize_by_kernel == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 20, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("convolve1d_boundary_fill", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 17, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("astropy.convolution.boundary_fill.convolve1d_boundary_fill", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_f), __pyx_ptype_5numpy_ndarray, 1, "f", 0))) __PYX_ERR(0, 17, __pyx_L1_error) + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_g), __pyx_ptype_5numpy_ndarray, 1, "g", 
0))) __PYX_ERR(0, 18, __pyx_L1_error) + __pyx_r = __pyx_pf_7astropy_11convolution_13boundary_fill_convolve1d_boundary_fill(__pyx_self, __pyx_v_f, __pyx_v_g, __pyx_v_fill_value, __pyx_v_normalize_by_kernel); + + /* function exit code */ + goto __pyx_L0; + __pyx_L1_error:; + __pyx_r = NULL; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7astropy_11convolution_13boundary_fill_convolve1d_boundary_fill(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_f, PyArrayObject *__pyx_v_g, float __pyx_v_fill_value, int __pyx_v_normalize_by_kernel) { + int __pyx_v_nx; + int __pyx_v_nkx; + int __pyx_v_wkx; + PyArrayObject *__pyx_v_conv = 0; + unsigned int __pyx_v_i; + int __pyx_v_ii; + int __pyx_v_iimin; + int __pyx_v_iimax; + __pyx_t_7astropy_11convolution_13boundary_fill_DTYPE_t __pyx_v_top; + __pyx_t_7astropy_11convolution_13boundary_fill_DTYPE_t __pyx_v_bot; + __pyx_t_7astropy_11convolution_13boundary_fill_DTYPE_t __pyx_v_ker; + __pyx_t_7astropy_11convolution_13boundary_fill_DTYPE_t __pyx_v_val; + __Pyx_LocalBuf_ND __pyx_pybuffernd_conv; + __Pyx_Buffer __pyx_pybuffer_conv; + __Pyx_LocalBuf_ND __pyx_pybuffernd_f; + __Pyx_Buffer __pyx_pybuffer_f; + __Pyx_LocalBuf_ND __pyx_pybuffernd_g; + __Pyx_Buffer __pyx_pybuffer_g; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + int __pyx_t_5; + PyObject *__pyx_t_6 = NULL; + PyArrayObject *__pyx_t_7 = NULL; + int __pyx_t_8; + unsigned int __pyx_t_9; + int __pyx_t_10; + int __pyx_t_11; + Py_ssize_t __pyx_t_12; + size_t __pyx_t_13; + size_t __pyx_t_14; + size_t __pyx_t_15; + size_t __pyx_t_16; + size_t __pyx_t_17; + __Pyx_RefNannySetupContext("convolve1d_boundary_fill", 0); + __pyx_pybuffer_conv.pybuffer.buf = NULL; + __pyx_pybuffer_conv.refcount = 0; + __pyx_pybuffernd_conv.data = NULL; + __pyx_pybuffernd_conv.rcbuffer = &__pyx_pybuffer_conv; + __pyx_pybuffer_f.pybuffer.buf = NULL; + __pyx_pybuffer_f.refcount = 0; + __pyx_pybuffernd_f.data = NULL; + __pyx_pybuffernd_f.rcbuffer = &__pyx_pybuffer_f; + __pyx_pybuffer_g.pybuffer.buf = NULL; + __pyx_pybuffer_g.refcount = 0; + __pyx_pybuffernd_g.data = NULL; + __pyx_pybuffernd_g.rcbuffer = &__pyx_pybuffer_g; + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_f.rcbuffer->pybuffer, (PyObject*)__pyx_v_f, &__Pyx_TypeInfo_nn___pyx_t_7astropy_11convolution_13boundary_fill_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) __PYX_ERR(0, 17, __pyx_L1_error) + } + __pyx_pybuffernd_f.diminfo[0].strides = __pyx_pybuffernd_f.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_f.diminfo[0].shape = __pyx_pybuffernd_f.rcbuffer->pybuffer.shape[0]; + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_g.rcbuffer->pybuffer, (PyObject*)__pyx_v_g, &__Pyx_TypeInfo_nn___pyx_t_7astropy_11convolution_13boundary_fill_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) __PYX_ERR(0, 17, __pyx_L1_error) + } + __pyx_pybuffernd_g.diminfo[0].strides = __pyx_pybuffernd_g.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_g.diminfo[0].shape = __pyx_pybuffernd_g.rcbuffer->pybuffer.shape[0]; + + /* "astropy/convolution/boundary_fill.pyx":23 + * ): + * + * if g.shape[0] % 2 != 1: # <<<<<<<<<<<<<< + * raise ValueError("Convolution kernel must have odd dimensions") + * + */ + __pyx_t_1 = ((__Pyx_mod_long((__pyx_v_g->dimensions[0]), 2) != 1) != 0); + if (__pyx_t_1) { + 
+ /* "astropy/convolution/boundary_fill.pyx":24 + * + * if g.shape[0] % 2 != 1: + * raise ValueError("Convolution kernel must have odd dimensions") # <<<<<<<<<<<<<< + * + * assert f.dtype == DTYPE and g.dtype == DTYPE + */ + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_Raise(__pyx_t_2, 0, 0, 0); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __PYX_ERR(0, 24, __pyx_L1_error) + + /* "astropy/convolution/boundary_fill.pyx":23 + * ): + * + * if g.shape[0] % 2 != 1: # <<<<<<<<<<<<<< + * raise ValueError("Convolution kernel must have odd dimensions") + * + */ + } + + /* "astropy/convolution/boundary_fill.pyx":26 + * raise ValueError("Convolution kernel must have odd dimensions") + * + * assert f.dtype == DTYPE and g.dtype == DTYPE # <<<<<<<<<<<<<< + * + * cdef int nx = f.shape[0] + */ + #ifndef CYTHON_WITHOUT_ASSERTIONS + if (unlikely(!Py_OptimizeFlag)) { + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_f), __pyx_n_s_dtype); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 26, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_t_2, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 26, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 26, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_5) { + } else { + __pyx_t_1 = __pyx_t_5; + goto __pyx_L4_bool_binop_done; + } + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_g), __pyx_n_s_dtype); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 26, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_2 = PyObject_RichCompare(__pyx_t_4, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 26, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 26, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_1 = __pyx_t_5; + __pyx_L4_bool_binop_done:; + if (unlikely(!__pyx_t_1)) { + PyErr_SetNone(PyExc_AssertionError); + __PYX_ERR(0, 26, __pyx_L1_error) + } + } + #endif + + /* "astropy/convolution/boundary_fill.pyx":28 + * assert f.dtype == DTYPE and g.dtype == DTYPE + * + * cdef int nx = f.shape[0] # <<<<<<<<<<<<<< + * cdef int nkx = g.shape[0] + * cdef int wkx = nkx // 2 + */ + __pyx_v_nx = (__pyx_v_f->dimensions[0]); + + /* "astropy/convolution/boundary_fill.pyx":29 + * + * cdef int nx = f.shape[0] + * cdef int nkx = g.shape[0] # <<<<<<<<<<<<<< + * cdef int wkx = nkx // 2 + * cdef np.ndarray[DTYPE_t, ndim=1] conv = np.empty([nx], dtype=DTYPE) + */ + __pyx_v_nkx = (__pyx_v_g->dimensions[0]); + + /* "astropy/convolution/boundary_fill.pyx":30 + * cdef int nx = f.shape[0] + * cdef int nkx = g.shape[0] + * cdef int wkx = nkx // 2 # <<<<<<<<<<<<<< + * cdef np.ndarray[DTYPE_t, ndim=1] conv = np.empty([nx], dtype=DTYPE) + * cdef unsigned int i, iii + */ + __pyx_v_wkx = __Pyx_div_long(__pyx_v_nkx, 2); + + /* "astropy/convolution/boundary_fill.pyx":31 + * cdef 
int nkx = g.shape[0] + * cdef int wkx = nkx // 2 + * cdef np.ndarray[DTYPE_t, ndim=1] conv = np.empty([nx], dtype=DTYPE) # <<<<<<<<<<<<<< + * cdef unsigned int i, iii + * cdef int ii + */ + __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 31, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_empty); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 31, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_nx); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 31, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_4 = PyList_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 31, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_GIVEREF(__pyx_t_2); + PyList_SET_ITEM(__pyx_t_4, 0, __pyx_t_2); + __pyx_t_2 = 0; + __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 31, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_GIVEREF(__pyx_t_4); + PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4); + __pyx_t_4 = 0; + __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 31, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 31, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_6) < 0) __PYX_ERR(0, 31, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_2, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 31, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (!(likely(((__pyx_t_6) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_6, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 31, __pyx_L1_error) + __pyx_t_7 = ((PyArrayObject *)__pyx_t_6); + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_conv.rcbuffer->pybuffer, (PyObject*)__pyx_t_7, &__Pyx_TypeInfo_nn___pyx_t_7astropy_11convolution_13boundary_fill_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) { + __pyx_v_conv = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf = NULL; + __PYX_ERR(0, 31, __pyx_L1_error) + } else {__pyx_pybuffernd_conv.diminfo[0].strides = __pyx_pybuffernd_conv.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_conv.diminfo[0].shape = __pyx_pybuffernd_conv.rcbuffer->pybuffer.shape[0]; + } + } + __pyx_t_7 = 0; + __pyx_v_conv = ((PyArrayObject *)__pyx_t_6); + __pyx_t_6 = 0; + + /* "astropy/convolution/boundary_fill.pyx":40 + * + * # release the GIL + * with nogil: # <<<<<<<<<<<<<< + * + * # Now run the proper convolution + */ + { + #ifdef WITH_THREAD + PyThreadState *_save; + Py_UNBLOCK_THREADS + __Pyx_FastGIL_Remember(); + #endif + /*try:*/ { + + /* "astropy/convolution/boundary_fill.pyx":43 + * + * # Now run the proper convolution + * for i in range(nx): # <<<<<<<<<<<<<< + * top = 0. + * bot = 0. + */ + __pyx_t_8 = __pyx_v_nx; + for (__pyx_t_9 = 0; __pyx_t_9 < __pyx_t_8; __pyx_t_9+=1) { + __pyx_v_i = __pyx_t_9; + + /* "astropy/convolution/boundary_fill.pyx":44 + * # Now run the proper convolution + * for i in range(nx): + * top = 0. # <<<<<<<<<<<<<< + * bot = 0. + * iimin = i - wkx + */ + __pyx_v_top = 0.; + + /* "astropy/convolution/boundary_fill.pyx":45 + * for i in range(nx): + * top = 0. + * bot = 0. 
# <<<<<<<<<<<<<< + * iimin = i - wkx + * iimax = i + wkx + 1 + */ + __pyx_v_bot = 0.; + + /* "astropy/convolution/boundary_fill.pyx":46 + * top = 0. + * bot = 0. + * iimin = i - wkx # <<<<<<<<<<<<<< + * iimax = i + wkx + 1 + * for ii in range(iimin, iimax): + */ + __pyx_v_iimin = (__pyx_v_i - __pyx_v_wkx); + + /* "astropy/convolution/boundary_fill.pyx":47 + * bot = 0. + * iimin = i - wkx + * iimax = i + wkx + 1 # <<<<<<<<<<<<<< + * for ii in range(iimin, iimax): + * if ii < 0 or ii > nx - 1: + */ + __pyx_v_iimax = ((__pyx_v_i + __pyx_v_wkx) + 1); + + /* "astropy/convolution/boundary_fill.pyx":48 + * iimin = i - wkx + * iimax = i + wkx + 1 + * for ii in range(iimin, iimax): # <<<<<<<<<<<<<< + * if ii < 0 or ii > nx - 1: + * val = fill_value + */ + __pyx_t_10 = __pyx_v_iimax; + for (__pyx_t_11 = __pyx_v_iimin; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) { + __pyx_v_ii = __pyx_t_11; + + /* "astropy/convolution/boundary_fill.pyx":49 + * iimax = i + wkx + 1 + * for ii in range(iimin, iimax): + * if ii < 0 or ii > nx - 1: # <<<<<<<<<<<<<< + * val = fill_value + * else: + */ + __pyx_t_5 = ((__pyx_v_ii < 0) != 0); + if (!__pyx_t_5) { + } else { + __pyx_t_1 = __pyx_t_5; + goto __pyx_L14_bool_binop_done; + } + __pyx_t_5 = ((__pyx_v_ii > (__pyx_v_nx - 1)) != 0); + __pyx_t_1 = __pyx_t_5; + __pyx_L14_bool_binop_done:; + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_fill.pyx":50 + * for ii in range(iimin, iimax): + * if ii < 0 or ii > nx - 1: + * val = fill_value # <<<<<<<<<<<<<< + * else: + * val = f[ii] + */ + __pyx_v_val = __pyx_v_fill_value; + + /* "astropy/convolution/boundary_fill.pyx":49 + * iimax = i + wkx + 1 + * for ii in range(iimin, iimax): + * if ii < 0 or ii > nx - 1: # <<<<<<<<<<<<<< + * val = fill_value + * else: + */ + goto __pyx_L13; + } + + /* "astropy/convolution/boundary_fill.pyx":52 + * val = fill_value + * else: + * val = f[ii] # <<<<<<<<<<<<<< + * ker = g[(nkx - 1 - (wkx + ii - i))] + * if not npy_isnan(val): + */ + /*else*/ { + __pyx_t_12 = __pyx_v_ii; + if (__pyx_t_12 < 0) __pyx_t_12 += __pyx_pybuffernd_f.diminfo[0].shape; + __pyx_v_val = (*__Pyx_BufPtrStrided1d(__pyx_t_7astropy_11convolution_13boundary_fill_DTYPE_t *, __pyx_pybuffernd_f.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_f.diminfo[0].strides)); + } + __pyx_L13:; + + /* "astropy/convolution/boundary_fill.pyx":53 + * else: + * val = f[ii] + * ker = g[(nkx - 1 - (wkx + ii - i))] # <<<<<<<<<<<<<< + * if not npy_isnan(val): + * top += val * ker + */ + __pyx_t_13 = ((unsigned int)((__pyx_v_nkx - 1) - ((__pyx_v_wkx + __pyx_v_ii) - __pyx_v_i))); + __pyx_v_ker = (*__Pyx_BufPtrStrided1d(__pyx_t_7astropy_11convolution_13boundary_fill_DTYPE_t *, __pyx_pybuffernd_g.rcbuffer->pybuffer.buf, __pyx_t_13, __pyx_pybuffernd_g.diminfo[0].strides)); + + /* "astropy/convolution/boundary_fill.pyx":54 + * val = f[ii] + * ker = g[(nkx - 1 - (wkx + ii - i))] + * if not npy_isnan(val): # <<<<<<<<<<<<<< + * top += val * ker + * bot += ker + */ + __pyx_t_1 = ((!(npy_isnan(__pyx_v_val) != 0)) != 0); + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_fill.pyx":55 + * ker = g[(nkx - 1 - (wkx + ii - i))] + * if not npy_isnan(val): + * top += val * ker # <<<<<<<<<<<<<< + * bot += ker + * if normalize_by_kernel: + */ + __pyx_v_top = (__pyx_v_top + (__pyx_v_val * __pyx_v_ker)); + + /* "astropy/convolution/boundary_fill.pyx":56 + * if not npy_isnan(val): + * top += val * ker + * bot += ker # <<<<<<<<<<<<<< + * if normalize_by_kernel: + * if bot == 0: + */ + __pyx_v_bot = (__pyx_v_bot + __pyx_v_ker); + + /* 
"astropy/convolution/boundary_fill.pyx":54 + * val = f[ii] + * ker = g[(nkx - 1 - (wkx + ii - i))] + * if not npy_isnan(val): # <<<<<<<<<<<<<< + * top += val * ker + * bot += ker + */ + } + } + + /* "astropy/convolution/boundary_fill.pyx":57 + * top += val * ker + * bot += ker + * if normalize_by_kernel: # <<<<<<<<<<<<<< + * if bot == 0: + * conv[i] = f[i] + */ + __pyx_t_1 = (__pyx_v_normalize_by_kernel != 0); + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_fill.pyx":58 + * bot += ker + * if normalize_by_kernel: + * if bot == 0: # <<<<<<<<<<<<<< + * conv[i] = f[i] + * else: + */ + __pyx_t_1 = ((__pyx_v_bot == 0.0) != 0); + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_fill.pyx":59 + * if normalize_by_kernel: + * if bot == 0: + * conv[i] = f[i] # <<<<<<<<<<<<<< + * else: + * conv[i] = top / bot + */ + __pyx_t_14 = __pyx_v_i; + __pyx_t_15 = __pyx_v_i; + *__Pyx_BufPtrStrided1d(__pyx_t_7astropy_11convolution_13boundary_fill_DTYPE_t *, __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_conv.diminfo[0].strides) = (*__Pyx_BufPtrStrided1d(__pyx_t_7astropy_11convolution_13boundary_fill_DTYPE_t *, __pyx_pybuffernd_f.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_f.diminfo[0].strides)); + + /* "astropy/convolution/boundary_fill.pyx":58 + * bot += ker + * if normalize_by_kernel: + * if bot == 0: # <<<<<<<<<<<<<< + * conv[i] = f[i] + * else: + */ + goto __pyx_L18; + } + + /* "astropy/convolution/boundary_fill.pyx":61 + * conv[i] = f[i] + * else: + * conv[i] = top / bot # <<<<<<<<<<<<<< + * else: + * conv[i] = top + */ + /*else*/ { + if (unlikely(__pyx_v_bot == 0)) { + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + PyErr_SetString(PyExc_ZeroDivisionError, "float division"); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + __PYX_ERR(0, 61, __pyx_L7_error) + } + __pyx_t_16 = __pyx_v_i; + *__Pyx_BufPtrStrided1d(__pyx_t_7astropy_11convolution_13boundary_fill_DTYPE_t *, __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_conv.diminfo[0].strides) = (__pyx_v_top / __pyx_v_bot); + } + __pyx_L18:; + + /* "astropy/convolution/boundary_fill.pyx":57 + * top += val * ker + * bot += ker + * if normalize_by_kernel: # <<<<<<<<<<<<<< + * if bot == 0: + * conv[i] = f[i] + */ + goto __pyx_L17; + } + + /* "astropy/convolution/boundary_fill.pyx":63 + * conv[i] = top / bot + * else: + * conv[i] = top # <<<<<<<<<<<<<< + * # GIL acquired again here + * return conv + */ + /*else*/ { + __pyx_t_17 = __pyx_v_i; + *__Pyx_BufPtrStrided1d(__pyx_t_7astropy_11convolution_13boundary_fill_DTYPE_t *, __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_conv.diminfo[0].strides) = __pyx_v_top; + } + __pyx_L17:; + } + } + + /* "astropy/convolution/boundary_fill.pyx":40 + * + * # release the GIL + * with nogil: # <<<<<<<<<<<<<< + * + * # Now run the proper convolution + */ + /*finally:*/ { + /*normal exit:*/{ + #ifdef WITH_THREAD + __Pyx_FastGIL_Forget(); + Py_BLOCK_THREADS + #endif + goto __pyx_L8; + } + __pyx_L7_error: { + #ifdef WITH_THREAD + __Pyx_FastGIL_Forget(); + Py_BLOCK_THREADS + #endif + goto __pyx_L1_error; + } + __pyx_L8:; + } + } + + /* "astropy/convolution/boundary_fill.pyx":65 + * conv[i] = top + * # GIL acquired again here + * return conv # <<<<<<<<<<<<<< + * + * + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject *)__pyx_v_conv)); + __pyx_r = ((PyObject *)__pyx_v_conv); + goto __pyx_L0; + + /* "astropy/convolution/boundary_fill.pyx":17 + * + * 
@cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve1d_boundary_fill(np.ndarray[DTYPE_t, ndim=1] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=1] g, + * float fill_value, + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_6); + { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_conv.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_f.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_g.rcbuffer->pybuffer); + __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} + __Pyx_AddTraceback("astropy.convolution.boundary_fill.convolve1d_boundary_fill", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + goto __pyx_L2; + __pyx_L0:; + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_conv.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_f.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_g.rcbuffer->pybuffer); + __pyx_L2:; + __Pyx_XDECREF((PyObject *)__pyx_v_conv); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "astropy/convolution/boundary_fill.pyx":69 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve2d_boundary_fill(np.ndarray[DTYPE_t, ndim=2] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=2] g, + * float fill_value, + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7astropy_11convolution_13boundary_fill_3convolve2d_boundary_fill(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyMethodDef __pyx_mdef_7astropy_11convolution_13boundary_fill_3convolve2d_boundary_fill = {"convolve2d_boundary_fill", (PyCFunction)__pyx_pw_7astropy_11convolution_13boundary_fill_3convolve2d_boundary_fill, METH_VARARGS|METH_KEYWORDS, 0}; +static PyObject *__pyx_pw_7astropy_11convolution_13boundary_fill_3convolve2d_boundary_fill(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyArrayObject *__pyx_v_f = 0; + PyArrayObject *__pyx_v_g = 0; + float __pyx_v_fill_value; + int __pyx_v_normalize_by_kernel; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("convolve2d_boundary_fill (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_f,&__pyx_n_s_g,&__pyx_n_s_fill_value,&__pyx_n_s_normalize_by_kernel,0}; + PyObject* values[4] = {0,0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_f)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_g)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("convolve2d_boundary_fill", 1, 4, 4, 1); __PYX_ERR(0, 69, __pyx_L3_error) + } + 
CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_fill_value)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("convolve2d_boundary_fill", 1, 4, 4, 2); __PYX_ERR(0, 69, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 3: + if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_normalize_by_kernel)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("convolve2d_boundary_fill", 1, 4, 4, 3); __PYX_ERR(0, 69, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "convolve2d_boundary_fill") < 0)) __PYX_ERR(0, 69, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 4) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + } + __pyx_v_f = ((PyArrayObject *)values[0]); + __pyx_v_g = ((PyArrayObject *)values[1]); + __pyx_v_fill_value = __pyx_PyFloat_AsFloat(values[2]); if (unlikely((__pyx_v_fill_value == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 71, __pyx_L3_error) + __pyx_v_normalize_by_kernel = __Pyx_PyObject_IsTrue(values[3]); if (unlikely((__pyx_v_normalize_by_kernel == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 72, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("convolve2d_boundary_fill", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 69, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("astropy.convolution.boundary_fill.convolve2d_boundary_fill", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_f), __pyx_ptype_5numpy_ndarray, 1, "f", 0))) __PYX_ERR(0, 69, __pyx_L1_error) + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_g), __pyx_ptype_5numpy_ndarray, 1, "g", 0))) __PYX_ERR(0, 70, __pyx_L1_error) + __pyx_r = __pyx_pf_7astropy_11convolution_13boundary_fill_2convolve2d_boundary_fill(__pyx_self, __pyx_v_f, __pyx_v_g, __pyx_v_fill_value, __pyx_v_normalize_by_kernel); + + /* function exit code */ + goto __pyx_L0; + __pyx_L1_error:; + __pyx_r = NULL; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7astropy_11convolution_13boundary_fill_2convolve2d_boundary_fill(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_f, PyArrayObject *__pyx_v_g, float __pyx_v_fill_value, int __pyx_v_normalize_by_kernel) { + int __pyx_v_nx; + int __pyx_v_ny; + int __pyx_v_nkx; + int __pyx_v_nky; + int __pyx_v_wkx; + int __pyx_v_wky; + PyArrayObject *__pyx_v_conv = 0; + unsigned int __pyx_v_i; + unsigned int __pyx_v_j; + int __pyx_v_ii; + int __pyx_v_jj; + int __pyx_v_iimin; + int __pyx_v_iimax; + int __pyx_v_jjmin; + int __pyx_v_jjmax; + __pyx_t_7astropy_11convolution_13boundary_fill_DTYPE_t __pyx_v_top; + __pyx_t_7astropy_11convolution_13boundary_fill_DTYPE_t __pyx_v_bot; + __pyx_t_7astropy_11convolution_13boundary_fill_DTYPE_t __pyx_v_ker; + __pyx_t_7astropy_11convolution_13boundary_fill_DTYPE_t __pyx_v_val; + __Pyx_LocalBuf_ND __pyx_pybuffernd_conv; + __Pyx_Buffer __pyx_pybuffer_conv; + __Pyx_LocalBuf_ND __pyx_pybuffernd_f; + __Pyx_Buffer __pyx_pybuffer_f; + __Pyx_LocalBuf_ND __pyx_pybuffernd_g; + __Pyx_Buffer __pyx_pybuffer_g; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int 
__pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyArrayObject *__pyx_t_7 = NULL; + int __pyx_t_8; + unsigned int __pyx_t_9; + int __pyx_t_10; + unsigned int __pyx_t_11; + int __pyx_t_12; + int __pyx_t_13; + int __pyx_t_14; + int __pyx_t_15; + Py_ssize_t __pyx_t_16; + Py_ssize_t __pyx_t_17; + size_t __pyx_t_18; + size_t __pyx_t_19; + size_t __pyx_t_20; + size_t __pyx_t_21; + size_t __pyx_t_22; + size_t __pyx_t_23; + size_t __pyx_t_24; + size_t __pyx_t_25; + size_t __pyx_t_26; + size_t __pyx_t_27; + __Pyx_RefNannySetupContext("convolve2d_boundary_fill", 0); + __pyx_pybuffer_conv.pybuffer.buf = NULL; + __pyx_pybuffer_conv.refcount = 0; + __pyx_pybuffernd_conv.data = NULL; + __pyx_pybuffernd_conv.rcbuffer = &__pyx_pybuffer_conv; + __pyx_pybuffer_f.pybuffer.buf = NULL; + __pyx_pybuffer_f.refcount = 0; + __pyx_pybuffernd_f.data = NULL; + __pyx_pybuffernd_f.rcbuffer = &__pyx_pybuffer_f; + __pyx_pybuffer_g.pybuffer.buf = NULL; + __pyx_pybuffer_g.refcount = 0; + __pyx_pybuffernd_g.data = NULL; + __pyx_pybuffernd_g.rcbuffer = &__pyx_pybuffer_g; + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_f.rcbuffer->pybuffer, (PyObject*)__pyx_v_f, &__Pyx_TypeInfo_nn___pyx_t_7astropy_11convolution_13boundary_fill_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 69, __pyx_L1_error) + } + __pyx_pybuffernd_f.diminfo[0].strides = __pyx_pybuffernd_f.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_f.diminfo[0].shape = __pyx_pybuffernd_f.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_f.diminfo[1].strides = __pyx_pybuffernd_f.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_f.diminfo[1].shape = __pyx_pybuffernd_f.rcbuffer->pybuffer.shape[1]; + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_g.rcbuffer->pybuffer, (PyObject*)__pyx_v_g, &__Pyx_TypeInfo_nn___pyx_t_7astropy_11convolution_13boundary_fill_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 69, __pyx_L1_error) + } + __pyx_pybuffernd_g.diminfo[0].strides = __pyx_pybuffernd_g.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_g.diminfo[0].shape = __pyx_pybuffernd_g.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_g.diminfo[1].strides = __pyx_pybuffernd_g.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_g.diminfo[1].shape = __pyx_pybuffernd_g.rcbuffer->pybuffer.shape[1]; + + /* "astropy/convolution/boundary_fill.pyx":75 + * ): + * + * if g.shape[0] % 2 != 1 or g.shape[1] % 2 != 1: # <<<<<<<<<<<<<< + * raise ValueError("Convolution kernel must have odd dimensions") + * + */ + __pyx_t_2 = ((__Pyx_mod_long((__pyx_v_g->dimensions[0]), 2) != 1) != 0); + if (!__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L4_bool_binop_done; + } + __pyx_t_2 = ((__Pyx_mod_long((__pyx_v_g->dimensions[1]), 2) != 1) != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L4_bool_binop_done:; + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_fill.pyx":76 + * + * if g.shape[0] % 2 != 1 or g.shape[1] % 2 != 1: + * raise ValueError("Convolution kernel must have odd dimensions") # <<<<<<<<<<<<<< + * + * assert f.dtype == DTYPE and g.dtype == DTYPE + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 76, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(0, 76, __pyx_L1_error) + + /* 
"astropy/convolution/boundary_fill.pyx":75 + * ): + * + * if g.shape[0] % 2 != 1 or g.shape[1] % 2 != 1: # <<<<<<<<<<<<<< + * raise ValueError("Convolution kernel must have odd dimensions") + * + */ + } + + /* "astropy/convolution/boundary_fill.pyx":78 + * raise ValueError("Convolution kernel must have odd dimensions") + * + * assert f.dtype == DTYPE and g.dtype == DTYPE # <<<<<<<<<<<<<< + * + * cdef int nx = f.shape[0] + */ + #ifndef CYTHON_WITHOUT_ASSERTIONS + if (unlikely(!Py_OptimizeFlag)) { + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_f), __pyx_n_s_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 78, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 78, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_5 = PyObject_RichCompare(__pyx_t_3, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 78, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 78, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L6_bool_binop_done; + } + __pyx_t_5 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_g), __pyx_n_s_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 78, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 78, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_t_5, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 78, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 78, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_1 = __pyx_t_2; + __pyx_L6_bool_binop_done:; + if (unlikely(!__pyx_t_1)) { + PyErr_SetNone(PyExc_AssertionError); + __PYX_ERR(0, 78, __pyx_L1_error) + } + } + #endif + + /* "astropy/convolution/boundary_fill.pyx":80 + * assert f.dtype == DTYPE and g.dtype == DTYPE + * + * cdef int nx = f.shape[0] # <<<<<<<<<<<<<< + * cdef int ny = f.shape[1] + * cdef int nkx = g.shape[0] + */ + __pyx_v_nx = (__pyx_v_f->dimensions[0]); + + /* "astropy/convolution/boundary_fill.pyx":81 + * + * cdef int nx = f.shape[0] + * cdef int ny = f.shape[1] # <<<<<<<<<<<<<< + * cdef int nkx = g.shape[0] + * cdef int nky = g.shape[1] + */ + __pyx_v_ny = (__pyx_v_f->dimensions[1]); + + /* "astropy/convolution/boundary_fill.pyx":82 + * cdef int nx = f.shape[0] + * cdef int ny = f.shape[1] + * cdef int nkx = g.shape[0] # <<<<<<<<<<<<<< + * cdef int nky = g.shape[1] + * cdef int wkx = nkx // 2 + */ + __pyx_v_nkx = (__pyx_v_g->dimensions[0]); + + /* "astropy/convolution/boundary_fill.pyx":83 + * cdef int ny = f.shape[1] + * cdef int nkx = g.shape[0] + * cdef int nky = g.shape[1] # <<<<<<<<<<<<<< + * cdef int wkx = nkx // 2 + * cdef int wky = nky // 2 + */ + __pyx_v_nky = (__pyx_v_g->dimensions[1]); + + /* "astropy/convolution/boundary_fill.pyx":84 + * cdef int nkx = g.shape[0] + * cdef int nky = g.shape[1] + * cdef int wkx = nkx // 2 # <<<<<<<<<<<<<< + * cdef int wky = nky // 2 + * cdef np.ndarray[DTYPE_t, ndim=2] conv = np.empty([nx, ny], dtype=DTYPE) + */ + __pyx_v_wkx = __Pyx_div_long(__pyx_v_nkx, 2); + + /* "astropy/convolution/boundary_fill.pyx":85 + * cdef 
int nky = g.shape[1] + * cdef int wkx = nkx // 2 + * cdef int wky = nky // 2 # <<<<<<<<<<<<<< + * cdef np.ndarray[DTYPE_t, ndim=2] conv = np.empty([nx, ny], dtype=DTYPE) + * cdef unsigned int i, j, iii, jjj + */ + __pyx_v_wky = __Pyx_div_long(__pyx_v_nky, 2); + + /* "astropy/convolution/boundary_fill.pyx":86 + * cdef int wkx = nkx // 2 + * cdef int wky = nky // 2 + * cdef np.ndarray[DTYPE_t, ndim=2] conv = np.empty([nx, ny], dtype=DTYPE) # <<<<<<<<<<<<<< + * cdef unsigned int i, j, iii, jjj + * cdef int ii, jj + */ + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 86, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_empty); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 86, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_nx); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 86, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_ny); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 86, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 86, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GIVEREF(__pyx_t_3); + PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_3); + __Pyx_GIVEREF(__pyx_t_5); + PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_5); + __pyx_t_3 = 0; + __pyx_t_5 = 0; + __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 86, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_GIVEREF(__pyx_t_6); + PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_6); + __pyx_t_6 = 0; + __pyx_t_6 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 86, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 86, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + if (PyDict_SetItem(__pyx_t_6, __pyx_n_s_dtype, __pyx_t_3) < 0) __PYX_ERR(0, 86, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_5, __pyx_t_6); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 86, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 86, __pyx_L1_error) + __pyx_t_7 = ((PyArrayObject *)__pyx_t_3); + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_conv.rcbuffer->pybuffer, (PyObject*)__pyx_t_7, &__Pyx_TypeInfo_nn___pyx_t_7astropy_11convolution_13boundary_fill_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) { + __pyx_v_conv = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf = NULL; + __PYX_ERR(0, 86, __pyx_L1_error) + } else {__pyx_pybuffernd_conv.diminfo[0].strides = __pyx_pybuffernd_conv.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_conv.diminfo[0].shape = __pyx_pybuffernd_conv.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_conv.diminfo[1].strides = __pyx_pybuffernd_conv.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_conv.diminfo[1].shape = __pyx_pybuffernd_conv.rcbuffer->pybuffer.shape[1]; + } + } + __pyx_t_7 = 0; + __pyx_v_conv = ((PyArrayObject *)__pyx_t_3); + __pyx_t_3 = 0; + + /* "astropy/convolution/boundary_fill.pyx":95 + * + * # release the GIL + * with nogil: # <<<<<<<<<<<<<< + * + * # 
now run the proper convolution + */ + { + #ifdef WITH_THREAD + PyThreadState *_save; + Py_UNBLOCK_THREADS + __Pyx_FastGIL_Remember(); + #endif + /*try:*/ { + + /* "astropy/convolution/boundary_fill.pyx":98 + * + * # now run the proper convolution + * for i in range(nx): # <<<<<<<<<<<<<< + * for j in range(ny): + * top = 0. + */ + __pyx_t_8 = __pyx_v_nx; + for (__pyx_t_9 = 0; __pyx_t_9 < __pyx_t_8; __pyx_t_9+=1) { + __pyx_v_i = __pyx_t_9; + + /* "astropy/convolution/boundary_fill.pyx":99 + * # now run the proper convolution + * for i in range(nx): + * for j in range(ny): # <<<<<<<<<<<<<< + * top = 0. + * bot = 0. + */ + __pyx_t_10 = __pyx_v_ny; + for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) { + __pyx_v_j = __pyx_t_11; + + /* "astropy/convolution/boundary_fill.pyx":100 + * for i in range(nx): + * for j in range(ny): + * top = 0. # <<<<<<<<<<<<<< + * bot = 0. + * iimin = i - wkx + */ + __pyx_v_top = 0.; + + /* "astropy/convolution/boundary_fill.pyx":101 + * for j in range(ny): + * top = 0. + * bot = 0. # <<<<<<<<<<<<<< + * iimin = i - wkx + * iimax = i + wkx + 1 + */ + __pyx_v_bot = 0.; + + /* "astropy/convolution/boundary_fill.pyx":102 + * top = 0. + * bot = 0. + * iimin = i - wkx # <<<<<<<<<<<<<< + * iimax = i + wkx + 1 + * jjmin = j - wky + */ + __pyx_v_iimin = (__pyx_v_i - __pyx_v_wkx); + + /* "astropy/convolution/boundary_fill.pyx":103 + * bot = 0. + * iimin = i - wkx + * iimax = i + wkx + 1 # <<<<<<<<<<<<<< + * jjmin = j - wky + * jjmax = j + wky + 1 + */ + __pyx_v_iimax = ((__pyx_v_i + __pyx_v_wkx) + 1); + + /* "astropy/convolution/boundary_fill.pyx":104 + * iimin = i - wkx + * iimax = i + wkx + 1 + * jjmin = j - wky # <<<<<<<<<<<<<< + * jjmax = j + wky + 1 + * for ii in range(iimin, iimax): + */ + __pyx_v_jjmin = (__pyx_v_j - __pyx_v_wky); + + /* "astropy/convolution/boundary_fill.pyx":105 + * iimax = i + wkx + 1 + * jjmin = j - wky + * jjmax = j + wky + 1 # <<<<<<<<<<<<<< + * for ii in range(iimin, iimax): + * for jj in range(jjmin, jjmax): + */ + __pyx_v_jjmax = ((__pyx_v_j + __pyx_v_wky) + 1); + + /* "astropy/convolution/boundary_fill.pyx":106 + * jjmin = j - wky + * jjmax = j + wky + 1 + * for ii in range(iimin, iimax): # <<<<<<<<<<<<<< + * for jj in range(jjmin, jjmax): + * if ii < 0 or ii > nx - 1 or jj < 0 or jj > ny - 1: + */ + __pyx_t_12 = __pyx_v_iimax; + for (__pyx_t_13 = __pyx_v_iimin; __pyx_t_13 < __pyx_t_12; __pyx_t_13+=1) { + __pyx_v_ii = __pyx_t_13; + + /* "astropy/convolution/boundary_fill.pyx":107 + * jjmax = j + wky + 1 + * for ii in range(iimin, iimax): + * for jj in range(jjmin, jjmax): # <<<<<<<<<<<<<< + * if ii < 0 or ii > nx - 1 or jj < 0 or jj > ny - 1: + * val = fill_value + */ + __pyx_t_14 = __pyx_v_jjmax; + for (__pyx_t_15 = __pyx_v_jjmin; __pyx_t_15 < __pyx_t_14; __pyx_t_15+=1) { + __pyx_v_jj = __pyx_t_15; + + /* "astropy/convolution/boundary_fill.pyx":108 + * for ii in range(iimin, iimax): + * for jj in range(jjmin, jjmax): + * if ii < 0 or ii > nx - 1 or jj < 0 or jj > ny - 1: # <<<<<<<<<<<<<< + * val = fill_value + * else: + */ + __pyx_t_2 = ((__pyx_v_ii < 0) != 0); + if (!__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L20_bool_binop_done; + } + __pyx_t_2 = ((__pyx_v_ii > (__pyx_v_nx - 1)) != 0); + if (!__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L20_bool_binop_done; + } + __pyx_t_2 = ((__pyx_v_jj < 0) != 0); + if (!__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L20_bool_binop_done; + } + __pyx_t_2 = ((__pyx_v_jj > (__pyx_v_ny - 1)) != 0); + __pyx_t_1 = __pyx_t_2; + 
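+ /* The branch chain above is the generated form of the out-of-bounds test
+  * on line 108 of boundary_fill.pyx. As a rough guide to what these loops
+  * compute, here is a minimal NumPy sketch of the same "fill" boundary
+  * rule; the names are illustrative only, and it omits the NaN skipping
+  * and kernel normalization that the compiled code handles:
+  *
+  *     import numpy as np
+  *
+  *     def convolve2d_fill_sketch(f, g, fill_value=0.0):
+  *         wkx, wky = g.shape[0] // 2, g.shape[1] // 2
+  *         # pad with fill_value so every kernel window is fully defined
+  *         fp = np.pad(f, ((wkx, wkx), (wky, wky)),
+  *                     mode='constant', constant_values=fill_value)
+  *         gr = g[::-1, ::-1]  # flipped kernel, as in the g[...] indexing below
+  *         conv = np.empty_like(f)
+  *         for i in range(f.shape[0]):
+  *             for j in range(f.shape[1]):
+  *                 conv[i, j] = (fp[i:i + g.shape[0], j:j + g.shape[1]] * gr).sum()
+  *         return conv
+  */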
__pyx_L20_bool_binop_done:; + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_fill.pyx":109 + * for jj in range(jjmin, jjmax): + * if ii < 0 or ii > nx - 1 or jj < 0 or jj > ny - 1: + * val = fill_value # <<<<<<<<<<<<<< + * else: + * val = f[ii, jj] + */ + __pyx_v_val = __pyx_v_fill_value; + + /* "astropy/convolution/boundary_fill.pyx":108 + * for ii in range(iimin, iimax): + * for jj in range(jjmin, jjmax): + * if ii < 0 or ii > nx - 1 or jj < 0 or jj > ny - 1: # <<<<<<<<<<<<<< + * val = fill_value + * else: + */ + goto __pyx_L19; + } + + /* "astropy/convolution/boundary_fill.pyx":111 + * val = fill_value + * else: + * val = f[ii, jj] # <<<<<<<<<<<<<< + * ker = g[(nkx - 1 - (wkx + ii - i)), + * (nky - 1 - (wky + jj - j))] + */ + /*else*/ { + __pyx_t_16 = __pyx_v_ii; + __pyx_t_17 = __pyx_v_jj; + if (__pyx_t_16 < 0) __pyx_t_16 += __pyx_pybuffernd_f.diminfo[0].shape; + if (__pyx_t_17 < 0) __pyx_t_17 += __pyx_pybuffernd_f.diminfo[1].shape; + __pyx_v_val = (*__Pyx_BufPtrStrided2d(__pyx_t_7astropy_11convolution_13boundary_fill_DTYPE_t *, __pyx_pybuffernd_f.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_f.diminfo[0].strides, __pyx_t_17, __pyx_pybuffernd_f.diminfo[1].strides)); + } + __pyx_L19:; + + /* "astropy/convolution/boundary_fill.pyx":112 + * else: + * val = f[ii, jj] + * ker = g[(nkx - 1 - (wkx + ii - i)), # <<<<<<<<<<<<<< + * (nky - 1 - (wky + jj - j))] + * if not npy_isnan(val): + */ + __pyx_t_18 = ((unsigned int)((__pyx_v_nkx - 1) - ((__pyx_v_wkx + __pyx_v_ii) - __pyx_v_i))); + __pyx_t_19 = ((unsigned int)((__pyx_v_nky - 1) - ((__pyx_v_wky + __pyx_v_jj) - __pyx_v_j))); + __pyx_v_ker = (*__Pyx_BufPtrStrided2d(__pyx_t_7astropy_11convolution_13boundary_fill_DTYPE_t *, __pyx_pybuffernd_g.rcbuffer->pybuffer.buf, __pyx_t_18, __pyx_pybuffernd_g.diminfo[0].strides, __pyx_t_19, __pyx_pybuffernd_g.diminfo[1].strides)); + + /* "astropy/convolution/boundary_fill.pyx":114 + * ker = g[(nkx - 1 - (wkx + ii - i)), + * (nky - 1 - (wky + jj - j))] + * if not npy_isnan(val): # <<<<<<<<<<<<<< + * top += val * ker + * bot += ker + */ + __pyx_t_1 = ((!(npy_isnan(__pyx_v_val) != 0)) != 0); + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_fill.pyx":115 + * (nky - 1 - (wky + jj - j))] + * if not npy_isnan(val): + * top += val * ker # <<<<<<<<<<<<<< + * bot += ker + * if normalize_by_kernel: + */ + __pyx_v_top = (__pyx_v_top + (__pyx_v_val * __pyx_v_ker)); + + /* "astropy/convolution/boundary_fill.pyx":116 + * if not npy_isnan(val): + * top += val * ker + * bot += ker # <<<<<<<<<<<<<< + * if normalize_by_kernel: + * if bot == 0: + */ + __pyx_v_bot = (__pyx_v_bot + __pyx_v_ker); + + /* "astropy/convolution/boundary_fill.pyx":114 + * ker = g[(nkx - 1 - (wkx + ii - i)), + * (nky - 1 - (wky + jj - j))] + * if not npy_isnan(val): # <<<<<<<<<<<<<< + * top += val * ker + * bot += ker + */ + } + } + } + + /* "astropy/convolution/boundary_fill.pyx":117 + * top += val * ker + * bot += ker + * if normalize_by_kernel: # <<<<<<<<<<<<<< + * if bot == 0: + * conv[i, j] = f[i, j] + */ + __pyx_t_1 = (__pyx_v_normalize_by_kernel != 0); + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_fill.pyx":118 + * bot += ker + * if normalize_by_kernel: + * if bot == 0: # <<<<<<<<<<<<<< + * conv[i, j] = f[i, j] + * else: + */ + __pyx_t_1 = ((__pyx_v_bot == 0.0) != 0); + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_fill.pyx":119 + * if normalize_by_kernel: + * if bot == 0: + * conv[i, j] = f[i, j] # <<<<<<<<<<<<<< + * else: + * conv[i, j] = top / bot + */ + __pyx_t_20 = __pyx_v_i; + __pyx_t_21 = __pyx_v_j; + 
__pyx_t_22 = __pyx_v_i; + __pyx_t_23 = __pyx_v_j; + *__Pyx_BufPtrStrided2d(__pyx_t_7astropy_11convolution_13boundary_fill_DTYPE_t *, __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf, __pyx_t_22, __pyx_pybuffernd_conv.diminfo[0].strides, __pyx_t_23, __pyx_pybuffernd_conv.diminfo[1].strides) = (*__Pyx_BufPtrStrided2d(__pyx_t_7astropy_11convolution_13boundary_fill_DTYPE_t *, __pyx_pybuffernd_f.rcbuffer->pybuffer.buf, __pyx_t_20, __pyx_pybuffernd_f.diminfo[0].strides, __pyx_t_21, __pyx_pybuffernd_f.diminfo[1].strides)); + + /* "astropy/convolution/boundary_fill.pyx":118 + * bot += ker + * if normalize_by_kernel: + * if bot == 0: # <<<<<<<<<<<<<< + * conv[i, j] = f[i, j] + * else: + */ + goto __pyx_L26; + } + + /* "astropy/convolution/boundary_fill.pyx":121 + * conv[i, j] = f[i, j] + * else: + * conv[i, j] = top / bot # <<<<<<<<<<<<<< + * else: + * conv[i, j] = top + */ + /*else*/ { + if (unlikely(__pyx_v_bot == 0)) { + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + PyErr_SetString(PyExc_ZeroDivisionError, "float division"); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + __PYX_ERR(0, 121, __pyx_L9_error) + } + __pyx_t_24 = __pyx_v_i; + __pyx_t_25 = __pyx_v_j; + *__Pyx_BufPtrStrided2d(__pyx_t_7astropy_11convolution_13boundary_fill_DTYPE_t *, __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf, __pyx_t_24, __pyx_pybuffernd_conv.diminfo[0].strides, __pyx_t_25, __pyx_pybuffernd_conv.diminfo[1].strides) = (__pyx_v_top / __pyx_v_bot); + } + __pyx_L26:; + + /* "astropy/convolution/boundary_fill.pyx":117 + * top += val * ker + * bot += ker + * if normalize_by_kernel: # <<<<<<<<<<<<<< + * if bot == 0: + * conv[i, j] = f[i, j] + */ + goto __pyx_L25; + } + + /* "astropy/convolution/boundary_fill.pyx":123 + * conv[i, j] = top / bot + * else: + * conv[i, j] = top # <<<<<<<<<<<<<< + * # GIL acquired again here + * return conv + */ + /*else*/ { + __pyx_t_26 = __pyx_v_i; + __pyx_t_27 = __pyx_v_j; + *__Pyx_BufPtrStrided2d(__pyx_t_7astropy_11convolution_13boundary_fill_DTYPE_t *, __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf, __pyx_t_26, __pyx_pybuffernd_conv.diminfo[0].strides, __pyx_t_27, __pyx_pybuffernd_conv.diminfo[1].strides) = __pyx_v_top; + } + __pyx_L25:; + } + } + } + + /* "astropy/convolution/boundary_fill.pyx":95 + * + * # release the GIL + * with nogil: # <<<<<<<<<<<<<< + * + * # now run the proper convolution + */ + /*finally:*/ { + /*normal exit:*/{ + #ifdef WITH_THREAD + __Pyx_FastGIL_Forget(); + Py_BLOCK_THREADS + #endif + goto __pyx_L10; + } + __pyx_L9_error: { + #ifdef WITH_THREAD + __Pyx_FastGIL_Forget(); + Py_BLOCK_THREADS + #endif + goto __pyx_L1_error; + } + __pyx_L10:; + } + } + + /* "astropy/convolution/boundary_fill.pyx":125 + * conv[i, j] = top + * # GIL acquired again here + * return conv # <<<<<<<<<<<<<< + * + * + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject *)__pyx_v_conv)); + __pyx_r = ((PyObject *)__pyx_v_conv); + goto __pyx_L0; + + /* "astropy/convolution/boundary_fill.pyx":69 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve2d_boundary_fill(np.ndarray[DTYPE_t, ndim=2] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=2] g, + * float fill_value, + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ErrFetch(&__pyx_type, 
&__pyx_value, &__pyx_tb); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_conv.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_f.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_g.rcbuffer->pybuffer); + __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} + __Pyx_AddTraceback("astropy.convolution.boundary_fill.convolve2d_boundary_fill", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + goto __pyx_L2; + __pyx_L0:; + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_conv.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_f.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_g.rcbuffer->pybuffer); + __pyx_L2:; + __Pyx_XDECREF((PyObject *)__pyx_v_conv); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "astropy/convolution/boundary_fill.pyx":129 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve3d_boundary_fill(np.ndarray[DTYPE_t, ndim=3] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=3] g, + * float fill_value, + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7astropy_11convolution_13boundary_fill_5convolve3d_boundary_fill(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyMethodDef __pyx_mdef_7astropy_11convolution_13boundary_fill_5convolve3d_boundary_fill = {"convolve3d_boundary_fill", (PyCFunction)__pyx_pw_7astropy_11convolution_13boundary_fill_5convolve3d_boundary_fill, METH_VARARGS|METH_KEYWORDS, 0}; +static PyObject *__pyx_pw_7astropy_11convolution_13boundary_fill_5convolve3d_boundary_fill(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyArrayObject *__pyx_v_f = 0; + PyArrayObject *__pyx_v_g = 0; + float __pyx_v_fill_value; + int __pyx_v_normalize_by_kernel; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("convolve3d_boundary_fill (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_f,&__pyx_n_s_g,&__pyx_n_s_fill_value,&__pyx_n_s_normalize_by_kernel,0}; + PyObject* values[4] = {0,0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_f)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_g)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("convolve3d_boundary_fill", 1, 4, 4, 1); __PYX_ERR(0, 129, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_fill_value)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("convolve3d_boundary_fill", 1, 4, 4, 2); __PYX_ERR(0, 129, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 3: + if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_normalize_by_kernel)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("convolve3d_boundary_fill", 1, 4, 4, 3); __PYX_ERR(0, 129, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { 
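+ /* The generated prologue around this point unpacks the four required
+  * arguments of convolve3d_boundary_fill, passed either positionally or
+  * by keyword. A call from Python, shown only to illustrate the expected
+  * signature (the array contents are placeholders):
+  *
+  *     import numpy as np
+  *     from astropy.convolution.boundary_fill import convolve3d_boundary_fill
+  *
+  *     f = np.ones((5, 5, 5))   # data cube; dtype must equal the module DTYPE
+  *     g = np.ones((3, 3, 3))   # kernel; every dimension must be odd
+  *     out = convolve3d_boundary_fill(f, g, fill_value=0.0,
+  *                                    normalize_by_kernel=True)
+  */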
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "convolve3d_boundary_fill") < 0)) __PYX_ERR(0, 129, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 4) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + } + __pyx_v_f = ((PyArrayObject *)values[0]); + __pyx_v_g = ((PyArrayObject *)values[1]); + __pyx_v_fill_value = __pyx_PyFloat_AsFloat(values[2]); if (unlikely((__pyx_v_fill_value == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 131, __pyx_L3_error) + __pyx_v_normalize_by_kernel = __Pyx_PyObject_IsTrue(values[3]); if (unlikely((__pyx_v_normalize_by_kernel == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 132, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("convolve3d_boundary_fill", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 129, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("astropy.convolution.boundary_fill.convolve3d_boundary_fill", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_f), __pyx_ptype_5numpy_ndarray, 1, "f", 0))) __PYX_ERR(0, 129, __pyx_L1_error) + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_g), __pyx_ptype_5numpy_ndarray, 1, "g", 0))) __PYX_ERR(0, 130, __pyx_L1_error) + __pyx_r = __pyx_pf_7astropy_11convolution_13boundary_fill_4convolve3d_boundary_fill(__pyx_self, __pyx_v_f, __pyx_v_g, __pyx_v_fill_value, __pyx_v_normalize_by_kernel); + + /* function exit code */ + goto __pyx_L0; + __pyx_L1_error:; + __pyx_r = NULL; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7astropy_11convolution_13boundary_fill_4convolve3d_boundary_fill(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_f, PyArrayObject *__pyx_v_g, float __pyx_v_fill_value, int __pyx_v_normalize_by_kernel) { + int __pyx_v_nx; + int __pyx_v_ny; + int __pyx_v_nz; + int __pyx_v_nkx; + int __pyx_v_nky; + int __pyx_v_nkz; + int __pyx_v_wkx; + int __pyx_v_wky; + int __pyx_v_wkz; + PyArrayObject *__pyx_v_conv = 0; + unsigned int __pyx_v_i; + unsigned int __pyx_v_j; + unsigned int __pyx_v_k; + int __pyx_v_ii; + int __pyx_v_jj; + int __pyx_v_kk; + int __pyx_v_iimin; + int __pyx_v_iimax; + int __pyx_v_jjmin; + int __pyx_v_jjmax; + int __pyx_v_kkmin; + int __pyx_v_kkmax; + __pyx_t_7astropy_11convolution_13boundary_fill_DTYPE_t __pyx_v_top; + __pyx_t_7astropy_11convolution_13boundary_fill_DTYPE_t __pyx_v_bot; + __pyx_t_7astropy_11convolution_13boundary_fill_DTYPE_t __pyx_v_ker; + __pyx_t_7astropy_11convolution_13boundary_fill_DTYPE_t __pyx_v_val; + __Pyx_LocalBuf_ND __pyx_pybuffernd_conv; + __Pyx_Buffer __pyx_pybuffer_conv; + __Pyx_LocalBuf_ND __pyx_pybuffernd_f; + __Pyx_Buffer __pyx_pybuffer_f; + __Pyx_LocalBuf_ND __pyx_pybuffernd_g; + __Pyx_Buffer __pyx_pybuffer_g; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyArrayObject *__pyx_t_8 = NULL; + int __pyx_t_9; + unsigned int __pyx_t_10; + int __pyx_t_11; + unsigned int __pyx_t_12; + int __pyx_t_13; + unsigned int __pyx_t_14; + int __pyx_t_15; + int __pyx_t_16; 
+ int __pyx_t_17; + int __pyx_t_18; + int __pyx_t_19; + int __pyx_t_20; + Py_ssize_t __pyx_t_21; + Py_ssize_t __pyx_t_22; + Py_ssize_t __pyx_t_23; + size_t __pyx_t_24; + size_t __pyx_t_25; + size_t __pyx_t_26; + size_t __pyx_t_27; + size_t __pyx_t_28; + size_t __pyx_t_29; + size_t __pyx_t_30; + size_t __pyx_t_31; + size_t __pyx_t_32; + size_t __pyx_t_33; + size_t __pyx_t_34; + size_t __pyx_t_35; + size_t __pyx_t_36; + size_t __pyx_t_37; + size_t __pyx_t_38; + __Pyx_RefNannySetupContext("convolve3d_boundary_fill", 0); + __pyx_pybuffer_conv.pybuffer.buf = NULL; + __pyx_pybuffer_conv.refcount = 0; + __pyx_pybuffernd_conv.data = NULL; + __pyx_pybuffernd_conv.rcbuffer = &__pyx_pybuffer_conv; + __pyx_pybuffer_f.pybuffer.buf = NULL; + __pyx_pybuffer_f.refcount = 0; + __pyx_pybuffernd_f.data = NULL; + __pyx_pybuffernd_f.rcbuffer = &__pyx_pybuffer_f; + __pyx_pybuffer_g.pybuffer.buf = NULL; + __pyx_pybuffer_g.refcount = 0; + __pyx_pybuffernd_g.data = NULL; + __pyx_pybuffernd_g.rcbuffer = &__pyx_pybuffer_g; + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_f.rcbuffer->pybuffer, (PyObject*)__pyx_v_f, &__Pyx_TypeInfo_nn___pyx_t_7astropy_11convolution_13boundary_fill_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 3, 0, __pyx_stack) == -1)) __PYX_ERR(0, 129, __pyx_L1_error) + } + __pyx_pybuffernd_f.diminfo[0].strides = __pyx_pybuffernd_f.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_f.diminfo[0].shape = __pyx_pybuffernd_f.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_f.diminfo[1].strides = __pyx_pybuffernd_f.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_f.diminfo[1].shape = __pyx_pybuffernd_f.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_f.diminfo[2].strides = __pyx_pybuffernd_f.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_f.diminfo[2].shape = __pyx_pybuffernd_f.rcbuffer->pybuffer.shape[2]; + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_g.rcbuffer->pybuffer, (PyObject*)__pyx_v_g, &__Pyx_TypeInfo_nn___pyx_t_7astropy_11convolution_13boundary_fill_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 3, 0, __pyx_stack) == -1)) __PYX_ERR(0, 129, __pyx_L1_error) + } + __pyx_pybuffernd_g.diminfo[0].strides = __pyx_pybuffernd_g.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_g.diminfo[0].shape = __pyx_pybuffernd_g.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_g.diminfo[1].strides = __pyx_pybuffernd_g.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_g.diminfo[1].shape = __pyx_pybuffernd_g.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_g.diminfo[2].strides = __pyx_pybuffernd_g.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_g.diminfo[2].shape = __pyx_pybuffernd_g.rcbuffer->pybuffer.shape[2]; + + /* "astropy/convolution/boundary_fill.pyx":134 + * bint normalize_by_kernel): + * + * if g.shape[0] % 2 != 1 or g.shape[1] % 2 != 1 or g.shape[2] % 2 != 1: # <<<<<<<<<<<<<< + * raise ValueError("Convolution kernel must have odd dimensions") + * + */ + __pyx_t_2 = ((__Pyx_mod_long((__pyx_v_g->dimensions[0]), 2) != 1) != 0); + if (!__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L4_bool_binop_done; + } + __pyx_t_2 = ((__Pyx_mod_long((__pyx_v_g->dimensions[1]), 2) != 1) != 0); + if (!__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L4_bool_binop_done; + } + __pyx_t_2 = ((__Pyx_mod_long((__pyx_v_g->dimensions[2]), 2) != 1) != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L4_bool_binop_done:; + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_fill.pyx":135 + * + * if g.shape[0] % 2 != 1 or g.shape[1] % 2 != 1 or 
g.shape[2] % 2 != 1: + * raise ValueError("Convolution kernel must have odd dimensions") # <<<<<<<<<<<<<< + * + * assert f.dtype == DTYPE and g.dtype == DTYPE + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 135, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(0, 135, __pyx_L1_error) + + /* "astropy/convolution/boundary_fill.pyx":134 + * bint normalize_by_kernel): + * + * if g.shape[0] % 2 != 1 or g.shape[1] % 2 != 1 or g.shape[2] % 2 != 1: # <<<<<<<<<<<<<< + * raise ValueError("Convolution kernel must have odd dimensions") + * + */ + } + + /* "astropy/convolution/boundary_fill.pyx":137 + * raise ValueError("Convolution kernel must have odd dimensions") + * + * assert f.dtype == DTYPE and g.dtype == DTYPE # <<<<<<<<<<<<<< + * + * cdef int nx = f.shape[0] + */ + #ifndef CYTHON_WITHOUT_ASSERTIONS + if (unlikely(!Py_OptimizeFlag)) { + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_f), __pyx_n_s_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 137, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 137, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_5 = PyObject_RichCompare(__pyx_t_3, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 137, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 137, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L7_bool_binop_done; + } + __pyx_t_5 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_g), __pyx_n_s_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 137, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 137, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_t_5, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 137, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 137, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_1 = __pyx_t_2; + __pyx_L7_bool_binop_done:; + if (unlikely(!__pyx_t_1)) { + PyErr_SetNone(PyExc_AssertionError); + __PYX_ERR(0, 137, __pyx_L1_error) + } + } + #endif + + /* "astropy/convolution/boundary_fill.pyx":139 + * assert f.dtype == DTYPE and g.dtype == DTYPE + * + * cdef int nx = f.shape[0] # <<<<<<<<<<<<<< + * cdef int ny = f.shape[1] + * cdef int nz = f.shape[2] + */ + __pyx_v_nx = (__pyx_v_f->dimensions[0]); + + /* "astropy/convolution/boundary_fill.pyx":140 + * + * cdef int nx = f.shape[0] + * cdef int ny = f.shape[1] # <<<<<<<<<<<<<< + * cdef int nz = f.shape[2] + * cdef int nkx = g.shape[0] + */ + __pyx_v_ny = (__pyx_v_f->dimensions[1]); + + /* "astropy/convolution/boundary_fill.pyx":141 + * cdef int nx = f.shape[0] + * cdef int ny = f.shape[1] + * cdef int nz = f.shape[2] # <<<<<<<<<<<<<< + * cdef int nkx = g.shape[0] + * cdef int nky = g.shape[1] + */ + __pyx_v_nz = (__pyx_v_f->dimensions[2]); + + /* "astropy/convolution/boundary_fill.pyx":142 + * cdef int ny = f.shape[1] + * cdef int nz = f.shape[2] + * cdef 
int nkx = g.shape[0] # <<<<<<<<<<<<<< + * cdef int nky = g.shape[1] + * cdef int nkz = g.shape[2] + */ + __pyx_v_nkx = (__pyx_v_g->dimensions[0]); + + /* "astropy/convolution/boundary_fill.pyx":143 + * cdef int nz = f.shape[2] + * cdef int nkx = g.shape[0] + * cdef int nky = g.shape[1] # <<<<<<<<<<<<<< + * cdef int nkz = g.shape[2] + * cdef int wkx = nkx // 2 + */ + __pyx_v_nky = (__pyx_v_g->dimensions[1]); + + /* "astropy/convolution/boundary_fill.pyx":144 + * cdef int nkx = g.shape[0] + * cdef int nky = g.shape[1] + * cdef int nkz = g.shape[2] # <<<<<<<<<<<<<< + * cdef int wkx = nkx // 2 + * cdef int wky = nky // 2 + */ + __pyx_v_nkz = (__pyx_v_g->dimensions[2]); + + /* "astropy/convolution/boundary_fill.pyx":145 + * cdef int nky = g.shape[1] + * cdef int nkz = g.shape[2] + * cdef int wkx = nkx // 2 # <<<<<<<<<<<<<< + * cdef int wky = nky // 2 + * cdef int wkz = nkz // 2 + */ + __pyx_v_wkx = __Pyx_div_long(__pyx_v_nkx, 2); + + /* "astropy/convolution/boundary_fill.pyx":146 + * cdef int nkz = g.shape[2] + * cdef int wkx = nkx // 2 + * cdef int wky = nky // 2 # <<<<<<<<<<<<<< + * cdef int wkz = nkz // 2 + * cdef np.ndarray[DTYPE_t, ndim=3] conv = np.empty([nx, ny, nz], dtype=DTYPE) + */ + __pyx_v_wky = __Pyx_div_long(__pyx_v_nky, 2); + + /* "astropy/convolution/boundary_fill.pyx":147 + * cdef int wkx = nkx // 2 + * cdef int wky = nky // 2 + * cdef int wkz = nkz // 2 # <<<<<<<<<<<<<< + * cdef np.ndarray[DTYPE_t, ndim=3] conv = np.empty([nx, ny, nz], dtype=DTYPE) + * cdef unsigned int i, j, k, iii, jjj, kkk + */ + __pyx_v_wkz = __Pyx_div_long(__pyx_v_nkz, 2); + + /* "astropy/convolution/boundary_fill.pyx":148 + * cdef int wky = nky // 2 + * cdef int wkz = nkz // 2 + * cdef np.ndarray[DTYPE_t, ndim=3] conv = np.empty([nx, ny, nz], dtype=DTYPE) # <<<<<<<<<<<<<< + * cdef unsigned int i, j, k, iii, jjj, kkk + * cdef int ii, jj, kk + */ + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 148, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_empty); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 148, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_nx); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 148, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_ny); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 148, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_6 = __Pyx_PyInt_From_int(__pyx_v_nz); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 148, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_7 = PyList_New(3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 148, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __Pyx_GIVEREF(__pyx_t_3); + PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_3); + __Pyx_GIVEREF(__pyx_t_5); + PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_5); + __Pyx_GIVEREF(__pyx_t_6); + PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_6); + __pyx_t_3 = 0; + __pyx_t_5 = 0; + __pyx_t_6 = 0; + __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 148, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GIVEREF(__pyx_t_7); + PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_7); + __pyx_t_7 = 0; + __pyx_t_7 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 148, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 148, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + if (PyDict_SetItem(__pyx_t_7, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 148, 
__pyx_L1_error) + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_6, __pyx_t_7); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 148, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 148, __pyx_L1_error) + __pyx_t_8 = ((PyArrayObject *)__pyx_t_5); + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_conv.rcbuffer->pybuffer, (PyObject*)__pyx_t_8, &__Pyx_TypeInfo_nn___pyx_t_7astropy_11convolution_13boundary_fill_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 3, 0, __pyx_stack) == -1)) { + __pyx_v_conv = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf = NULL; + __PYX_ERR(0, 148, __pyx_L1_error) + } else {__pyx_pybuffernd_conv.diminfo[0].strides = __pyx_pybuffernd_conv.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_conv.diminfo[0].shape = __pyx_pybuffernd_conv.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_conv.diminfo[1].strides = __pyx_pybuffernd_conv.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_conv.diminfo[1].shape = __pyx_pybuffernd_conv.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_conv.diminfo[2].strides = __pyx_pybuffernd_conv.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_conv.diminfo[2].shape = __pyx_pybuffernd_conv.rcbuffer->pybuffer.shape[2]; + } + } + __pyx_t_8 = 0; + __pyx_v_conv = ((PyArrayObject *)__pyx_t_5); + __pyx_t_5 = 0; + + /* "astropy/convolution/boundary_fill.pyx":157 + * + * # release the GIL + * with nogil: # <<<<<<<<<<<<<< + * + * # Now run the proper convolution + */ + { + #ifdef WITH_THREAD + PyThreadState *_save; + Py_UNBLOCK_THREADS + __Pyx_FastGIL_Remember(); + #endif + /*try:*/ { + + /* "astropy/convolution/boundary_fill.pyx":160 + * + * # Now run the proper convolution + * for i in range(nx): # <<<<<<<<<<<<<< + * for j in range(ny): + * for k in range(nz): + */ + __pyx_t_9 = __pyx_v_nx; + for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_9; __pyx_t_10+=1) { + __pyx_v_i = __pyx_t_10; + + /* "astropy/convolution/boundary_fill.pyx":161 + * # Now run the proper convolution + * for i in range(nx): + * for j in range(ny): # <<<<<<<<<<<<<< + * for k in range(nz): + * top = 0. + */ + __pyx_t_11 = __pyx_v_ny; + for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) { + __pyx_v_j = __pyx_t_12; + + /* "astropy/convolution/boundary_fill.pyx":162 + * for i in range(nx): + * for j in range(ny): + * for k in range(nz): # <<<<<<<<<<<<<< + * top = 0. + * bot = 0. + */ + __pyx_t_13 = __pyx_v_nz; + for (__pyx_t_14 = 0; __pyx_t_14 < __pyx_t_13; __pyx_t_14+=1) { + __pyx_v_k = __pyx_t_14; + + /* "astropy/convolution/boundary_fill.pyx":163 + * for j in range(ny): + * for k in range(nz): + * top = 0. # <<<<<<<<<<<<<< + * bot = 0. + * iimin = i - wkx + */ + __pyx_v_top = 0.; + + /* "astropy/convolution/boundary_fill.pyx":164 + * for k in range(nz): + * top = 0. + * bot = 0. # <<<<<<<<<<<<<< + * iimin = i - wkx + * iimax = i + wkx + 1 + */ + __pyx_v_bot = 0.; + + /* "astropy/convolution/boundary_fill.pyx":165 + * top = 0. + * bot = 0. + * iimin = i - wkx # <<<<<<<<<<<<<< + * iimax = i + wkx + 1 + * jjmin = j - wky + */ + __pyx_v_iimin = (__pyx_v_i - __pyx_v_wkx); + + /* "astropy/convolution/boundary_fill.pyx":166 + * bot = 0. 
+ * iimin = i - wkx + * iimax = i + wkx + 1 # <<<<<<<<<<<<<< + * jjmin = j - wky + * jjmax = j + wky + 1 + */ + __pyx_v_iimax = ((__pyx_v_i + __pyx_v_wkx) + 1); + + /* "astropy/convolution/boundary_fill.pyx":167 + * iimin = i - wkx + * iimax = i + wkx + 1 + * jjmin = j - wky # <<<<<<<<<<<<<< + * jjmax = j + wky + 1 + * kkmin = k - wkz + */ + __pyx_v_jjmin = (__pyx_v_j - __pyx_v_wky); + + /* "astropy/convolution/boundary_fill.pyx":168 + * iimax = i + wkx + 1 + * jjmin = j - wky + * jjmax = j + wky + 1 # <<<<<<<<<<<<<< + * kkmin = k - wkz + * kkmax = k + wkz + 1 + */ + __pyx_v_jjmax = ((__pyx_v_j + __pyx_v_wky) + 1); + + /* "astropy/convolution/boundary_fill.pyx":169 + * jjmin = j - wky + * jjmax = j + wky + 1 + * kkmin = k - wkz # <<<<<<<<<<<<<< + * kkmax = k + wkz + 1 + * for ii in range(iimin, iimax): + */ + __pyx_v_kkmin = (__pyx_v_k - __pyx_v_wkz); + + /* "astropy/convolution/boundary_fill.pyx":170 + * jjmax = j + wky + 1 + * kkmin = k - wkz + * kkmax = k + wkz + 1 # <<<<<<<<<<<<<< + * for ii in range(iimin, iimax): + * for jj in range(jjmin, jjmax): + */ + __pyx_v_kkmax = ((__pyx_v_k + __pyx_v_wkz) + 1); + + /* "astropy/convolution/boundary_fill.pyx":171 + * kkmin = k - wkz + * kkmax = k + wkz + 1 + * for ii in range(iimin, iimax): # <<<<<<<<<<<<<< + * for jj in range(jjmin, jjmax): + * for kk in range(kkmin, kkmax): + */ + __pyx_t_15 = __pyx_v_iimax; + for (__pyx_t_16 = __pyx_v_iimin; __pyx_t_16 < __pyx_t_15; __pyx_t_16+=1) { + __pyx_v_ii = __pyx_t_16; + + /* "astropy/convolution/boundary_fill.pyx":172 + * kkmax = k + wkz + 1 + * for ii in range(iimin, iimax): + * for jj in range(jjmin, jjmax): # <<<<<<<<<<<<<< + * for kk in range(kkmin, kkmax): + * if ii < 0 or ii > nx - 1 or jj < 0 or jj > ny - 1 or kk < 0 or kk > nz - 1: + */ + __pyx_t_17 = __pyx_v_jjmax; + for (__pyx_t_18 = __pyx_v_jjmin; __pyx_t_18 < __pyx_t_17; __pyx_t_18+=1) { + __pyx_v_jj = __pyx_t_18; + + /* "astropy/convolution/boundary_fill.pyx":173 + * for ii in range(iimin, iimax): + * for jj in range(jjmin, jjmax): + * for kk in range(kkmin, kkmax): # <<<<<<<<<<<<<< + * if ii < 0 or ii > nx - 1 or jj < 0 or jj > ny - 1 or kk < 0 or kk > nz - 1: + * val = fill_value + */ + __pyx_t_19 = __pyx_v_kkmax; + for (__pyx_t_20 = __pyx_v_kkmin; __pyx_t_20 < __pyx_t_19; __pyx_t_20+=1) { + __pyx_v_kk = __pyx_t_20; + + /* "astropy/convolution/boundary_fill.pyx":174 + * for jj in range(jjmin, jjmax): + * for kk in range(kkmin, kkmax): + * if ii < 0 or ii > nx - 1 or jj < 0 or jj > ny - 1 or kk < 0 or kk > nz - 1: # <<<<<<<<<<<<<< + * val = fill_value + * else: + */ + __pyx_t_2 = ((__pyx_v_ii < 0) != 0); + if (!__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L25_bool_binop_done; + } + __pyx_t_2 = ((__pyx_v_ii > (__pyx_v_nx - 1)) != 0); + if (!__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L25_bool_binop_done; + } + __pyx_t_2 = ((__pyx_v_jj < 0) != 0); + if (!__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L25_bool_binop_done; + } + __pyx_t_2 = ((__pyx_v_jj > (__pyx_v_ny - 1)) != 0); + if (!__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L25_bool_binop_done; + } + __pyx_t_2 = ((__pyx_v_kk < 0) != 0); + if (!__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L25_bool_binop_done; + } + __pyx_t_2 = ((__pyx_v_kk > (__pyx_v_nz - 1)) != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L25_bool_binop_done:; + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_fill.pyx":175 + * for kk in range(kkmin, kkmax): + * if ii < 0 or ii > nx - 1 or jj < 0 or jj > ny - 1 or kk < 0 or kk > nz - 1: 
+ * val = fill_value # <<<<<<<<<<<<<< + * else: + * val = f[ii, jj, kk] + */ + __pyx_v_val = __pyx_v_fill_value; + + /* "astropy/convolution/boundary_fill.pyx":174 + * for jj in range(jjmin, jjmax): + * for kk in range(kkmin, kkmax): + * if ii < 0 or ii > nx - 1 or jj < 0 or jj > ny - 1 or kk < 0 or kk > nz - 1: # <<<<<<<<<<<<<< + * val = fill_value + * else: + */ + goto __pyx_L24; + } + + /* "astropy/convolution/boundary_fill.pyx":177 + * val = fill_value + * else: + * val = f[ii, jj, kk] # <<<<<<<<<<<<<< + * ker = g[(nkx - 1 - (wkx + ii - i)), + * (nky - 1 - (wky + jj - j)), + */ + /*else*/ { + __pyx_t_21 = __pyx_v_ii; + __pyx_t_22 = __pyx_v_jj; + __pyx_t_23 = __pyx_v_kk; + if (__pyx_t_21 < 0) __pyx_t_21 += __pyx_pybuffernd_f.diminfo[0].shape; + if (__pyx_t_22 < 0) __pyx_t_22 += __pyx_pybuffernd_f.diminfo[1].shape; + if (__pyx_t_23 < 0) __pyx_t_23 += __pyx_pybuffernd_f.diminfo[2].shape; + __pyx_v_val = (*__Pyx_BufPtrStrided3d(__pyx_t_7astropy_11convolution_13boundary_fill_DTYPE_t *, __pyx_pybuffernd_f.rcbuffer->pybuffer.buf, __pyx_t_21, __pyx_pybuffernd_f.diminfo[0].strides, __pyx_t_22, __pyx_pybuffernd_f.diminfo[1].strides, __pyx_t_23, __pyx_pybuffernd_f.diminfo[2].strides)); + } + __pyx_L24:; + + /* "astropy/convolution/boundary_fill.pyx":178 + * else: + * val = f[ii, jj, kk] + * ker = g[(nkx - 1 - (wkx + ii - i)), # <<<<<<<<<<<<<< + * (nky - 1 - (wky + jj - j)), + * (nkz - 1 - (wkz + kk - k))] + */ + __pyx_t_24 = ((unsigned int)((__pyx_v_nkx - 1) - ((__pyx_v_wkx + __pyx_v_ii) - __pyx_v_i))); + __pyx_t_25 = ((unsigned int)((__pyx_v_nky - 1) - ((__pyx_v_wky + __pyx_v_jj) - __pyx_v_j))); + __pyx_t_26 = ((unsigned int)((__pyx_v_nkz - 1) - ((__pyx_v_wkz + __pyx_v_kk) - __pyx_v_k))); + __pyx_v_ker = (*__Pyx_BufPtrStrided3d(__pyx_t_7astropy_11convolution_13boundary_fill_DTYPE_t *, __pyx_pybuffernd_g.rcbuffer->pybuffer.buf, __pyx_t_24, __pyx_pybuffernd_g.diminfo[0].strides, __pyx_t_25, __pyx_pybuffernd_g.diminfo[1].strides, __pyx_t_26, __pyx_pybuffernd_g.diminfo[2].strides)); + + /* "astropy/convolution/boundary_fill.pyx":181 + * (nky - 1 - (wky + jj - j)), + * (nkz - 1 - (wkz + kk - k))] + * if not npy_isnan(val): # <<<<<<<<<<<<<< + * top += val * ker + * bot += ker + */ + __pyx_t_1 = ((!(npy_isnan(__pyx_v_val) != 0)) != 0); + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_fill.pyx":182 + * (nkz - 1 - (wkz + kk - k))] + * if not npy_isnan(val): + * top += val * ker # <<<<<<<<<<<<<< + * bot += ker + * if normalize_by_kernel: + */ + __pyx_v_top = (__pyx_v_top + (__pyx_v_val * __pyx_v_ker)); + + /* "astropy/convolution/boundary_fill.pyx":183 + * if not npy_isnan(val): + * top += val * ker + * bot += ker # <<<<<<<<<<<<<< + * if normalize_by_kernel: + * if bot == 0: + */ + __pyx_v_bot = (__pyx_v_bot + __pyx_v_ker); + + /* "astropy/convolution/boundary_fill.pyx":181 + * (nky - 1 - (wky + jj - j)), + * (nkz - 1 - (wkz + kk - k))] + * if not npy_isnan(val): # <<<<<<<<<<<<<< + * top += val * ker + * bot += ker + */ + } + } + } + } + + /* "astropy/convolution/boundary_fill.pyx":184 + * top += val * ker + * bot += ker + * if normalize_by_kernel: # <<<<<<<<<<<<<< + * if bot == 0: + * conv[i, j, k] = f[i, j, k] + */ + __pyx_t_1 = (__pyx_v_normalize_by_kernel != 0); + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_fill.pyx":185 + * bot += ker + * if normalize_by_kernel: + * if bot == 0: # <<<<<<<<<<<<<< + * conv[i, j, k] = f[i, j, k] + * else: + */ + __pyx_t_1 = ((__pyx_v_bot == 0.0) != 0); + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_fill.pyx":186 + * if normalize_by_kernel: + * 
if bot == 0: + * conv[i, j, k] = f[i, j, k] # <<<<<<<<<<<<<< + * else: + * conv[i, j, k] = top / bot + */ + __pyx_t_27 = __pyx_v_i; + __pyx_t_28 = __pyx_v_j; + __pyx_t_29 = __pyx_v_k; + __pyx_t_30 = __pyx_v_i; + __pyx_t_31 = __pyx_v_j; + __pyx_t_32 = __pyx_v_k; + *__Pyx_BufPtrStrided3d(__pyx_t_7astropy_11convolution_13boundary_fill_DTYPE_t *, __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf, __pyx_t_30, __pyx_pybuffernd_conv.diminfo[0].strides, __pyx_t_31, __pyx_pybuffernd_conv.diminfo[1].strides, __pyx_t_32, __pyx_pybuffernd_conv.diminfo[2].strides) = (*__Pyx_BufPtrStrided3d(__pyx_t_7astropy_11convolution_13boundary_fill_DTYPE_t *, __pyx_pybuffernd_f.rcbuffer->pybuffer.buf, __pyx_t_27, __pyx_pybuffernd_f.diminfo[0].strides, __pyx_t_28, __pyx_pybuffernd_f.diminfo[1].strides, __pyx_t_29, __pyx_pybuffernd_f.diminfo[2].strides)); + + /* "astropy/convolution/boundary_fill.pyx":185 + * bot += ker + * if normalize_by_kernel: + * if bot == 0: # <<<<<<<<<<<<<< + * conv[i, j, k] = f[i, j, k] + * else: + */ + goto __pyx_L33; + } + + /* "astropy/convolution/boundary_fill.pyx":188 + * conv[i, j, k] = f[i, j, k] + * else: + * conv[i, j, k] = top / bot # <<<<<<<<<<<<<< + * else: + * conv[i, j, k] = top + */ + /*else*/ { + if (unlikely(__pyx_v_bot == 0)) { + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + PyErr_SetString(PyExc_ZeroDivisionError, "float division"); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + __PYX_ERR(0, 188, __pyx_L10_error) + } + __pyx_t_33 = __pyx_v_i; + __pyx_t_34 = __pyx_v_j; + __pyx_t_35 = __pyx_v_k; + *__Pyx_BufPtrStrided3d(__pyx_t_7astropy_11convolution_13boundary_fill_DTYPE_t *, __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf, __pyx_t_33, __pyx_pybuffernd_conv.diminfo[0].strides, __pyx_t_34, __pyx_pybuffernd_conv.diminfo[1].strides, __pyx_t_35, __pyx_pybuffernd_conv.diminfo[2].strides) = (__pyx_v_top / __pyx_v_bot); + } + __pyx_L33:; + + /* "astropy/convolution/boundary_fill.pyx":184 + * top += val * ker + * bot += ker + * if normalize_by_kernel: # <<<<<<<<<<<<<< + * if bot == 0: + * conv[i, j, k] = f[i, j, k] + */ + goto __pyx_L32; + } + + /* "astropy/convolution/boundary_fill.pyx":190 + * conv[i, j, k] = top / bot + * else: + * conv[i, j, k] = top # <<<<<<<<<<<<<< + * # GIL acquired again here + * return conv + */ + /*else*/ { + __pyx_t_36 = __pyx_v_i; + __pyx_t_37 = __pyx_v_j; + __pyx_t_38 = __pyx_v_k; + *__Pyx_BufPtrStrided3d(__pyx_t_7astropy_11convolution_13boundary_fill_DTYPE_t *, __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf, __pyx_t_36, __pyx_pybuffernd_conv.diminfo[0].strides, __pyx_t_37, __pyx_pybuffernd_conv.diminfo[1].strides, __pyx_t_38, __pyx_pybuffernd_conv.diminfo[2].strides) = __pyx_v_top; + } + __pyx_L32:; + } + } + } + } + + /* "astropy/convolution/boundary_fill.pyx":157 + * + * # release the GIL + * with nogil: # <<<<<<<<<<<<<< + * + * # Now run the proper convolution + */ + /*finally:*/ { + /*normal exit:*/{ + #ifdef WITH_THREAD + __Pyx_FastGIL_Forget(); + Py_BLOCK_THREADS + #endif + goto __pyx_L11; + } + __pyx_L10_error: { + #ifdef WITH_THREAD + __Pyx_FastGIL_Forget(); + Py_BLOCK_THREADS + #endif + goto __pyx_L1_error; + } + __pyx_L11:; + } + } + + /* "astropy/convolution/boundary_fill.pyx":192 + * conv[i, j, k] = top + * # GIL acquired again here + * return conv # <<<<<<<<<<<<<< + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject *)__pyx_v_conv)); + __pyx_r = ((PyObject *)__pyx_v_conv); + goto __pyx_L0; + + /* "astropy/convolution/boundary_fill.pyx":129 + * + * 
@cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve3d_boundary_fill(np.ndarray[DTYPE_t, ndim=3] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=3] g, + * float fill_value, + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_conv.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_f.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_g.rcbuffer->pybuffer); + __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} + __Pyx_AddTraceback("astropy.convolution.boundary_fill.convolve3d_boundary_fill", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + goto __pyx_L2; + __pyx_L0:; + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_conv.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_f.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_g.rcbuffer->pybuffer); + __pyx_L2:; + __Pyx_XDECREF((PyObject *)__pyx_v_conv); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":214 + * # experimental exception made for __getbuffer__ and __releasebuffer__ + * # -- the details of this may change. + * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< + * # This implementation of getbuffer is geared towards Cython + * # requirements, and does not yet fulfill the PEP. 
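+ *
+ * The routine below fills the Py_buffer with the array's data pointer,
+ * shape and strides so that PEP 3118 consumers can use it. A buffer
+ * request of the kind it answers can be issued from Python, shown here
+ * purely as an illustration:
+ *
+ *     import numpy as np
+ *     m = memoryview(np.zeros((2, 3)))   # issues a PyBUF_* request
+ *     assert m.shape == (2, 3)
+ *     assert m.strides == (24, 8)        # float64, C-contiguous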
+ */ + +/* Python wrapper */ +static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ +static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); + __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { + int __pyx_v_copy_shape; + int __pyx_v_i; + int __pyx_v_ndim; + int __pyx_v_endian_detector; + int __pyx_v_little_endian; + int __pyx_v_t; + char *__pyx_v_f; + PyArray_Descr *__pyx_v_descr = 0; + int __pyx_v_offset; + int __pyx_v_hasfields; + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + int __pyx_t_5; + PyObject *__pyx_t_6 = NULL; + char *__pyx_t_7; + __Pyx_RefNannySetupContext("__getbuffer__", 0); + if (__pyx_v_info != NULL) { + __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); + __Pyx_GIVEREF(__pyx_v_info->obj); + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":220 + * # of flags + * + * if info == NULL: return # <<<<<<<<<<<<<< + * + * cdef int copy_shape, i, ndim + */ + __pyx_t_1 = ((__pyx_v_info == NULL) != 0); + if (__pyx_t_1) { + __pyx_r = 0; + goto __pyx_L0; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":223 + * + * cdef int copy_shape, i, ndim + * cdef int endian_detector = 1 # <<<<<<<<<<<<<< + * cdef bint little_endian = ((&endian_detector)[0] != 0) + * + */ + __pyx_v_endian_detector = 1; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":224 + * cdef int copy_shape, i, ndim + * cdef int endian_detector = 1 + * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< + * + * ndim = PyArray_NDIM(self) + */ + __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":226 + * cdef bint little_endian = ((&endian_detector)[0] != 0) + * + * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< + * + * if sizeof(npy_intp) != sizeof(Py_ssize_t): + */ + __pyx_v_ndim = PyArray_NDIM(__pyx_v_self); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":228 + * ndim = PyArray_NDIM(self) + * + * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< + * copy_shape = 1 + * else: + */ + __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); + if (__pyx_t_1) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":229 + * + * if sizeof(npy_intp) != sizeof(Py_ssize_t): + * copy_shape = 1 # <<<<<<<<<<<<<< + * else: + * copy_shape = 0 + */ + __pyx_v_copy_shape = 1; + + /* 
"../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":228 + * ndim = PyArray_NDIM(self) + * + * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< + * copy_shape = 1 + * else: + */ + goto __pyx_L4; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":231 + * copy_shape = 1 + * else: + * copy_shape = 0 # <<<<<<<<<<<<<< + * + * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) + */ + /*else*/ { + __pyx_v_copy_shape = 0; + } + __pyx_L4:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":233 + * copy_shape = 0 + * + * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< + * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): + * raise ValueError(u"ndarray is not C contiguous") + */ + __pyx_t_2 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0); + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L6_bool_binop_done; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":234 + * + * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) + * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< + * raise ValueError(u"ndarray is not C contiguous") + * + */ + __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS) != 0)) != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L6_bool_binop_done:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":233 + * copy_shape = 0 + * + * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< + * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): + * raise ValueError(u"ndarray is not C contiguous") + */ + if (__pyx_t_1) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":235 + * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) + * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): + * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< + * + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 235, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 235, __pyx_L1_error) + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":233 + * copy_shape = 0 + * + * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< + * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): + * raise ValueError(u"ndarray is not C contiguous") + */ + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":237 + * raise ValueError(u"ndarray is not C contiguous") + * + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< + * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): + * raise ValueError(u"ndarray is 
not Fortran contiguous") + */ + __pyx_t_2 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0); + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L9_bool_binop_done; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":238 + * + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) + * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< + * raise ValueError(u"ndarray is not Fortran contiguous") + * + */ + __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS) != 0)) != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L9_bool_binop_done:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":237 + * raise ValueError(u"ndarray is not C contiguous") + * + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< + * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): + * raise ValueError(u"ndarray is not Fortran contiguous") + */ + if (__pyx_t_1) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":239 + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) + * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): + * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< + * + * info.buf = PyArray_DATA(self) + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 239, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 239, __pyx_L1_error) + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":237 + * raise ValueError(u"ndarray is not C contiguous") + * + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< + * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): + * raise ValueError(u"ndarray is not Fortran contiguous") + */ + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":241 + * raise ValueError(u"ndarray is not Fortran contiguous") + * + * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< + * info.ndim = ndim + * if copy_shape: + */ + __pyx_v_info->buf = PyArray_DATA(__pyx_v_self); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":242 + * + * info.buf = PyArray_DATA(self) + * info.ndim = ndim # <<<<<<<<<<<<<< + * if copy_shape: + * # Allocate new buffer for strides and shape info. + */ + __pyx_v_info->ndim = __pyx_v_ndim; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":243 + * info.buf = PyArray_DATA(self) + * info.ndim = ndim + * if copy_shape: # <<<<<<<<<<<<<< + * # Allocate new buffer for strides and shape info. + * # This is allocated as one block, strides first. + */ + __pyx_t_1 = (__pyx_v_copy_shape != 0); + if (__pyx_t_1) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":246 + * # Allocate new buffer for strides and shape info. 
+ * # This is allocated as one block, strides first. + * info.strides = PyObject_Malloc(sizeof(Py_ssize_t) * 2 * ndim) # <<<<<<<<<<<<<< + * info.shape = info.strides + ndim + * for i in range(ndim): + */ + __pyx_v_info->strides = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * 2) * ((size_t)__pyx_v_ndim)))); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":247 + * # This is allocated as one block, strides first. + * info.strides = PyObject_Malloc(sizeof(Py_ssize_t) * 2 * ndim) + * info.shape = info.strides + ndim # <<<<<<<<<<<<<< + * for i in range(ndim): + * info.strides[i] = PyArray_STRIDES(self)[i] + */ + __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":248 + * info.strides = PyObject_Malloc(sizeof(Py_ssize_t) * 2 * ndim) + * info.shape = info.strides + ndim + * for i in range(ndim): # <<<<<<<<<<<<<< + * info.strides[i] = PyArray_STRIDES(self)[i] + * info.shape[i] = PyArray_DIMS(self)[i] + */ + __pyx_t_4 = __pyx_v_ndim; + for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { + __pyx_v_i = __pyx_t_5; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":249 + * info.shape = info.strides + ndim + * for i in range(ndim): + * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< + * info.shape[i] = PyArray_DIMS(self)[i] + * else: + */ + (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":250 + * for i in range(ndim): + * info.strides[i] = PyArray_STRIDES(self)[i] + * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< + * else: + * info.strides = PyArray_STRIDES(self) + */ + (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]); + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":243 + * info.buf = PyArray_DATA(self) + * info.ndim = ndim + * if copy_shape: # <<<<<<<<<<<<<< + * # Allocate new buffer for strides and shape info. + * # This is allocated as one block, strides first. 
+ */ + goto __pyx_L11; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":252 + * info.shape[i] = PyArray_DIMS(self)[i] + * else: + * info.strides = PyArray_STRIDES(self) # <<<<<<<<<<<<<< + * info.shape = PyArray_DIMS(self) + * info.suboffsets = NULL + */ + /*else*/ { + __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self)); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":253 + * else: + * info.strides = PyArray_STRIDES(self) + * info.shape = PyArray_DIMS(self) # <<<<<<<<<<<<<< + * info.suboffsets = NULL + * info.itemsize = PyArray_ITEMSIZE(self) + */ + __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self)); + } + __pyx_L11:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":254 + * info.strides = PyArray_STRIDES(self) + * info.shape = PyArray_DIMS(self) + * info.suboffsets = NULL # <<<<<<<<<<<<<< + * info.itemsize = PyArray_ITEMSIZE(self) + * info.readonly = not PyArray_ISWRITEABLE(self) + */ + __pyx_v_info->suboffsets = NULL; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":255 + * info.shape = PyArray_DIMS(self) + * info.suboffsets = NULL + * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< + * info.readonly = not PyArray_ISWRITEABLE(self) + * + */ + __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":256 + * info.suboffsets = NULL + * info.itemsize = PyArray_ITEMSIZE(self) + * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< + * + * cdef int t + */ + __pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0)); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":259 + * + * cdef int t + * cdef char* f = NULL # <<<<<<<<<<<<<< + * cdef dtype descr = self.descr + * cdef int offset + */ + __pyx_v_f = NULL; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":260 + * cdef int t + * cdef char* f = NULL + * cdef dtype descr = self.descr # <<<<<<<<<<<<<< + * cdef int offset + * + */ + __pyx_t_3 = ((PyObject *)__pyx_v_self->descr); + __Pyx_INCREF(__pyx_t_3); + __pyx_v_descr = ((PyArray_Descr *)__pyx_t_3); + __pyx_t_3 = 0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":263 + * cdef int offset + * + * cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<< + * + * if not hasfields and not copy_shape: + */ + __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":265 + * cdef bint hasfields = PyDataType_HASFIELDS(descr) + * + * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< + * # do not call releasebuffer + * info.obj = None + */ + __pyx_t_2 = ((!(__pyx_v_hasfields != 0)) != 0); + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L15_bool_binop_done; + } + 
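/* The boolean temporaries above and below are Cython's short-circuit
+ * lowering of "not hasfields and not copy_shape" from the quoted .pxd
+ * source. When both conditions hold, nothing was heap-allocated for this
+ * buffer, so info.obj is set to None and no __releasebuffer__ call is
+ * needed; otherwise info.obj = self keeps the exporting ndarray alive
+ * until the buffer is released. */ +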
__pyx_t_2 = ((!(__pyx_v_copy_shape != 0)) != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L15_bool_binop_done:; + if (__pyx_t_1) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":267 + * if not hasfields and not copy_shape: + * # do not call releasebuffer + * info.obj = None # <<<<<<<<<<<<<< + * else: + * # need to call releasebuffer + */ + __Pyx_INCREF(Py_None); + __Pyx_GIVEREF(Py_None); + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); + __pyx_v_info->obj = Py_None; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":265 + * cdef bint hasfields = PyDataType_HASFIELDS(descr) + * + * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< + * # do not call releasebuffer + * info.obj = None + */ + goto __pyx_L14; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":270 + * else: + * # need to call releasebuffer + * info.obj = self # <<<<<<<<<<<<<< + * + * if not hasfields: + */ + /*else*/ { + __Pyx_INCREF(((PyObject *)__pyx_v_self)); + __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); + __pyx_v_info->obj = ((PyObject *)__pyx_v_self); + } + __pyx_L14:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":272 + * info.obj = self + * + * if not hasfields: # <<<<<<<<<<<<<< + * t = descr.type_num + * if ((descr.byteorder == c'>' and little_endian) or + */ + __pyx_t_1 = ((!(__pyx_v_hasfields != 0)) != 0); + if (__pyx_t_1) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":273 + * + * if not hasfields: + * t = descr.type_num # <<<<<<<<<<<<<< + * if ((descr.byteorder == c'>' and little_endian) or + * (descr.byteorder == c'<' and not little_endian)): + */ + __pyx_t_4 = __pyx_v_descr->type_num; + __pyx_v_t = __pyx_t_4; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":274 + * if not hasfields: + * t = descr.type_num + * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< + * (descr.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + */ + __pyx_t_2 = ((__pyx_v_descr->byteorder == '>') != 0); + if (!__pyx_t_2) { + goto __pyx_L20_next_or; + } else { + } + __pyx_t_2 = (__pyx_v_little_endian != 0); + if (!__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L19_bool_binop_done; + } + __pyx_L20_next_or:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":275 + * t = descr.type_num + * if ((descr.byteorder == c'>' and little_endian) or + * (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< + * raise ValueError(u"Non-native byte order not supported") + * if t == NPY_BYTE: f = "b" + */ + __pyx_t_2 = ((__pyx_v_descr->byteorder == '<') != 0); + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L19_bool_binop_done; + } + __pyx_t_2 = ((!(__pyx_v_little_endian != 0)) != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L19_bool_binop_done:; + + /* 
"../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":274 + * if not hasfields: + * t = descr.type_num + * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< + * (descr.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + */ + if (__pyx_t_1) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":276 + * if ((descr.byteorder == c'>' and little_endian) or + * (descr.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< + * if t == NPY_BYTE: f = "b" + * elif t == NPY_UBYTE: f = "B" + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 276, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 276, __pyx_L1_error) + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":274 + * if not hasfields: + * t = descr.type_num + * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< + * (descr.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + */ + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":277 + * (descr.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< + * elif t == NPY_UBYTE: f = "B" + * elif t == NPY_SHORT: f = "h" + */ + switch (__pyx_v_t) { + case NPY_BYTE: + __pyx_v_f = ((char *)"b"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":278 + * raise ValueError(u"Non-native byte order not supported") + * if t == NPY_BYTE: f = "b" + * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< + * elif t == NPY_SHORT: f = "h" + * elif t == NPY_USHORT: f = "H" + */ + case NPY_UBYTE: + __pyx_v_f = ((char *)"B"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":279 + * if t == NPY_BYTE: f = "b" + * elif t == NPY_UBYTE: f = "B" + * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< + * elif t == NPY_USHORT: f = "H" + * elif t == NPY_INT: f = "i" + */ + case NPY_SHORT: + __pyx_v_f = ((char *)"h"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":280 + * elif t == NPY_UBYTE: f = "B" + * elif t == NPY_SHORT: f = "h" + * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< + * elif t == NPY_INT: f = "i" + * elif t == NPY_UINT: f = "I" + */ + case NPY_USHORT: + __pyx_v_f = ((char *)"H"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":281 + * elif t == NPY_SHORT: f = "h" + * elif t == NPY_USHORT: f = "H" + * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< + * elif t == NPY_UINT: f = "I" + * elif t == NPY_LONG: f = "l" + */ + case NPY_INT: + __pyx_v_f = ((char *)"i"); + break; + + /* 
"../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":282 + * elif t == NPY_USHORT: f = "H" + * elif t == NPY_INT: f = "i" + * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< + * elif t == NPY_LONG: f = "l" + * elif t == NPY_ULONG: f = "L" + */ + case NPY_UINT: + __pyx_v_f = ((char *)"I"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":283 + * elif t == NPY_INT: f = "i" + * elif t == NPY_UINT: f = "I" + * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< + * elif t == NPY_ULONG: f = "L" + * elif t == NPY_LONGLONG: f = "q" + */ + case NPY_LONG: + __pyx_v_f = ((char *)"l"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":284 + * elif t == NPY_UINT: f = "I" + * elif t == NPY_LONG: f = "l" + * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< + * elif t == NPY_LONGLONG: f = "q" + * elif t == NPY_ULONGLONG: f = "Q" + */ + case NPY_ULONG: + __pyx_v_f = ((char *)"L"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":285 + * elif t == NPY_LONG: f = "l" + * elif t == NPY_ULONG: f = "L" + * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< + * elif t == NPY_ULONGLONG: f = "Q" + * elif t == NPY_FLOAT: f = "f" + */ + case NPY_LONGLONG: + __pyx_v_f = ((char *)"q"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":286 + * elif t == NPY_ULONG: f = "L" + * elif t == NPY_LONGLONG: f = "q" + * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< + * elif t == NPY_FLOAT: f = "f" + * elif t == NPY_DOUBLE: f = "d" + */ + case NPY_ULONGLONG: + __pyx_v_f = ((char *)"Q"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":287 + * elif t == NPY_LONGLONG: f = "q" + * elif t == NPY_ULONGLONG: f = "Q" + * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< + * elif t == NPY_DOUBLE: f = "d" + * elif t == NPY_LONGDOUBLE: f = "g" + */ + case NPY_FLOAT: + __pyx_v_f = ((char *)"f"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":288 + * elif t == NPY_ULONGLONG: f = "Q" + * elif t == NPY_FLOAT: f = "f" + * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< + * elif t == NPY_LONGDOUBLE: f = "g" + * elif t == NPY_CFLOAT: f = "Zf" + */ + case NPY_DOUBLE: + __pyx_v_f = ((char *)"d"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":289 + * elif t == NPY_FLOAT: f = "f" + * elif t == NPY_DOUBLE: f = "d" + * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< + * elif t == NPY_CFLOAT: f = "Zf" + * elif t == NPY_CDOUBLE: f = "Zd" + */ + case NPY_LONGDOUBLE: + __pyx_v_f = ((char *)"g"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":290 + * elif t == NPY_DOUBLE: f = "d" + * elif t == NPY_LONGDOUBLE: f = "g" + * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< + * elif t == NPY_CDOUBLE: f = "Zd" + * elif t == NPY_CLONGDOUBLE: f = "Zg" + */ 
+ case NPY_CFLOAT: + __pyx_v_f = ((char *)"Zf"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":291 + * elif t == NPY_LONGDOUBLE: f = "g" + * elif t == NPY_CFLOAT: f = "Zf" + * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< + * elif t == NPY_CLONGDOUBLE: f = "Zg" + * elif t == NPY_OBJECT: f = "O" + */ + case NPY_CDOUBLE: + __pyx_v_f = ((char *)"Zd"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":292 + * elif t == NPY_CFLOAT: f = "Zf" + * elif t == NPY_CDOUBLE: f = "Zd" + * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< + * elif t == NPY_OBJECT: f = "O" + * else: + */ + case NPY_CLONGDOUBLE: + __pyx_v_f = ((char *)"Zg"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":293 + * elif t == NPY_CDOUBLE: f = "Zd" + * elif t == NPY_CLONGDOUBLE: f = "Zg" + * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< + * else: + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) + */ + case NPY_OBJECT: + __pyx_v_f = ((char *)"O"); + break; + default: + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":295 + * elif t == NPY_OBJECT: f = "O" + * else: + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< + * info.format = f + * return + */ + __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 295, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_6 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_3); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 295, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 295, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_GIVEREF(__pyx_t_6); + PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6); + __pyx_t_6 = 0; + __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 295, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_Raise(__pyx_t_6, 0, 0, 0); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __PYX_ERR(1, 295, __pyx_L1_error) + break; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":296 + * else: + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) + * info.format = f # <<<<<<<<<<<<<< + * return + * else: + */ + __pyx_v_info->format = __pyx_v_f; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":297 + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) + * info.format = f + * return # <<<<<<<<<<<<<< + * else: + * info.format = PyObject_Malloc(_buffer_format_string_len) + */ + __pyx_r = 0; + goto __pyx_L0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":272 + * info.obj = self + * + * if not hasfields: # <<<<<<<<<<<<<< + * t = descr.type_num + * if ((descr.byteorder == c'>' and little_endian) or + */ + } + + /* 
"../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":299 + * return + * else: + * info.format = PyObject_Malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< + * info.format[0] = c'^' # Native data types, manual alignment + * offset = 0 + */ + /*else*/ { + __pyx_v_info->format = ((char *)PyObject_Malloc(0xFF)); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":300 + * else: + * info.format = PyObject_Malloc(_buffer_format_string_len) + * info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<< + * offset = 0 + * f = _util_dtypestring(descr, info.format + 1, + */ + (__pyx_v_info->format[0]) = '^'; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":301 + * info.format = PyObject_Malloc(_buffer_format_string_len) + * info.format[0] = c'^' # Native data types, manual alignment + * offset = 0 # <<<<<<<<<<<<<< + * f = _util_dtypestring(descr, info.format + 1, + * info.format + _buffer_format_string_len, + */ + __pyx_v_offset = 0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":302 + * info.format[0] = c'^' # Native data types, manual alignment + * offset = 0 + * f = _util_dtypestring(descr, info.format + 1, # <<<<<<<<<<<<<< + * info.format + _buffer_format_string_len, + * &offset) + */ + __pyx_t_7 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 0xFF), (&__pyx_v_offset)); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 302, __pyx_L1_error) + __pyx_v_f = __pyx_t_7; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":305 + * info.format + _buffer_format_string_len, + * &offset) + * f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<< + * + * def __releasebuffer__(ndarray self, Py_buffer* info): + */ + (__pyx_v_f[0]) = '\x00'; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":214 + * # experimental exception made for __getbuffer__ and __releasebuffer__ + * # -- the details of this may change. + * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< + * # This implementation of getbuffer is geared towards Cython + * # requirements, and does not yet fullfill the PEP. 
+ */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; + } + goto __pyx_L2; + __pyx_L0:; + if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { + __Pyx_GOTREF(Py_None); + __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; + } + __pyx_L2:; + __Pyx_XDECREF((PyObject *)__pyx_v_descr); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":307 + * f[0] = c'\0' # Terminate format string + * + * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< + * if PyArray_HASFIELDS(self): + * PyObject_Free(info.format) + */ + +/* Python wrapper */ +static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/ +static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0); + __pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) { + __Pyx_RefNannyDeclarations + int __pyx_t_1; + __Pyx_RefNannySetupContext("__releasebuffer__", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":308 + * + * def __releasebuffer__(ndarray self, Py_buffer* info): + * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< + * PyObject_Free(info.format) + * if sizeof(npy_intp) != sizeof(Py_ssize_t): + */ + __pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0); + if (__pyx_t_1) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":309 + * def __releasebuffer__(ndarray self, Py_buffer* info): + * if PyArray_HASFIELDS(self): + * PyObject_Free(info.format) # <<<<<<<<<<<<<< + * if sizeof(npy_intp) != sizeof(Py_ssize_t): + * PyObject_Free(info.strides) + */ + PyObject_Free(__pyx_v_info->format); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":308 + * + * def __releasebuffer__(ndarray self, Py_buffer* info): + * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< + * PyObject_Free(info.format) + * if sizeof(npy_intp) != sizeof(Py_ssize_t): + */ + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":310 + * if PyArray_HASFIELDS(self): + * PyObject_Free(info.format) + * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< + * PyObject_Free(info.strides) + * # info.shape was stored after info.strides in the same block + */ + __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); + if (__pyx_t_1) { + + /* 
"../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":311 + * PyObject_Free(info.format) + * if sizeof(npy_intp) != sizeof(Py_ssize_t): + * PyObject_Free(info.strides) # <<<<<<<<<<<<<< + * # info.shape was stored after info.strides in the same block + * + */ + PyObject_Free(__pyx_v_info->strides); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":310 + * if PyArray_HASFIELDS(self): + * PyObject_Free(info.format) + * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< + * PyObject_Free(info.strides) + * # info.shape was stored after info.strides in the same block + */ + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":307 + * f[0] = c'\0' # Terminate format string + * + * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< + * if PyArray_HASFIELDS(self): + * PyObject_Free(info.format) + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":788 + * ctypedef npy_cdouble complex_t + * + * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(1, a) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":789 + * + * cdef inline object PyArray_MultiIterNew1(a): + * return PyArray_MultiIterNew(1, a) # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew2(a, b): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 789, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":788 + * ctypedef npy_cdouble complex_t + * + * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(1, a) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":791 + * return PyArray_MultiIterNew(1, a) + * + * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(2, a, b) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); + + /* 
"../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":792 + * + * cdef inline object PyArray_MultiIterNew2(a, b): + * return PyArray_MultiIterNew(2, a, b) # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew3(a, b, c): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 792, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":791 + * return PyArray_MultiIterNew(1, a) + * + * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(2, a, b) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":794 + * return PyArray_MultiIterNew(2, a, b) + * + * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(3, a, b, c) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":795 + * + * cdef inline object PyArray_MultiIterNew3(a, b, c): + * return PyArray_MultiIterNew(3, a, b, c) # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew4(a, b, c, d): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 795, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":794 + * return PyArray_MultiIterNew(2, a, b) + * + * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(3, a, b, c) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":797 + * return PyArray_MultiIterNew(3, a, b, c) + * + * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(4, a, b, c, d) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + 
__Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":798 + * + * cdef inline object PyArray_MultiIterNew4(a, b, c, d): + * return PyArray_MultiIterNew(4, a, b, c, d) # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 798, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":797 + * return PyArray_MultiIterNew(3, a, b, c) + * + * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(4, a, b, c, d) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":800 + * return PyArray_MultiIterNew(4, a, b, c, d) + * + * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(5, a, b, c, d, e) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":801 + * + * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): + * return PyArray_MultiIterNew(5, a, b, c, d, e) # <<<<<<<<<<<<<< + * + * cdef inline tuple PyDataType_SHAPE(dtype d): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 801, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":800 + * return PyArray_MultiIterNew(4, a, b, c, d) + * + * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(5, a, b, c, d, e) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":803 + * return PyArray_MultiIterNew(5, a, b, c, d, e) + * + * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<< + * if PyDataType_HASSUBARRAY(d): + * 
return d.subarray.shape + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyDataType_SHAPE(PyArray_Descr *__pyx_v_d) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + __Pyx_RefNannySetupContext("PyDataType_SHAPE", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":804 + * + * cdef inline tuple PyDataType_SHAPE(dtype d): + * if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<< + * return d.subarray.shape + * else: + */ + __pyx_t_1 = (PyDataType_HASSUBARRAY(__pyx_v_d) != 0); + if (__pyx_t_1) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":805 + * cdef inline tuple PyDataType_SHAPE(dtype d): + * if PyDataType_HASSUBARRAY(d): + * return d.subarray.shape # <<<<<<<<<<<<<< + * else: + * return () + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject*)__pyx_v_d->subarray->shape)); + __pyx_r = ((PyObject*)__pyx_v_d->subarray->shape); + goto __pyx_L0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":804 + * + * cdef inline tuple PyDataType_SHAPE(dtype d): + * if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<< + * return d.subarray.shape + * else: + */ + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":807 + * return d.subarray.shape + * else: + * return () # <<<<<<<<<<<<<< + * + * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: + */ + /*else*/ { + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_empty_tuple); + __pyx_r = __pyx_empty_tuple; + goto __pyx_L0; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":803 + * return PyArray_MultiIterNew(5, a, b, c, d, e) + * + * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<< + * if PyDataType_HASSUBARRAY(d): + * return d.subarray.shape + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":809 + * return () + * + * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< + * # Recursive utility function used in __getbuffer__ to get format + * # string. The new location in the format string is returned. 
+ */ + +static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { + PyArray_Descr *__pyx_v_child = 0; + int __pyx_v_endian_detector; + int __pyx_v_little_endian; + PyObject *__pyx_v_fields = 0; + PyObject *__pyx_v_childname = NULL; + PyObject *__pyx_v_new_offset = NULL; + PyObject *__pyx_v_t = NULL; + char *__pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + Py_ssize_t __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + int __pyx_t_5; + int __pyx_t_6; + int __pyx_t_7; + long __pyx_t_8; + char *__pyx_t_9; + __Pyx_RefNannySetupContext("_util_dtypestring", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":814 + * + * cdef dtype child + * cdef int endian_detector = 1 # <<<<<<<<<<<<<< + * cdef bint little_endian = ((&endian_detector)[0] != 0) + * cdef tuple fields + */ + __pyx_v_endian_detector = 1; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":815 + * cdef dtype child + * cdef int endian_detector = 1 + * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< + * cdef tuple fields + * + */ + __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":818 + * cdef tuple fields + * + * for childname in descr.names: # <<<<<<<<<<<<<< + * fields = descr.fields[childname] + * child, new_offset = fields + */ + if (unlikely(__pyx_v_descr->names == Py_None)) { + PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); + __PYX_ERR(1, 818, __pyx_L1_error) + } + __pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; + for (;;) { + if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) __PYX_ERR(1, 818, __pyx_L1_error) + #else + __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 818, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + #endif + __Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3); + __pyx_t_3 = 0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":819 + * + * for childname in descr.names: + * fields = descr.fields[childname] # <<<<<<<<<<<<<< + * child, new_offset = fields + * + */ + if (unlikely(__pyx_v_descr->fields == Py_None)) { + PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); + __PYX_ERR(1, 819, __pyx_L1_error) + } + __pyx_t_3 = __Pyx_PyDict_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 819, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) __PYX_ERR(1, 819, __pyx_L1_error) + __Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3)); + __pyx_t_3 = 0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":820 + * for childname 
in descr.names: + * fields = descr.fields[childname] + * child, new_offset = fields # <<<<<<<<<<<<<< + * + * if (end - f) - (new_offset - offset[0]) < 15: + */ + if (likely(__pyx_v_fields != Py_None)) { + PyObject* sequence = __pyx_v_fields; + #if !CYTHON_COMPILING_IN_PYPY + Py_ssize_t size = Py_SIZE(sequence); + #else + Py_ssize_t size = PySequence_Size(sequence); + #endif + if (unlikely(size != 2)) { + if (size > 2) __Pyx_RaiseTooManyValuesError(2); + else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); + __PYX_ERR(1, 820, __pyx_L1_error) + } + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); + __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); + __Pyx_INCREF(__pyx_t_3); + __Pyx_INCREF(__pyx_t_4); + #else + __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 820, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 820, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + #endif + } else { + __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 820, __pyx_L1_error) + } + if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) __PYX_ERR(1, 820, __pyx_L1_error) + __Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3)); + __pyx_t_3 = 0; + __Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4); + __pyx_t_4 = 0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":822 + * child, new_offset = fields + * + * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") + * + */ + __pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 822, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 822, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 822, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0); + if (__pyx_t_6) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":823 + * + * if (end - f) - (new_offset - offset[0]) < 15: + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< + * + * if ((child.byteorder == c'>' and little_endian) or + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 823, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 823, __pyx_L1_error) + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":822 + * child, new_offset = fields + * + * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") + * + */ + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":825 + * raise 
RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") + * + * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< + * (child.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + */ + __pyx_t_7 = ((__pyx_v_child->byteorder == '>') != 0); + if (!__pyx_t_7) { + goto __pyx_L8_next_or; + } else { + } + __pyx_t_7 = (__pyx_v_little_endian != 0); + if (!__pyx_t_7) { + } else { + __pyx_t_6 = __pyx_t_7; + goto __pyx_L7_bool_binop_done; + } + __pyx_L8_next_or:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":826 + * + * if ((child.byteorder == c'>' and little_endian) or + * (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< + * raise ValueError(u"Non-native byte order not supported") + * # One could encode it in the format string and have Cython + */ + __pyx_t_7 = ((__pyx_v_child->byteorder == '<') != 0); + if (__pyx_t_7) { + } else { + __pyx_t_6 = __pyx_t_7; + goto __pyx_L7_bool_binop_done; + } + __pyx_t_7 = ((!(__pyx_v_little_endian != 0)) != 0); + __pyx_t_6 = __pyx_t_7; + __pyx_L7_bool_binop_done:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":825 + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") + * + * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< + * (child.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + */ + if (__pyx_t_6) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":827 + * if ((child.byteorder == c'>' and little_endian) or + * (child.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< + * # One could encode it in the format string and have Cython + * # complain instead, BUT: < and > in format strings also imply + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 827, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 827, __pyx_L1_error) + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":825 + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") + * + * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< + * (child.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + */ + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":837 + * + * # Output padding bytes + * while offset[0] < new_offset: # <<<<<<<<<<<<<< + * f[0] = 120 # "x"; pad byte + * f += 1 + */ + while (1) { + __pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 837, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 837, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if 
(unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 837, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (!__pyx_t_6) break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":838 + * # Output padding bytes + * while offset[0] < new_offset: + * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< + * f += 1 + * offset[0] += 1 + */ + (__pyx_v_f[0]) = 0x78; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":839 + * while offset[0] < new_offset: + * f[0] = 120 # "x"; pad byte + * f += 1 # <<<<<<<<<<<<<< + * offset[0] += 1 + * + */ + __pyx_v_f = (__pyx_v_f + 1); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":840 + * f[0] = 120 # "x"; pad byte + * f += 1 + * offset[0] += 1 # <<<<<<<<<<<<<< + * + * offset[0] += child.itemsize + */ + __pyx_t_8 = 0; + (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + 1); + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":842 + * offset[0] += 1 + * + * offset[0] += child.itemsize # <<<<<<<<<<<<<< + * + * if not PyDataType_HASFIELDS(child): + */ + __pyx_t_8 = 0; + (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + __pyx_v_child->elsize); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":844 + * offset[0] += child.itemsize + * + * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< + * t = child.type_num + * if end - f < 5: + */ + __pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0); + if (__pyx_t_6) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":845 + * + * if not PyDataType_HASFIELDS(child): + * t = child.type_num # <<<<<<<<<<<<<< + * if end - f < 5: + * raise RuntimeError(u"Format string allocated too short.") + */ + __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 845, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4); + __pyx_t_4 = 0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":846 + * if not PyDataType_HASFIELDS(child): + * t = child.type_num + * if end - f < 5: # <<<<<<<<<<<<<< + * raise RuntimeError(u"Format string allocated too short.") + * + */ + __pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0); + if (__pyx_t_6) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":847 + * t = child.type_num + * if end - f < 5: + * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< + * + * # Until ticket #99 is fixed, use integers to avoid warnings + */ + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 847, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_Raise(__pyx_t_4, 0, 0, 0); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __PYX_ERR(1, 847, __pyx_L1_error) + + /* 
"../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":846 + * if not PyDataType_HASFIELDS(child): + * t = child.type_num + * if end - f < 5: # <<<<<<<<<<<<<< + * raise RuntimeError(u"Format string allocated too short.") + * + */ + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":850 + * + * # Until ticket #99 is fixed, use integers to avoid warnings + * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< + * elif t == NPY_UBYTE: f[0] = 66 #"B" + * elif t == NPY_SHORT: f[0] = 104 #"h" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_BYTE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 850, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 850, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 850, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 98; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":851 + * # Until ticket #99 is fixed, use integers to avoid warnings + * if t == NPY_BYTE: f[0] = 98 #"b" + * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< + * elif t == NPY_SHORT: f[0] = 104 #"h" + * elif t == NPY_USHORT: f[0] = 72 #"H" + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UBYTE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 851, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 851, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 851, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 66; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":852 + * if t == NPY_BYTE: f[0] = 98 #"b" + * elif t == NPY_UBYTE: f[0] = 66 #"B" + * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< + * elif t == NPY_USHORT: f[0] = 72 #"H" + * elif t == NPY_INT: f[0] = 105 #"i" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_SHORT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 852, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 852, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 852, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x68; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":853 + * elif t == NPY_UBYTE: f[0] = 66 #"B" + * elif t == NPY_SHORT: f[0] = 104 #"h" + * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< + * elif t == NPY_INT: f[0] = 105 #"i" + * elif t == NPY_UINT: f[0] = 73 #"I" + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_USHORT); if 
(unlikely(!__pyx_t_3)) __PYX_ERR(1, 853, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 853, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 853, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 72; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":854 + * elif t == NPY_SHORT: f[0] = 104 #"h" + * elif t == NPY_USHORT: f[0] = 72 #"H" + * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< + * elif t == NPY_UINT: f[0] = 73 #"I" + * elif t == NPY_LONG: f[0] = 108 #"l" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_INT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 854, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 854, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 854, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x69; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":855 + * elif t == NPY_USHORT: f[0] = 72 #"H" + * elif t == NPY_INT: f[0] = 105 #"i" + * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< + * elif t == NPY_LONG: f[0] = 108 #"l" + * elif t == NPY_ULONG: f[0] = 76 #"L" + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UINT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 855, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 855, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 855, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 73; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":856 + * elif t == NPY_INT: f[0] = 105 #"i" + * elif t == NPY_UINT: f[0] = 73 #"I" + * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< + * elif t == NPY_ULONG: f[0] = 76 #"L" + * elif t == NPY_LONGLONG: f[0] = 113 #"q" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 856, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 856, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 856, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x6C; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":857 + * elif t == NPY_UINT: f[0] = 73 #"I" + * elif t == NPY_LONG: f[0] = 108 #"l" + * elif t == NPY_ULONG: f[0] = 76 #"L" # 
<<<<<<<<<<<<<< + * elif t == NPY_LONGLONG: f[0] = 113 #"q" + * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 857, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 857, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 857, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 76; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":858 + * elif t == NPY_LONG: f[0] = 108 #"l" + * elif t == NPY_ULONG: f[0] = 76 #"L" + * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< + * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" + * elif t == NPY_FLOAT: f[0] = 102 #"f" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 858, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 858, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 858, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x71; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":859 + * elif t == NPY_ULONG: f[0] = 76 #"L" + * elif t == NPY_LONGLONG: f[0] = 113 #"q" + * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< + * elif t == NPY_FLOAT: f[0] = 102 #"f" + * elif t == NPY_DOUBLE: f[0] = 100 #"d" + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 859, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 859, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 859, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 81; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":860 + * elif t == NPY_LONGLONG: f[0] = 113 #"q" + * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" + * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< + * elif t == NPY_DOUBLE: f[0] = 100 #"d" + * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_FLOAT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 860, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 860, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 860, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x66; + goto __pyx_L15; + } + + /* 
"../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":861 + * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" + * elif t == NPY_FLOAT: f[0] = 102 #"f" + * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< + * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" + * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 861, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 861, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 861, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x64; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":862 + * elif t == NPY_FLOAT: f[0] = 102 #"f" + * elif t == NPY_DOUBLE: f[0] = 100 #"d" + * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< + * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf + * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 862, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 862, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 862, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x67; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":863 + * elif t == NPY_DOUBLE: f[0] = 100 #"d" + * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" + * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< + * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd + * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 863, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 863, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 863, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 90; + (__pyx_v_f[1]) = 0x66; + __pyx_v_f = (__pyx_v_f + 1); + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":864 + * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" + * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf + * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< + * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg + * elif t == NPY_OBJECT: f[0] = 79 #"O" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 
864, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 864, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 864, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 90; + (__pyx_v_f[1]) = 0x64; + __pyx_v_f = (__pyx_v_f + 1); + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":865 + * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf + * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd + * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< + * elif t == NPY_OBJECT: f[0] = 79 #"O" + * else: + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 865, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 865, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 865, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 90; + (__pyx_v_f[1]) = 0x67; + __pyx_v_f = (__pyx_v_f + 1); + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":866 + * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd + * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg + * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< + * else: + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_OBJECT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 866, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 866, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 866, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 79; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":868 + * elif t == NPY_OBJECT: f[0] = 79 #"O" + * else: + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< + * f += 1 + * else: + */ + /*else*/ { + __pyx_t_3 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 868, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 868, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_GIVEREF(__pyx_t_3); + PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); + __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 868, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 868, __pyx_L1_error) + } + 
__pyx_L15:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":869 + * else: + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) + * f += 1 # <<<<<<<<<<<<<< + * else: + * # Cython ignores struct boundary information ("T{...}"), + */ + __pyx_v_f = (__pyx_v_f + 1); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":844 + * offset[0] += child.itemsize + * + * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< + * t = child.type_num + * if end - f < 5: + */ + goto __pyx_L13; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":873 + * # Cython ignores struct boundary information ("T{...}"), + * # so don't output it + * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< + * return f + * + */ + /*else*/ { + __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_9 == ((char *)NULL))) __PYX_ERR(1, 873, __pyx_L1_error) + __pyx_v_f = __pyx_t_9; + } + __pyx_L13:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":818 + * cdef tuple fields + * + * for childname in descr.names: # <<<<<<<<<<<<<< + * fields = descr.fields[childname] + * child, new_offset = fields + */ + } + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":874 + * # so don't output it + * f = _util_dtypestring(child, f, end, offset) + * return f # <<<<<<<<<<<<<< + * + * + */ + __pyx_r = __pyx_v_f; + goto __pyx_L0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":809 + * return () + * + * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< + * # Recursive utility function used in __getbuffer__ to get format + * # string. The new location in the format string is returned. 
+ */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_child); + __Pyx_XDECREF(__pyx_v_fields); + __Pyx_XDECREF(__pyx_v_childname); + __Pyx_XDECREF(__pyx_v_new_offset); + __Pyx_XDECREF(__pyx_v_t); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":990 + * + * + * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< + * cdef PyObject* baseptr + * if base is None: + */ + +static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { + PyObject *__pyx_v_baseptr; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + __Pyx_RefNannySetupContext("set_array_base", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":992 + * cdef inline void set_array_base(ndarray arr, object base): + * cdef PyObject* baseptr + * if base is None: # <<<<<<<<<<<<<< + * baseptr = NULL + * else: + */ + __pyx_t_1 = (__pyx_v_base == Py_None); + __pyx_t_2 = (__pyx_t_1 != 0); + if (__pyx_t_2) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":993 + * cdef PyObject* baseptr + * if base is None: + * baseptr = NULL # <<<<<<<<<<<<<< + * else: + * Py_INCREF(base) # important to do this before decref below! + */ + __pyx_v_baseptr = NULL; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":992 + * cdef inline void set_array_base(ndarray arr, object base): + * cdef PyObject* baseptr + * if base is None: # <<<<<<<<<<<<<< + * baseptr = NULL + * else: + */ + goto __pyx_L3; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":995 + * baseptr = NULL + * else: + * Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<< + * baseptr = base + * Py_XDECREF(arr.base) + */ + /*else*/ { + Py_INCREF(__pyx_v_base); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":996 + * else: + * Py_INCREF(base) # important to do this before decref below! + * baseptr = base # <<<<<<<<<<<<<< + * Py_XDECREF(arr.base) + * arr.base = baseptr + */ + __pyx_v_baseptr = ((PyObject *)__pyx_v_base); + } + __pyx_L3:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":997 + * Py_INCREF(base) # important to do this before decref below! 
+ * baseptr = base + * Py_XDECREF(arr.base) # <<<<<<<<<<<<<< + * arr.base = baseptr + * + */ + Py_XDECREF(__pyx_v_arr->base); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":998 + * baseptr = base + * Py_XDECREF(arr.base) + * arr.base = baseptr # <<<<<<<<<<<<<< + * + * cdef inline object get_array_base(ndarray arr): + */ + __pyx_v_arr->base = __pyx_v_baseptr; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":990 + * + * + * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< + * cdef PyObject* baseptr + * if base is None: + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1000 + * arr.base = baseptr + * + * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< + * if arr.base is NULL: + * return None + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + __Pyx_RefNannySetupContext("get_array_base", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1001 + * + * cdef inline object get_array_base(ndarray arr): + * if arr.base is NULL: # <<<<<<<<<<<<<< + * return None + * else: + */ + __pyx_t_1 = ((__pyx_v_arr->base == NULL) != 0); + if (__pyx_t_1) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1002 + * cdef inline object get_array_base(ndarray arr): + * if arr.base is NULL: + * return None # <<<<<<<<<<<<<< + * else: + * return arr.base + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(Py_None); + __pyx_r = Py_None; + goto __pyx_L0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1001 + * + * cdef inline object get_array_base(ndarray arr): + * if arr.base is NULL: # <<<<<<<<<<<<<< + * return None + * else: + */ + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1004 + * return None + * else: + * return arr.base # <<<<<<<<<<<<<< + * + * + */ + /*else*/ { + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); + __pyx_r = ((PyObject *)__pyx_v_arr->base); + goto __pyx_L0; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1000 + * arr.base = baseptr + * + * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< + * if arr.base is NULL: + * return None + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1009 + * # Versions of the import_* functions which are more suitable for + * # Cython code. 
+ * cdef inline int import_array() except -1: # <<<<<<<<<<<<<< + * try: + * _import_array() + */ + +static CYTHON_INLINE int __pyx_f_5numpy_import_array(void) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + __Pyx_RefNannySetupContext("import_array", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1010 + * # Cython code. + * cdef inline int import_array() except -1: + * try: # <<<<<<<<<<<<<< + * _import_array() + * except Exception: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_1); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + /*try:*/ { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1011 + * cdef inline int import_array() except -1: + * try: + * _import_array() # <<<<<<<<<<<<<< + * except Exception: + * raise ImportError("numpy.core.multiarray failed to import") + */ + __pyx_t_4 = _import_array(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1011, __pyx_L3_error) + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1010 + * # Cython code. + * cdef inline int import_array() except -1: + * try: # <<<<<<<<<<<<<< + * _import_array() + * except Exception: + */ + } + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + goto __pyx_L8_try_end; + __pyx_L3_error:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1012 + * try: + * _import_array() + * except Exception: # <<<<<<<<<<<<<< + * raise ImportError("numpy.core.multiarray failed to import") + * + */ + __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); + if (__pyx_t_4) { + __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1012, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GOTREF(__pyx_t_7); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1013 + * _import_array() + * except Exception: + * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<< + * + * cdef inline int import_umath() except -1: + */ + __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1013, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_Raise(__pyx_t_8, 0, 0, 0); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __PYX_ERR(1, 1013, __pyx_L5_except_error) + } + goto __pyx_L5_except_error; + __pyx_L5_except_error:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1010 + * # Cython code. 
+ * cdef inline int import_array() except -1: + * try: # <<<<<<<<<<<<<< + * _import_array() + * except Exception: + */ + __Pyx_XGIVEREF(__pyx_t_1); + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); + goto __pyx_L1_error; + __pyx_L8_try_end:; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1009 + * # Versions of the import_* functions which are more suitable for + * # Cython code. + * cdef inline int import_array() except -1: # <<<<<<<<<<<<<< + * try: + * _import_array() + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1015 + * raise ImportError("numpy.core.multiarray failed to import") + * + * cdef inline int import_umath() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + +static CYTHON_INLINE int __pyx_f_5numpy_import_umath(void) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + __Pyx_RefNannySetupContext("import_umath", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1016 + * + * cdef inline int import_umath() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_1); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + /*try:*/ { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1017 + * cdef inline int import_umath() except -1: + * try: + * _import_umath() # <<<<<<<<<<<<<< + * except Exception: + * raise ImportError("numpy.core.umath failed to import") + */ + __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1017, __pyx_L3_error) + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1016 + * + * cdef inline int import_umath() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + } + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + goto __pyx_L8_try_end; + __pyx_L3_error:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1018 + * try: + * _import_umath() + * except Exception: # <<<<<<<<<<<<<< + * raise ImportError("numpy.core.umath failed to import") + * + */ + __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); + if (__pyx_t_4) { + __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, 
__pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1018, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GOTREF(__pyx_t_7); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1019 + * _import_umath() + * except Exception: + * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< + * + * cdef inline int import_ufunc() except -1: + */ + __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1019, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_Raise(__pyx_t_8, 0, 0, 0); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __PYX_ERR(1, 1019, __pyx_L5_except_error) + } + goto __pyx_L5_except_error; + __pyx_L5_except_error:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1016 + * + * cdef inline int import_umath() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + __Pyx_XGIVEREF(__pyx_t_1); + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); + goto __pyx_L1_error; + __pyx_L8_try_end:; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1015 + * raise ImportError("numpy.core.multiarray failed to import") + * + * cdef inline int import_umath() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1021 + * raise ImportError("numpy.core.umath failed to import") + * + * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + +static CYTHON_INLINE int __pyx_f_5numpy_import_ufunc(void) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + __Pyx_RefNannySetupContext("import_ufunc", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1022 + * + * cdef inline int import_ufunc() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_1); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + /*try:*/ { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1023 + * cdef inline int import_ufunc() except -1: + * try: + * _import_umath() # <<<<<<<<<<<<<< + * except Exception: + * raise ImportError("numpy.core.umath failed to 
import") + */ + __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1023, __pyx_L3_error) + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1022 + * + * cdef inline int import_ufunc() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + } + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + goto __pyx_L8_try_end; + __pyx_L3_error:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1024 + * try: + * _import_umath() + * except Exception: # <<<<<<<<<<<<<< + * raise ImportError("numpy.core.umath failed to import") + */ + __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); + if (__pyx_t_4) { + __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1024, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GOTREF(__pyx_t_7); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1025 + * _import_umath() + * except Exception: + * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< + */ + __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1025, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_Raise(__pyx_t_8, 0, 0, 0); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __PYX_ERR(1, 1025, __pyx_L5_except_error) + } + goto __pyx_L5_except_error; + __pyx_L5_except_error:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1022 + * + * cdef inline int import_ufunc() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + __Pyx_XGIVEREF(__pyx_t_1); + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); + goto __pyx_L1_error; + __pyx_L8_try_end:; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1021 + * raise ImportError("numpy.core.umath failed to import") + * + * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyMethodDef __pyx_methods[] = { + {0, 0, 0, 0} +}; + +#if PY_MAJOR_VERSION >= 3 +#if CYTHON_PEP489_MULTI_PHASE_INIT +static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ +static int __pyx_pymod_exec_boundary_fill(PyObject* module); /*proto*/ +static PyModuleDef_Slot __pyx_moduledef_slots[] = { + {Py_mod_create, (void*)__pyx_pymod_create}, + {Py_mod_exec, (void*)__pyx_pymod_exec_boundary_fill}, + {0, NULL} +}; +#endif + +static struct PyModuleDef __pyx_moduledef = { + 
PyModuleDef_HEAD_INIT, + "boundary_fill", + 0, /* m_doc */ + #if CYTHON_PEP489_MULTI_PHASE_INIT + 0, /* m_size */ + #else + -1, /* m_size */ + #endif + __pyx_methods /* m_methods */, + #if CYTHON_PEP489_MULTI_PHASE_INIT + __pyx_moduledef_slots, /* m_slots */ + #else + NULL, /* m_reload */ + #endif + NULL, /* m_traverse */ + NULL, /* m_clear */ + NULL /* m_free */ +}; +#endif + +static __Pyx_StringTabEntry __pyx_string_tab[] = { + {&__pyx_kp_s_Convolution_kernel_must_have_odd, __pyx_k_Convolution_kernel_must_have_odd, sizeof(__pyx_k_Convolution_kernel_must_have_odd), 0, 0, 1, 0}, + {&__pyx_n_s_DTYPE, __pyx_k_DTYPE, sizeof(__pyx_k_DTYPE), 0, 0, 1, 1}, + {&__pyx_kp_u_Format_string_allocated_too_shor, __pyx_k_Format_string_allocated_too_shor, sizeof(__pyx_k_Format_string_allocated_too_shor), 0, 1, 0, 0}, + {&__pyx_kp_u_Format_string_allocated_too_shor_2, __pyx_k_Format_string_allocated_too_shor_2, sizeof(__pyx_k_Format_string_allocated_too_shor_2), 0, 1, 0, 0}, + {&__pyx_n_s_ImportError, __pyx_k_ImportError, sizeof(__pyx_k_ImportError), 0, 0, 1, 1}, + {&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0}, + {&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 0, 1, 1}, + {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, + {&__pyx_kp_s_astropy_convolution_boundary_fil, __pyx_k_astropy_convolution_boundary_fil, sizeof(__pyx_k_astropy_convolution_boundary_fil), 0, 0, 1, 0}, + {&__pyx_n_s_astropy_convolution_boundary_fil_2, __pyx_k_astropy_convolution_boundary_fil_2, sizeof(__pyx_k_astropy_convolution_boundary_fil_2), 0, 0, 1, 1}, + {&__pyx_n_s_bot, __pyx_k_bot, sizeof(__pyx_k_bot), 0, 0, 1, 1}, + {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, + {&__pyx_n_s_conv, __pyx_k_conv, sizeof(__pyx_k_conv), 0, 0, 1, 1}, + {&__pyx_n_s_convolve1d_boundary_fill, __pyx_k_convolve1d_boundary_fill, sizeof(__pyx_k_convolve1d_boundary_fill), 0, 0, 1, 1}, + {&__pyx_n_s_convolve2d_boundary_fill, __pyx_k_convolve2d_boundary_fill, sizeof(__pyx_k_convolve2d_boundary_fill), 0, 0, 1, 1}, + {&__pyx_n_s_convolve3d_boundary_fill, __pyx_k_convolve3d_boundary_fill, sizeof(__pyx_k_convolve3d_boundary_fill), 0, 0, 1, 1}, + {&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1}, + {&__pyx_n_s_empty, __pyx_k_empty, sizeof(__pyx_k_empty), 0, 0, 1, 1}, + {&__pyx_n_s_f, __pyx_k_f, sizeof(__pyx_k_f), 0, 0, 1, 1}, + {&__pyx_n_s_fill_value, __pyx_k_fill_value, sizeof(__pyx_k_fill_value), 0, 0, 1, 1}, + {&__pyx_n_s_float, __pyx_k_float, sizeof(__pyx_k_float), 0, 0, 1, 1}, + {&__pyx_n_s_g, __pyx_k_g, sizeof(__pyx_k_g), 0, 0, 1, 1}, + {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1}, + {&__pyx_n_s_ii, __pyx_k_ii, sizeof(__pyx_k_ii), 0, 0, 1, 1}, + {&__pyx_n_s_iii, __pyx_k_iii, sizeof(__pyx_k_iii), 0, 0, 1, 1}, + {&__pyx_n_s_iimax, __pyx_k_iimax, sizeof(__pyx_k_iimax), 0, 0, 1, 1}, + {&__pyx_n_s_iimin, __pyx_k_iimin, sizeof(__pyx_k_iimin), 0, 0, 1, 1}, + {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, + {&__pyx_n_s_j, __pyx_k_j, sizeof(__pyx_k_j), 0, 0, 1, 1}, + {&__pyx_n_s_jj, __pyx_k_jj, sizeof(__pyx_k_jj), 0, 0, 1, 1}, + {&__pyx_n_s_jjj, __pyx_k_jjj, sizeof(__pyx_k_jjj), 0, 0, 1, 1}, + {&__pyx_n_s_jjmax, __pyx_k_jjmax, sizeof(__pyx_k_jjmax), 0, 0, 1, 1}, + {&__pyx_n_s_jjmin, __pyx_k_jjmin, sizeof(__pyx_k_jjmin), 0, 0, 1, 1}, + {&__pyx_n_s_k, __pyx_k_k, sizeof(__pyx_k_k), 
0, 0, 1, 1}, + {&__pyx_n_s_ker, __pyx_k_ker, sizeof(__pyx_k_ker), 0, 0, 1, 1}, + {&__pyx_n_s_kk, __pyx_k_kk, sizeof(__pyx_k_kk), 0, 0, 1, 1}, + {&__pyx_n_s_kkk, __pyx_k_kkk, sizeof(__pyx_k_kkk), 0, 0, 1, 1}, + {&__pyx_n_s_kkmax, __pyx_k_kkmax, sizeof(__pyx_k_kkmax), 0, 0, 1, 1}, + {&__pyx_n_s_kkmin, __pyx_k_kkmin, sizeof(__pyx_k_kkmin), 0, 0, 1, 1}, + {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, + {&__pyx_kp_u_ndarray_is_not_C_contiguous, __pyx_k_ndarray_is_not_C_contiguous, sizeof(__pyx_k_ndarray_is_not_C_contiguous), 0, 1, 0, 0}, + {&__pyx_kp_u_ndarray_is_not_Fortran_contiguou, __pyx_k_ndarray_is_not_Fortran_contiguou, sizeof(__pyx_k_ndarray_is_not_Fortran_contiguou), 0, 1, 0, 0}, + {&__pyx_n_s_nkx, __pyx_k_nkx, sizeof(__pyx_k_nkx), 0, 0, 1, 1}, + {&__pyx_n_s_nky, __pyx_k_nky, sizeof(__pyx_k_nky), 0, 0, 1, 1}, + {&__pyx_n_s_nkz, __pyx_k_nkz, sizeof(__pyx_k_nkz), 0, 0, 1, 1}, + {&__pyx_n_s_normalize_by_kernel, __pyx_k_normalize_by_kernel, sizeof(__pyx_k_normalize_by_kernel), 0, 0, 1, 1}, + {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1}, + {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, + {&__pyx_kp_s_numpy_core_multiarray_failed_to, __pyx_k_numpy_core_multiarray_failed_to, sizeof(__pyx_k_numpy_core_multiarray_failed_to), 0, 0, 1, 0}, + {&__pyx_kp_s_numpy_core_umath_failed_to_impor, __pyx_k_numpy_core_umath_failed_to_impor, sizeof(__pyx_k_numpy_core_umath_failed_to_impor), 0, 0, 1, 0}, + {&__pyx_n_s_nx, __pyx_k_nx, sizeof(__pyx_k_nx), 0, 0, 1, 1}, + {&__pyx_n_s_ny, __pyx_k_ny, sizeof(__pyx_k_ny), 0, 0, 1, 1}, + {&__pyx_n_s_nz, __pyx_k_nz, sizeof(__pyx_k_nz), 0, 0, 1, 1}, + {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, + {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, + {&__pyx_n_s_top, __pyx_k_top, sizeof(__pyx_k_top), 0, 0, 1, 1}, + {&__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_k_unknown_dtype_code_in_numpy_pxd, sizeof(__pyx_k_unknown_dtype_code_in_numpy_pxd), 0, 1, 0, 0}, + {&__pyx_n_s_val, __pyx_k_val, sizeof(__pyx_k_val), 0, 0, 1, 1}, + {&__pyx_n_s_wkx, __pyx_k_wkx, sizeof(__pyx_k_wkx), 0, 0, 1, 1}, + {&__pyx_n_s_wky, __pyx_k_wky, sizeof(__pyx_k_wky), 0, 0, 1, 1}, + {&__pyx_n_s_wkz, __pyx_k_wkz, sizeof(__pyx_k_wkz), 0, 0, 1, 1}, + {0, 0, 0, 0, 0, 0, 0} +}; +static int __Pyx_InitCachedBuiltins(void) { + __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(0, 24, __pyx_L1_error) + __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 43, __pyx_L1_error) + __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) __PYX_ERR(1, 823, __pyx_L1_error) + __pyx_builtin_ImportError = __Pyx_GetBuiltinName(__pyx_n_s_ImportError); if (!__pyx_builtin_ImportError) __PYX_ERR(1, 1013, __pyx_L1_error) + return 0; + __pyx_L1_error:; + return -1; +} + +static int __Pyx_InitCachedConstants(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); + + /* "astropy/convolution/boundary_fill.pyx":24 + * + * if g.shape[0] % 2 != 1: + * raise ValueError("Convolution kernel must have odd dimensions") # <<<<<<<<<<<<<< + * + * assert f.dtype == DTYPE and g.dtype == DTYPE + */ + __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_s_Convolution_kernel_must_have_odd); if (unlikely(!__pyx_tuple_)) __PYX_ERR(0, 24, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple_); + __Pyx_GIVEREF(__pyx_tuple_); + + /* 
"astropy/convolution/boundary_fill.pyx":76 + * + * if g.shape[0] % 2 != 1 or g.shape[1] % 2 != 1: + * raise ValueError("Convolution kernel must have odd dimensions") # <<<<<<<<<<<<<< + * + * assert f.dtype == DTYPE and g.dtype == DTYPE + */ + __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_Convolution_kernel_must_have_odd); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(0, 76, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__2); + __Pyx_GIVEREF(__pyx_tuple__2); + + /* "astropy/convolution/boundary_fill.pyx":135 + * + * if g.shape[0] % 2 != 1 or g.shape[1] % 2 != 1 or g.shape[2] % 2 != 1: + * raise ValueError("Convolution kernel must have odd dimensions") # <<<<<<<<<<<<<< + * + * assert f.dtype == DTYPE and g.dtype == DTYPE + */ + __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_s_Convolution_kernel_must_have_odd); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(0, 135, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__3); + __Pyx_GIVEREF(__pyx_tuple__3); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":235 + * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) + * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): + * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< + * + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) + */ + __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 235, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__4); + __Pyx_GIVEREF(__pyx_tuple__4); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":239 + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) + * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): + * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< + * + * info.buf = PyArray_DATA(self) + */ + __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 239, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__5); + __Pyx_GIVEREF(__pyx_tuple__5); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":276 + * if ((descr.byteorder == c'>' and little_endian) or + * (descr.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< + * if t == NPY_BYTE: f = "b" + * elif t == NPY_UBYTE: f = "B" + */ + __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 276, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__6); + __Pyx_GIVEREF(__pyx_tuple__6); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":823 + * + * if (end - f) - (new_offset - offset[0]) < 15: + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< + * + * if ((child.byteorder == c'>' and little_endian) or + */ + __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 823, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__7); + __Pyx_GIVEREF(__pyx_tuple__7); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":827 + * if ((child.byteorder == c'>' 
and little_endian) or + * (child.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< + * # One could encode it in the format string and have Cython + * # complain instead, BUT: < and > in format strings also imply + */ + __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 827, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__8); + __Pyx_GIVEREF(__pyx_tuple__8); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":847 + * t = child.type_num + * if end - f < 5: + * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< + * + * # Until ticket #99 is fixed, use integers to avoid warnings + */ + __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 847, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__9); + __Pyx_GIVEREF(__pyx_tuple__9); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1013 + * _import_array() + * except Exception: + * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<< + * + * cdef inline int import_umath() except -1: + */ + __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_multiarray_failed_to); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 1013, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__10); + __Pyx_GIVEREF(__pyx_tuple__10); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1019 + * _import_umath() + * except Exception: + * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< + * + * cdef inline int import_ufunc() except -1: + */ + __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 1019, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__11); + __Pyx_GIVEREF(__pyx_tuple__11); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1025 + * _import_umath() + * except Exception: + * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< + */ + __pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 1025, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__12); + __Pyx_GIVEREF(__pyx_tuple__12); + + /* "astropy/convolution/boundary_fill.pyx":17 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve1d_boundary_fill(np.ndarray[DTYPE_t, ndim=1] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=1] g, + * float fill_value, + */ + __pyx_tuple__13 = PyTuple_Pack(17, __pyx_n_s_f, __pyx_n_s_g, __pyx_n_s_fill_value, __pyx_n_s_normalize_by_kernel, __pyx_n_s_nx, __pyx_n_s_nkx, __pyx_n_s_wkx, __pyx_n_s_conv, __pyx_n_s_i, __pyx_n_s_iii, __pyx_n_s_ii, __pyx_n_s_iimin, __pyx_n_s_iimax, __pyx_n_s_top, __pyx_n_s_bot, __pyx_n_s_ker, __pyx_n_s_val); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(0, 17, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__13); + __Pyx_GIVEREF(__pyx_tuple__13); + __pyx_codeobj__14 = (PyObject*)__Pyx_PyCode_New(4, 0, 17, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__13, __pyx_empty_tuple, __pyx_empty_tuple, 
__pyx_kp_s_astropy_convolution_boundary_fil, __pyx_n_s_convolve1d_boundary_fill, 17, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__14)) __PYX_ERR(0, 17, __pyx_L1_error) + + /* "astropy/convolution/boundary_fill.pyx":69 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve2d_boundary_fill(np.ndarray[DTYPE_t, ndim=2] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=2] g, + * float fill_value, + */ + __pyx_tuple__15 = PyTuple_Pack(25, __pyx_n_s_f, __pyx_n_s_g, __pyx_n_s_fill_value, __pyx_n_s_normalize_by_kernel, __pyx_n_s_nx, __pyx_n_s_ny, __pyx_n_s_nkx, __pyx_n_s_nky, __pyx_n_s_wkx, __pyx_n_s_wky, __pyx_n_s_conv, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_iii, __pyx_n_s_jjj, __pyx_n_s_ii, __pyx_n_s_jj, __pyx_n_s_iimin, __pyx_n_s_iimax, __pyx_n_s_jjmin, __pyx_n_s_jjmax, __pyx_n_s_top, __pyx_n_s_bot, __pyx_n_s_ker, __pyx_n_s_val); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(0, 69, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__15); + __Pyx_GIVEREF(__pyx_tuple__15); + __pyx_codeobj__16 = (PyObject*)__Pyx_PyCode_New(4, 0, 25, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__15, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_astropy_convolution_boundary_fil, __pyx_n_s_convolve2d_boundary_fill, 69, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__16)) __PYX_ERR(0, 69, __pyx_L1_error) + + /* "astropy/convolution/boundary_fill.pyx":129 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve3d_boundary_fill(np.ndarray[DTYPE_t, ndim=3] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=3] g, + * float fill_value, + */ + __pyx_tuple__17 = PyTuple_Pack(33, __pyx_n_s_f, __pyx_n_s_g, __pyx_n_s_fill_value, __pyx_n_s_normalize_by_kernel, __pyx_n_s_nx, __pyx_n_s_ny, __pyx_n_s_nz, __pyx_n_s_nkx, __pyx_n_s_nky, __pyx_n_s_nkz, __pyx_n_s_wkx, __pyx_n_s_wky, __pyx_n_s_wkz, __pyx_n_s_conv, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_k, __pyx_n_s_iii, __pyx_n_s_jjj, __pyx_n_s_kkk, __pyx_n_s_ii, __pyx_n_s_jj, __pyx_n_s_kk, __pyx_n_s_iimin, __pyx_n_s_iimax, __pyx_n_s_jjmin, __pyx_n_s_jjmax, __pyx_n_s_kkmin, __pyx_n_s_kkmax, __pyx_n_s_top, __pyx_n_s_bot, __pyx_n_s_ker, __pyx_n_s_val); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(0, 129, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__17); + __Pyx_GIVEREF(__pyx_tuple__17); + __pyx_codeobj__18 = (PyObject*)__Pyx_PyCode_New(4, 0, 33, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__17, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_astropy_convolution_boundary_fil, __pyx_n_s_convolve3d_boundary_fill, 129, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__18)) __PYX_ERR(0, 129, __pyx_L1_error) + __Pyx_RefNannyFinishContext(); + return 0; + __pyx_L1_error:; + __Pyx_RefNannyFinishContext(); + return -1; +} + +static int __Pyx_InitGlobals(void) { + if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); + return 0; + __pyx_L1_error:; + return -1; +} + +#if PY_MAJOR_VERSION < 3 +PyMODINIT_FUNC initboundary_fill(void); /*proto*/ +PyMODINIT_FUNC initboundary_fill(void) +#else +PyMODINIT_FUNC PyInit_boundary_fill(void); /*proto*/ +PyMODINIT_FUNC PyInit_boundary_fill(void) +#if CYTHON_PEP489_MULTI_PHASE_INIT +{ + return PyModuleDef_Init(&__pyx_moduledef); +} +static int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name) { + PyObject *value = PyObject_GetAttrString(spec, from_name); + int result = 0; + if (likely(value)) { + result = 
PyDict_SetItemString(moddict, to_name, value); + Py_DECREF(value); + } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { + PyErr_Clear(); + } else { + result = -1; + } + return result; +} +static PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { + PyObject *module = NULL, *moddict, *modname; + if (__pyx_m) + return __Pyx_NewRef(__pyx_m); + modname = PyObject_GetAttrString(spec, "name"); + if (unlikely(!modname)) goto bad; + module = PyModule_NewObject(modname); + Py_DECREF(modname); + if (unlikely(!module)) goto bad; + moddict = PyModule_GetDict(module); + if (unlikely(!moddict)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__") < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__") < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__") < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__") < 0)) goto bad; + return module; +bad: + Py_XDECREF(module); + return NULL; +} + + +static int __pyx_pymod_exec_boundary_fill(PyObject *__pyx_pyinit_module) +#endif +#endif +{ + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + __Pyx_RefNannyDeclarations + #if CYTHON_PEP489_MULTI_PHASE_INIT + if (__pyx_m && __pyx_m == __pyx_pyinit_module) return 0; + #endif + #if CYTHON_REFNANNY + __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); + if (!__Pyx_RefNanny) { + PyErr_Clear(); + __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); + if (!__Pyx_RefNanny) + Py_FatalError("failed to import 'refnanny' module"); + } + #endif + __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_boundary_fill(void)", 0); + if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) + #ifdef __Pyx_CyFunction_USED + if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_FusedFunction_USED + if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_Coroutine_USED + if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_Generator_USED + if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_AsyncGen_USED + if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_StopAsyncIteration_USED + if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + /*--- Library function declarations ---*/ + /*--- Threads initialization code ---*/ + #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS + #ifdef WITH_THREAD /* Python build with threading support? 
*/ + PyEval_InitThreads(); + #endif + #endif + /*--- Module creation code ---*/ + #if CYTHON_PEP489_MULTI_PHASE_INIT + __pyx_m = __pyx_pyinit_module; + Py_INCREF(__pyx_m); + #else + #if PY_MAJOR_VERSION < 3 + __pyx_m = Py_InitModule4("boundary_fill", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); + #else + __pyx_m = PyModule_Create(&__pyx_moduledef); + #endif + if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) + Py_INCREF(__pyx_d); + __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) + #if CYTHON_COMPILING_IN_PYPY + Py_INCREF(__pyx_b); + #endif + if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); + /*--- Initialize various global constants etc. ---*/ + if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) + if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + if (__pyx_module_is_main_astropy__convolution__boundary_fill) { + if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + } + #if PY_MAJOR_VERSION >= 3 + { + PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) + if (!PyDict_GetItemString(modules, "astropy.convolution.boundary_fill")) { + if (unlikely(PyDict_SetItemString(modules, "astropy.convolution.boundary_fill", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) + } + } + #endif + /*--- Builtin init code ---*/ + if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + /*--- Constants init code ---*/ + if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + /*--- Global init code ---*/ + /*--- Variable export code ---*/ + /*--- Function export code ---*/ + /*--- Type init code ---*/ + /*--- Type import code ---*/ + __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", + #if CYTHON_COMPILING_IN_PYPY + sizeof(PyTypeObject), + #else + sizeof(PyHeapTypeObject), + #endif + 0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) __PYX_ERR(2, 9, __pyx_L1_error) + __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) __PYX_ERR(1, 163, __pyx_L1_error) + __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) __PYX_ERR(1, 185, __pyx_L1_error) + __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) __PYX_ERR(1, 189, __pyx_L1_error) + __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) __PYX_ERR(1, 198, __pyx_L1_error) + __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) __PYX_ERR(1, 885, __pyx_L1_error) + /*--- Variable import code ---*/ + /*--- Function import code ---*/ + /*--- Execution code ---*/ + #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) + if (__Pyx_patch_abc() < 0) 
__PYX_ERR(0, 1, __pyx_L1_error) + #endif + + /* "astropy/convolution/boundary_fill.pyx":3 + * # Licensed under a 3-clause BSD style license - see LICENSE.rst + * from __future__ import division + * import numpy as np # <<<<<<<<<<<<<< + * cimport numpy as np + * + */ + __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 3, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "astropy/convolution/boundary_fill.pyx":7 + * + * + * DTYPE = np.float # <<<<<<<<<<<<<< + * ctypedef np.float_t DTYPE_t + * + */ + __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_float); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + if (PyDict_SetItem(__pyx_d, __pyx_n_s_DTYPE, __pyx_t_2) < 0) __PYX_ERR(0, 7, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "astropy/convolution/boundary_fill.pyx":17 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve1d_boundary_fill(np.ndarray[DTYPE_t, ndim=1] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=1] g, + * float fill_value, + */ + __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_7astropy_11convolution_13boundary_fill_1convolve1d_boundary_fill, NULL, __pyx_n_s_astropy_convolution_boundary_fil_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_convolve1d_boundary_fill, __pyx_t_2) < 0) __PYX_ERR(0, 17, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "astropy/convolution/boundary_fill.pyx":69 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve2d_boundary_fill(np.ndarray[DTYPE_t, ndim=2] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=2] g, + * float fill_value, + */ + __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_7astropy_11convolution_13boundary_fill_3convolve2d_boundary_fill, NULL, __pyx_n_s_astropy_convolution_boundary_fil_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 69, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_convolve2d_boundary_fill, __pyx_t_2) < 0) __PYX_ERR(0, 69, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "astropy/convolution/boundary_fill.pyx":129 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve3d_boundary_fill(np.ndarray[DTYPE_t, ndim=3] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=3] g, + * float fill_value, + */ + __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_7astropy_11convolution_13boundary_fill_5convolve3d_boundary_fill, NULL, __pyx_n_s_astropy_convolution_boundary_fil_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 129, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_convolve3d_boundary_fill, __pyx_t_2) < 0) __PYX_ERR(0, 129, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "astropy/convolution/boundary_fill.pyx":1 + * # Licensed under a 3-clause BSD style license - see LICENSE.rst # <<<<<<<<<<<<<< + * from __future__ import division + * import numpy as np + */ + __pyx_t_2 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (PyDict_SetItem(__pyx_d, 
__pyx_n_s_test, __pyx_t_2) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1021 + * raise ImportError("numpy.core.umath failed to import") + * + * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + + /*--- Wrapped vars code ---*/ + + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + if (__pyx_m) { + if (__pyx_d) { + __Pyx_AddTraceback("init astropy.convolution.boundary_fill", 0, __pyx_lineno, __pyx_filename); + } + Py_DECREF(__pyx_m); __pyx_m = 0; + } else if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_ImportError, "init astropy.convolution.boundary_fill"); + } + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + #if CYTHON_PEP489_MULTI_PHASE_INIT + return (__pyx_m != NULL) ? 0 : -1; + #elif PY_MAJOR_VERSION >= 3 + return __pyx_m; + #else + return; + #endif +} + +/* --- Runtime support code --- */ +/* Refnanny */ +#if CYTHON_REFNANNY +static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { + PyObject *m = NULL, *p = NULL; + void *r = NULL; + m = PyImport_ImportModule((char *)modname); + if (!m) goto end; + p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); + if (!p) goto end; + r = PyLong_AsVoidPtr(p); +end: + Py_XDECREF(p); + Py_XDECREF(m); + return (__Pyx_RefNannyAPIStruct *)r; +} +#endif + +/* GetBuiltinName */ +static PyObject *__Pyx_GetBuiltinName(PyObject *name) { + PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); + if (unlikely(!result)) { + PyErr_Format(PyExc_NameError, +#if PY_MAJOR_VERSION >= 3 + "name '%U' is not defined", name); +#else + "name '%.200s' is not defined", PyString_AS_STRING(name)); +#endif + } + return result; +} + +/* RaiseArgTupleInvalid */ +static void __Pyx_RaiseArgtupleInvalid( + const char* func_name, + int exact, + Py_ssize_t num_min, + Py_ssize_t num_max, + Py_ssize_t num_found) +{ + Py_ssize_t num_expected; + const char *more_or_less; + if (num_found < num_min) { + num_expected = num_min; + more_or_less = "at least"; + } else { + num_expected = num_max; + more_or_less = "at most"; + } + if (exact) { + more_or_less = "exactly"; + } + PyErr_Format(PyExc_TypeError, + "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", + func_name, more_or_less, num_expected, + (num_expected == 1) ? 
"" : "s", num_found); +} + +/* RaiseDoubleKeywords */ +static void __Pyx_RaiseDoubleKeywordsError( + const char* func_name, + PyObject* kw_name) +{ + PyErr_Format(PyExc_TypeError, + #if PY_MAJOR_VERSION >= 3 + "%s() got multiple values for keyword argument '%U'", func_name, kw_name); + #else + "%s() got multiple values for keyword argument '%s'", func_name, + PyString_AsString(kw_name)); + #endif +} + +/* ParseKeywords */ +static int __Pyx_ParseOptionalKeywords( + PyObject *kwds, + PyObject **argnames[], + PyObject *kwds2, + PyObject *values[], + Py_ssize_t num_pos_args, + const char* function_name) +{ + PyObject *key = 0, *value = 0; + Py_ssize_t pos = 0; + PyObject*** name; + PyObject*** first_kw_arg = argnames + num_pos_args; + while (PyDict_Next(kwds, &pos, &key, &value)) { + name = first_kw_arg; + while (*name && (**name != key)) name++; + if (*name) { + values[name-argnames] = value; + continue; + } + name = first_kw_arg; + #if PY_MAJOR_VERSION < 3 + if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { + while (*name) { + if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) + && _PyString_Eq(**name, key)) { + values[name-argnames] = value; + break; + } + name++; + } + if (*name) continue; + else { + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + if ((**argname == key) || ( + (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) + && _PyString_Eq(**argname, key))) { + goto arg_passed_twice; + } + argname++; + } + } + } else + #endif + if (likely(PyUnicode_Check(key))) { + while (*name) { + int cmp = (**name == key) ? 0 : + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : + #endif + PyUnicode_Compare(**name, key); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) { + values[name-argnames] = value; + break; + } + name++; + } + if (*name) continue; + else { + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + int cmp = (**argname == key) ? 0 : + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 
1 : + #endif + PyUnicode_Compare(**argname, key); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) goto arg_passed_twice; + argname++; + } + } + } else + goto invalid_keyword_type; + if (kwds2) { + if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; + } else { + goto invalid_keyword; + } + } + return 0; +arg_passed_twice: + __Pyx_RaiseDoubleKeywordsError(function_name, key); + goto bad; +invalid_keyword_type: + PyErr_Format(PyExc_TypeError, + "%.200s() keywords must be strings", function_name); + goto bad; +invalid_keyword: + PyErr_Format(PyExc_TypeError, + #if PY_MAJOR_VERSION < 3 + "%.200s() got an unexpected keyword argument '%.200s'", + function_name, PyString_AsString(key)); + #else + "%s() got an unexpected keyword argument '%U'", + function_name, key); + #endif +bad: + return -1; +} + +/* ArgTypeTest */ +static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) +{ + if (unlikely(!type)) { + PyErr_SetString(PyExc_SystemError, "Missing type object"); + return 0; + } + else if (exact) { + #if PY_MAJOR_VERSION == 2 + if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; + #endif + } + else { + if (likely(__Pyx_TypeCheck(obj, type))) return 1; + } + PyErr_Format(PyExc_TypeError, + "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", + name, type->tp_name, Py_TYPE(obj)->tp_name); + return 0; +} + +/* IsLittleEndian */ +static CYTHON_INLINE int __Pyx_Is_Little_Endian(void) +{ + union { + uint32_t u32; + uint8_t u8[4]; + } S; + S.u32 = 0x01020304; + return S.u8[0] == 4; +} + +/* BufferFormatCheck */ +static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, + __Pyx_BufFmt_StackElem* stack, + __Pyx_TypeInfo* type) { + stack[0].field = &ctx->root; + stack[0].parent_offset = 0; + ctx->root.type = type; + ctx->root.name = "buffer dtype"; + ctx->root.offset = 0; + ctx->head = stack; + ctx->head->field = &ctx->root; + ctx->fmt_offset = 0; + ctx->head->parent_offset = 0; + ctx->new_packmode = '@'; + ctx->enc_packmode = '@'; + ctx->new_count = 1; + ctx->enc_count = 0; + ctx->enc_type = 0; + ctx->is_complex = 0; + ctx->is_valid_array = 0; + ctx->struct_alignment = 0; + while (type->typegroup == 'S') { + ++ctx->head; + ctx->head->field = type->fields; + ctx->head->parent_offset = 0; + type = type->fields->type; + } +} +static int __Pyx_BufFmt_ParseNumber(const char** ts) { + int count; + const char* t = *ts; + if (*t < '0' || *t > '9') { + return -1; + } else { + count = *t++ - '0'; + while (*t >= '0' && *t < '9') { + count *= 10; + count += *t++ - '0'; + } + } + *ts = t; + return count; +} +static int __Pyx_BufFmt_ExpectNumber(const char **ts) { + int number = __Pyx_BufFmt_ParseNumber(ts); + if (number == -1) + PyErr_Format(PyExc_ValueError,\ + "Does not understand character buffer dtype format string ('%c')", **ts); + return number; +} +static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { + PyErr_Format(PyExc_ValueError, + "Unexpected format string character: '%c'", ch); +} +static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { + switch (ch) { + case 'c': return "'char'"; + case 'b': return "'signed char'"; + case 'B': return "'unsigned char'"; + case 'h': return "'short'"; + case 'H': return "'unsigned short'"; + case 'i': return "'int'"; + case 'I': return "'unsigned int'"; + case 'l': return "'long'"; + case 'L': return "'unsigned long'"; + case 'q': return "'long long'"; + case 'Q': return "'unsigned long long'"; + case 'f': return (is_complex ? 
"'complex float'" : "'float'"); + case 'd': return (is_complex ? "'complex double'" : "'double'"); + case 'g': return (is_complex ? "'complex long double'" : "'long double'"); + case 'T': return "a struct"; + case 'O': return "Python object"; + case 'P': return "a pointer"; + case 's': case 'p': return "a string"; + case 0: return "end"; + default: return "unparseable format string"; + } +} +static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { + switch (ch) { + case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return 2; + case 'i': case 'I': case 'l': case 'L': return 4; + case 'q': case 'Q': return 8; + case 'f': return (is_complex ? 8 : 4); + case 'd': return (is_complex ? 16 : 8); + case 'g': { + PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); + return 0; + } + case 'O': case 'P': return sizeof(void*); + default: + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } +} +static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { + switch (ch) { + case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return sizeof(short); + case 'i': case 'I': return sizeof(int); + case 'l': case 'L': return sizeof(long); + #ifdef HAVE_LONG_LONG + case 'q': case 'Q': return sizeof(PY_LONG_LONG); + #endif + case 'f': return sizeof(float) * (is_complex ? 2 : 1); + case 'd': return sizeof(double) * (is_complex ? 2 : 1); + case 'g': return sizeof(long double) * (is_complex ? 2 : 1); + case 'O': case 'P': return sizeof(void*); + default: { + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } + } +} +typedef struct { char c; short x; } __Pyx_st_short; +typedef struct { char c; int x; } __Pyx_st_int; +typedef struct { char c; long x; } __Pyx_st_long; +typedef struct { char c; float x; } __Pyx_st_float; +typedef struct { char c; double x; } __Pyx_st_double; +typedef struct { char c; long double x; } __Pyx_st_longdouble; +typedef struct { char c; void *x; } __Pyx_st_void_p; +#ifdef HAVE_LONG_LONG +typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; +#endif +static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { + switch (ch) { + case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); + case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); + case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); +#ifdef HAVE_LONG_LONG + case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); +#endif + case 'f': return sizeof(__Pyx_st_float) - sizeof(float); + case 'd': return sizeof(__Pyx_st_double) - sizeof(double); + case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); + case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); + default: + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } +} +/* These are for computing the padding at the end of the struct to align + on the first member of the struct. This will probably the same as above, + but we don't have any guarantees. 
+ */ +typedef struct { short x; char c; } __Pyx_pad_short; +typedef struct { int x; char c; } __Pyx_pad_int; +typedef struct { long x; char c; } __Pyx_pad_long; +typedef struct { float x; char c; } __Pyx_pad_float; +typedef struct { double x; char c; } __Pyx_pad_double; +typedef struct { long double x; char c; } __Pyx_pad_longdouble; +typedef struct { void *x; char c; } __Pyx_pad_void_p; +#ifdef HAVE_LONG_LONG +typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; +#endif +static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { + switch (ch) { + case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); + case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); + case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); +#ifdef HAVE_LONG_LONG + case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); +#endif + case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); + case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); + case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); + case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); + default: + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } +} +static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { + switch (ch) { + case 'c': + return 'H'; + case 'b': case 'h': case 'i': + case 'l': case 'q': case 's': case 'p': + return 'I'; + case 'B': case 'H': case 'I': case 'L': case 'Q': + return 'U'; + case 'f': case 'd': case 'g': + return (is_complex ? 'C' : 'R'); + case 'O': + return 'O'; + case 'P': + return 'P'; + default: { + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } + } +} +static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { + if (ctx->head == NULL || ctx->head->field == &ctx->root) { + const char* expected; + const char* quote; + if (ctx->head == NULL) { + expected = "end"; + quote = ""; + } else { + expected = ctx->head->field->type->name; + quote = "'"; + } + PyErr_Format(PyExc_ValueError, + "Buffer dtype mismatch, expected %s%s%s but got %s", + quote, expected, quote, + __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); + } else { + __Pyx_StructField* field = ctx->head->field; + __Pyx_StructField* parent = (ctx->head - 1)->field; + PyErr_Format(PyExc_ValueError, + "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", + field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), + parent->type->name, field->name); + } +} +static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { + char group; + size_t size, offset, arraysize = 1; + if (ctx->enc_type == 0) return 0; + if (ctx->head->field->type->arraysize[0]) { + int i, ndim = 0; + if (ctx->enc_type == 's' || ctx->enc_type == 'p') { + ctx->is_valid_array = ctx->head->field->type->ndim == 1; + ndim = 1; + if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { + PyErr_Format(PyExc_ValueError, + "Expected a dimension of size %zu, got %zu", + ctx->head->field->type->arraysize[0], ctx->enc_count); + return -1; + } + } + if (!ctx->is_valid_array) { + PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", + ctx->head->field->type->ndim, ndim); + return -1; + } + for (i = 0; i < ctx->head->field->type->ndim; i++) { + arraysize *= ctx->head->field->type->arraysize[i]; + } + ctx->is_valid_array = 0; + ctx->enc_count = 1; + } + group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, 
ctx->is_complex); + do { + __Pyx_StructField* field = ctx->head->field; + __Pyx_TypeInfo* type = field->type; + if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { + size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); + } else { + size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); + } + if (ctx->enc_packmode == '@') { + size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); + size_t align_mod_offset; + if (align_at == 0) return -1; + align_mod_offset = ctx->fmt_offset % align_at; + if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; + if (ctx->struct_alignment == 0) + ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, + ctx->is_complex); + } + if (type->size != size || type->typegroup != group) { + if (type->typegroup == 'C' && type->fields != NULL) { + size_t parent_offset = ctx->head->parent_offset + field->offset; + ++ctx->head; + ctx->head->field = type->fields; + ctx->head->parent_offset = parent_offset; + continue; + } + if ((type->typegroup == 'H' || group == 'H') && type->size == size) { + } else { + __Pyx_BufFmt_RaiseExpected(ctx); + return -1; + } + } + offset = ctx->head->parent_offset + field->offset; + if (ctx->fmt_offset != offset) { + PyErr_Format(PyExc_ValueError, + "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", + (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); + return -1; + } + ctx->fmt_offset += size; + if (arraysize) + ctx->fmt_offset += (arraysize - 1) * size; + --ctx->enc_count; + while (1) { + if (field == &ctx->root) { + ctx->head = NULL; + if (ctx->enc_count != 0) { + __Pyx_BufFmt_RaiseExpected(ctx); + return -1; + } + break; + } + ctx->head->field = ++field; + if (field->type == NULL) { + --ctx->head; + field = ctx->head->field; + continue; + } else if (field->type->typegroup == 'S') { + size_t parent_offset = ctx->head->parent_offset + field->offset; + if (field->type->fields->type == NULL) continue; + field = field->type->fields; + ++ctx->head; + ctx->head->field = field; + ctx->head->parent_offset = parent_offset; + break; + } else { + break; + } + } + } while (ctx->enc_count); + ctx->enc_type = 0; + ctx->is_complex = 0; + return 0; +} +static PyObject * +__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) +{ + const char *ts = *tsp; + int i = 0, number; + int ndim = ctx->head->field->type->ndim; +; + ++ts; + if (ctx->new_count != 1) { + PyErr_SetString(PyExc_ValueError, + "Cannot handle repeated arrays in format string"); + return NULL; + } + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + while (*ts && *ts != ')') { + switch (*ts) { + case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; + default: break; + } + number = __Pyx_BufFmt_ExpectNumber(&ts); + if (number == -1) return NULL; + if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) + return PyErr_Format(PyExc_ValueError, + "Expected a dimension of size %zu, got %d", + ctx->head->field->type->arraysize[i], number); + if (*ts != ',' && *ts != ')') + return PyErr_Format(PyExc_ValueError, + "Expected a comma in format string, got '%c'", *ts); + if (*ts == ',') ts++; + i++; + } + if (i != ndim) + return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", + ctx->head->field->type->ndim, i); + if (!*ts) { + PyErr_SetString(PyExc_ValueError, + "Unexpected end of format string, expected ')'"); + return NULL; + } + 
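/* Every extent in the '(...)' array spec matched the element type's declared arraysize, so mark the array spec as validated and reset the repeat count for the element type character that follows. */ +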
ctx->is_valid_array = 1; + ctx->new_count = 1; + *tsp = ++ts; + return Py_None; +} +static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { + int got_Z = 0; + while (1) { + switch(*ts) { + case 0: + if (ctx->enc_type != 0 && ctx->head == NULL) { + __Pyx_BufFmt_RaiseExpected(ctx); + return NULL; + } + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + if (ctx->head != NULL) { + __Pyx_BufFmt_RaiseExpected(ctx); + return NULL; + } + return ts; + case ' ': + case '\r': + case '\n': + ++ts; + break; + case '<': + if (!__Pyx_Is_Little_Endian()) { + PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); + return NULL; + } + ctx->new_packmode = '='; + ++ts; + break; + case '>': + case '!': + if (__Pyx_Is_Little_Endian()) { + PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); + return NULL; + } + ctx->new_packmode = '='; + ++ts; + break; + case '=': + case '@': + case '^': + ctx->new_packmode = *ts++; + break; + case 'T': + { + const char* ts_after_sub; + size_t i, struct_count = ctx->new_count; + size_t struct_alignment = ctx->struct_alignment; + ctx->new_count = 1; + ++ts; + if (*ts != '{') { + PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); + return NULL; + } + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->enc_type = 0; + ctx->enc_count = 0; + ctx->struct_alignment = 0; + ++ts; + ts_after_sub = ts; + for (i = 0; i != struct_count; ++i) { + ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); + if (!ts_after_sub) return NULL; + } + ts = ts_after_sub; + if (struct_alignment) ctx->struct_alignment = struct_alignment; + } + break; + case '}': + { + size_t alignment = ctx->struct_alignment; + ++ts; + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->enc_type = 0; + if (alignment && ctx->fmt_offset % alignment) { + ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); + } + } + return ts; + case 'x': + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->fmt_offset += ctx->new_count; + ctx->new_count = 1; + ctx->enc_count = 0; + ctx->enc_type = 0; + ctx->enc_packmode = ctx->new_packmode; + ++ts; + break; + case 'Z': + got_Z = 1; + ++ts; + if (*ts != 'f' && *ts != 'd' && *ts != 'g') { + __Pyx_BufFmt_RaiseUnexpectedChar('Z'); + return NULL; + } + case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': + case 'l': case 'L': case 'q': case 'Q': + case 'f': case 'd': case 'g': + case 'O': case 'p': + if (ctx->enc_type == *ts && got_Z == ctx->is_complex && + ctx->enc_packmode == ctx->new_packmode) { + ctx->enc_count += ctx->new_count; + ctx->new_count = 1; + got_Z = 0; + ++ts; + break; + } + case 's': + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->enc_count = ctx->new_count; + ctx->enc_packmode = ctx->new_packmode; + ctx->enc_type = *ts; + ctx->is_complex = got_Z; + ++ts; + ctx->new_count = 1; + got_Z = 0; + break; + case ':': + ++ts; + while(*ts != ':') ++ts; + ++ts; + break; + case '(': + if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; + break; + default: + { + int number = __Pyx_BufFmt_ExpectNumber(&ts); + if (number == -1) return NULL; + ctx->new_count = (size_t)number; + } + } + } +} + +/* BufferGetAndValidate */ + static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { + if (unlikely(info->buf == NULL)) return; + if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL; + __Pyx_ReleaseBuffer(info); +} +static void 
__Pyx_ZeroBuffer(Py_buffer* buf) { + buf->buf = NULL; + buf->obj = NULL; + buf->strides = __Pyx_zeros; + buf->shape = __Pyx_zeros; + buf->suboffsets = __Pyx_minusones; +} +static int __Pyx__GetBufferAndValidate( + Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, + int nd, int cast, __Pyx_BufFmt_StackElem* stack) +{ + buf->buf = NULL; + if (unlikely(__Pyx_GetBuffer(obj, buf, flags) == -1)) { + __Pyx_ZeroBuffer(buf); + return -1; + } + if (unlikely(buf->ndim != nd)) { + PyErr_Format(PyExc_ValueError, + "Buffer has wrong number of dimensions (expected %d, got %d)", + nd, buf->ndim); + goto fail; + } + if (!cast) { + __Pyx_BufFmt_Context ctx; + __Pyx_BufFmt_Init(&ctx, stack, dtype); + if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; + } + if (unlikely((unsigned)buf->itemsize != dtype->size)) { + PyErr_Format(PyExc_ValueError, + "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)", + buf->itemsize, (buf->itemsize > 1) ? "s" : "", + dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : ""); + goto fail; + } + if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; + return 0; +fail:; + __Pyx_SafeReleaseBuffer(buf); + return -1; +} + +/* None */ + static CYTHON_INLINE long __Pyx_mod_long(long a, long b) { + long r = a % b; + r += ((r != 0) & ((r ^ b) < 0)) * b; + return r; +} + +/* PyObjectCall */ + #if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { + PyObject *result; + ternaryfunc call = func->ob_type->tp_call; + if (unlikely(!call)) + return PyObject_Call(func, arg, kw); + if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) + return NULL; + result = (*call)(func, arg, kw); + Py_LeaveRecursiveCall(); + if (unlikely(!result) && unlikely(!PyErr_Occurred())) { + PyErr_SetString( + PyExc_SystemError, + "NULL result without error in PyObject_Call"); + } + return result; +} +#endif + +/* PyErrFetchRestore */ + #if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + tmp_type = tstate->curexc_type; + tmp_value = tstate->curexc_value; + tmp_tb = tstate->curexc_traceback; + tstate->curexc_type = type; + tstate->curexc_value = value; + tstate->curexc_traceback = tb; + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +} +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { + *type = tstate->curexc_type; + *value = tstate->curexc_value; + *tb = tstate->curexc_traceback; + tstate->curexc_type = 0; + tstate->curexc_value = 0; + tstate->curexc_traceback = 0; +} +#endif + +/* RaiseException */ + #if PY_MAJOR_VERSION < 3 +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, + CYTHON_UNUSED PyObject *cause) { + __Pyx_PyThreadState_declare + Py_XINCREF(type); + if (!value || value == Py_None) + value = NULL; + else + Py_INCREF(value); + if (!tb || tb == Py_None) + tb = NULL; + else { + Py_INCREF(tb); + if (!PyTraceBack_Check(tb)) { + PyErr_SetString(PyExc_TypeError, + "raise: arg 3 must be a traceback or None"); + goto raise_error; + } + } + if (PyType_Check(type)) { +#if CYTHON_COMPILING_IN_PYPY + if (!value) { + Py_INCREF(Py_None); + value = Py_None; + } +#endif + PyErr_NormalizeException(&type, &value, &tb); + } else { + if (value) { + 
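/* Python 2 'raise' semantics: 'type' is an exception instance in this branch, so it may not carry a separate value; below, the instance becomes the value and its class becomes the exception type. */ +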
PyErr_SetString(PyExc_TypeError, + "instance exception may not have a separate value"); + goto raise_error; + } + value = type; + type = (PyObject*) Py_TYPE(type); + Py_INCREF(type); + if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { + PyErr_SetString(PyExc_TypeError, + "raise: exception class must be a subclass of BaseException"); + goto raise_error; + } + } + __Pyx_PyThreadState_assign + __Pyx_ErrRestore(type, value, tb); + return; +raise_error: + Py_XDECREF(value); + Py_XDECREF(type); + Py_XDECREF(tb); + return; +} +#else +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { + PyObject* owned_instance = NULL; + if (tb == Py_None) { + tb = 0; + } else if (tb && !PyTraceBack_Check(tb)) { + PyErr_SetString(PyExc_TypeError, + "raise: arg 3 must be a traceback or None"); + goto bad; + } + if (value == Py_None) + value = 0; + if (PyExceptionInstance_Check(type)) { + if (value) { + PyErr_SetString(PyExc_TypeError, + "instance exception may not have a separate value"); + goto bad; + } + value = type; + type = (PyObject*) Py_TYPE(value); + } else if (PyExceptionClass_Check(type)) { + PyObject *instance_class = NULL; + if (value && PyExceptionInstance_Check(value)) { + instance_class = (PyObject*) Py_TYPE(value); + if (instance_class != type) { + int is_subclass = PyObject_IsSubclass(instance_class, type); + if (!is_subclass) { + instance_class = NULL; + } else if (unlikely(is_subclass == -1)) { + goto bad; + } else { + type = instance_class; + } + } + } + if (!instance_class) { + PyObject *args; + if (!value) + args = PyTuple_New(0); + else if (PyTuple_Check(value)) { + Py_INCREF(value); + args = value; + } else + args = PyTuple_Pack(1, value); + if (!args) + goto bad; + owned_instance = PyObject_Call(type, args, NULL); + Py_DECREF(args); + if (!owned_instance) + goto bad; + value = owned_instance; + if (!PyExceptionInstance_Check(value)) { + PyErr_Format(PyExc_TypeError, + "calling %R should have returned an instance of " + "BaseException, not %R", + type, Py_TYPE(value)); + goto bad; + } + } + } else { + PyErr_SetString(PyExc_TypeError, + "raise: exception class must be a subclass of BaseException"); + goto bad; + } + if (cause) { + PyObject *fixed_cause; + if (cause == Py_None) { + fixed_cause = NULL; + } else if (PyExceptionClass_Check(cause)) { + fixed_cause = PyObject_CallObject(cause, NULL); + if (fixed_cause == NULL) + goto bad; + } else if (PyExceptionInstance_Check(cause)) { + fixed_cause = cause; + Py_INCREF(fixed_cause); + } else { + PyErr_SetString(PyExc_TypeError, + "exception causes must derive from " + "BaseException"); + goto bad; + } + PyException_SetCause(value, fixed_cause); + } + PyErr_SetObject(type, value); + if (tb) { +#if CYTHON_COMPILING_IN_PYPY + PyObject *tmp_type, *tmp_value, *tmp_tb; + PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); + Py_INCREF(tb); + PyErr_Restore(tmp_type, tmp_value, tb); + Py_XDECREF(tmp_tb); +#else + PyThreadState *tstate = __Pyx_PyThreadState_Current; + PyObject* tmp_tb = tstate->curexc_traceback; + if (tb != tmp_tb) { + Py_INCREF(tb); + tstate->curexc_traceback = tb; + Py_XDECREF(tmp_tb); + } +#endif + } +bad: + Py_XDECREF(owned_instance); + return; +} +#endif + +/* GetModuleGlobalName */ + static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) { + PyObject *result; +#if !CYTHON_AVOID_BORROWED_REFS + result = PyDict_GetItem(__pyx_d, name); + if (likely(result)) { + Py_INCREF(result); + } else { +#else + result = PyObject_GetItem(__pyx_d, name); + if 
(!result) { + PyErr_Clear(); +#endif + result = __Pyx_GetBuiltinName(name); + } + return result; +} + +/* None */ + static CYTHON_INLINE long __Pyx_div_long(long a, long b) { + long q = a / b; + long r = a - q*b; + q -= ((r != 0) & ((r ^ b) < 0)); + return q; +} + +/* ExtTypeTest */ + static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { + if (unlikely(!type)) { + PyErr_SetString(PyExc_SystemError, "Missing type object"); + return 0; + } + if (likely(__Pyx_TypeCheck(obj, type))) + return 1; + PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", + Py_TYPE(obj)->tp_name, type->tp_name); + return 0; +} + +/* RaiseTooManyValuesToUnpack */ + static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { + PyErr_Format(PyExc_ValueError, + "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); +} + +/* RaiseNeedMoreValuesToUnpack */ + static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { + PyErr_Format(PyExc_ValueError, + "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", + index, (index == 1) ? "" : "s"); +} + +/* RaiseNoneIterError */ + static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { + PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); +} + +/* SaveResetException */ + #if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { + #if PY_VERSION_HEX >= 0x030700A2 + *type = tstate->exc_state.exc_type; + *value = tstate->exc_state.exc_value; + *tb = tstate->exc_state.exc_traceback; + #else + *type = tstate->exc_type; + *value = tstate->exc_value; + *tb = tstate->exc_traceback; + #endif + Py_XINCREF(*type); + Py_XINCREF(*value); + Py_XINCREF(*tb); +} +static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + #if PY_VERSION_HEX >= 0x030700A2 + tmp_type = tstate->exc_state.exc_type; + tmp_value = tstate->exc_state.exc_value; + tmp_tb = tstate->exc_state.exc_traceback; + tstate->exc_state.exc_type = type; + tstate->exc_state.exc_value = value; + tstate->exc_state.exc_traceback = tb; + #else + tmp_type = tstate->exc_type; + tmp_value = tstate->exc_value; + tmp_tb = tstate->exc_traceback; + tstate->exc_type = type; + tstate->exc_value = value; + tstate->exc_traceback = tb; + #endif + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +} +#endif + +/* PyErrExceptionMatches */ + #if CYTHON_FAST_THREAD_STATE +static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { + Py_ssize_t i, n; + n = PyTuple_GET_SIZE(tuple); +#if PY_MAJOR_VERSION >= 3 + for (i=0; i<n; i++) { + if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; + } +#endif + for (i=0; i<n; i++) { + if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1; + } + return 0; +} +static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { + PyObject *exc_type = tstate->curexc_type; + if (exc_type == err) return 1; + if (unlikely(!exc_type)) return 0; + if (unlikely(PyTuple_Check(err))) + return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); + return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); +} +#endif + +/* GetException */ + #if CYTHON_FAST_THREAD_STATE +static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { +#else +static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) { +#endif + PyObject *local_type, *local_value, *local_tb; +#if CYTHON_FAST_THREAD_STATE + PyObject *tmp_type, *tmp_value, *tmp_tb; + local_type = tstate->curexc_type; + local_value = tstate->curexc_value; + local_tb = tstate->curexc_traceback; + tstate->curexc_type = 0; +
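/* Ownership of the pending exception has been moved into local_type/local_value/local_tb; the curexc slots are cleared before normalization so the thread state stays consistent. */ +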
tstate->curexc_value = 0; + tstate->curexc_traceback = 0; +#else + PyErr_Fetch(&local_type, &local_value, &local_tb); +#endif + PyErr_NormalizeException(&local_type, &local_value, &local_tb); +#if CYTHON_FAST_THREAD_STATE + if (unlikely(tstate->curexc_type)) +#else + if (unlikely(PyErr_Occurred())) +#endif + goto bad; + #if PY_MAJOR_VERSION >= 3 + if (local_tb) { + if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) + goto bad; + } + #endif + Py_XINCREF(local_tb); + Py_XINCREF(local_type); + Py_XINCREF(local_value); + *type = local_type; + *value = local_value; + *tb = local_tb; +#if CYTHON_FAST_THREAD_STATE + #if PY_VERSION_HEX >= 0x030700A2 + tmp_type = tstate->exc_state.exc_type; + tmp_value = tstate->exc_state.exc_value; + tmp_tb = tstate->exc_state.exc_traceback; + tstate->exc_state.exc_type = local_type; + tstate->exc_state.exc_value = local_value; + tstate->exc_state.exc_traceback = local_tb; + #else + tmp_type = tstate->exc_type; + tmp_value = tstate->exc_value; + tmp_tb = tstate->exc_traceback; + tstate->exc_type = local_type; + tstate->exc_value = local_value; + tstate->exc_traceback = local_tb; + #endif + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +#else + PyErr_SetExcInfo(local_type, local_value, local_tb); +#endif + return 0; +bad: + *type = 0; + *value = 0; + *tb = 0; + Py_XDECREF(local_type); + Py_XDECREF(local_value); + Py_XDECREF(local_tb); + return -1; +} + +/* Import */ + static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { + PyObject *empty_list = 0; + PyObject *module = 0; + PyObject *global_dict = 0; + PyObject *empty_dict = 0; + PyObject *list; + #if PY_MAJOR_VERSION < 3 + PyObject *py_import; + py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); + if (!py_import) + goto bad; + #endif + if (from_list) + list = from_list; + else { + empty_list = PyList_New(0); + if (!empty_list) + goto bad; + list = empty_list; + } + global_dict = PyModule_GetDict(__pyx_m); + if (!global_dict) + goto bad; + empty_dict = PyDict_New(); + if (!empty_dict) + goto bad; + { + #if PY_MAJOR_VERSION >= 3 + if (level == -1) { + if (strchr(__Pyx_MODULE_NAME, '.')) { + module = PyImport_ImportModuleLevelObject( + name, global_dict, empty_dict, list, 1); + if (!module) { + if (!PyErr_ExceptionMatches(PyExc_ImportError)) + goto bad; + PyErr_Clear(); + } + } + level = 0; + } + #endif + if (!module) { + #if PY_MAJOR_VERSION < 3 + PyObject *py_level = PyInt_FromLong(level); + if (!py_level) + goto bad; + module = PyObject_CallFunctionObjArgs(py_import, + name, global_dict, empty_dict, list, py_level, NULL); + Py_DECREF(py_level); + #else + module = PyImport_ImportModuleLevelObject( + name, global_dict, empty_dict, list, level); + #endif + } + } +bad: + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(py_import); + #endif + Py_XDECREF(empty_list); + Py_XDECREF(empty_dict); + return module; +} + +/* CLineInTraceback */ + #ifndef CYTHON_CLINE_IN_TRACEBACK +static int __Pyx_CLineForTraceback(CYTHON_UNUSED PyThreadState *tstate, int c_line) { + PyObject *use_cline; + PyObject *ptype, *pvalue, *ptraceback; +#if CYTHON_COMPILING_IN_CPYTHON + PyObject **cython_runtime_dict; +#endif + __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); +#if CYTHON_COMPILING_IN_CPYTHON + cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); + if (likely(cython_runtime_dict)) { + use_cline = PyDict_GetItem(*cython_runtime_dict, __pyx_n_s_cline_in_traceback); + } else +#endif + { + PyObject *use_cline_obj = 
__Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); + if (use_cline_obj) { + use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True; + Py_DECREF(use_cline_obj); + } else { + PyErr_Clear(); + use_cline = NULL; + } + } + if (!use_cline) { + c_line = 0; + PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); + } + else if (PyObject_Not(use_cline) != 0) { + c_line = 0; + } + __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); + return c_line; +} +#endif + +/* CodeObjectCache */ + static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { + int start = 0, mid = 0, end = count - 1; + if (end >= 0 && code_line > entries[end].code_line) { + return count; + } + while (start < end) { + mid = start + (end - start) / 2; + if (code_line < entries[mid].code_line) { + end = mid; + } else if (code_line > entries[mid].code_line) { + start = mid + 1; + } else { + return mid; + } + } + if (code_line <= entries[mid].code_line) { + return mid; + } else { + return mid + 1; + } +} +static PyCodeObject *__pyx_find_code_object(int code_line) { + PyCodeObject* code_object; + int pos; + if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { + return NULL; + } + pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); + if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { + return NULL; + } + code_object = __pyx_code_cache.entries[pos].code_object; + Py_INCREF(code_object); + return code_object; +} +static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { + int pos, i; + __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; + if (unlikely(!code_line)) { + return; + } + if (unlikely(!entries)) { + entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); + if (likely(entries)) { + __pyx_code_cache.entries = entries; + __pyx_code_cache.max_count = 64; + __pyx_code_cache.count = 1; + entries[0].code_line = code_line; + entries[0].code_object = code_object; + Py_INCREF(code_object); + } + return; + } + pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); + if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { + PyCodeObject* tmp = entries[pos].code_object; + entries[pos].code_object = code_object; + Py_DECREF(tmp); + return; + } + if (__pyx_code_cache.count == __pyx_code_cache.max_count) { + int new_max = __pyx_code_cache.max_count + 64; + entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( + __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry)); + if (unlikely(!entries)) { + return; + } + __pyx_code_cache.entries = entries; + __pyx_code_cache.max_count = new_max; + } + for (i=__pyx_code_cache.count; i>pos; i--) { + entries[i] = entries[i-1]; + } + entries[pos].code_line = code_line; + entries[pos].code_object = code_object; + __pyx_code_cache.count++; + Py_INCREF(code_object); +} + +/* AddTraceback */ + #include "compile.h" +#include "frameobject.h" +#include "traceback.h" +static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( + const char *funcname, int c_line, + int py_line, const char *filename) { + PyCodeObject *py_code = 0; + PyObject *py_srcfile = 0; + PyObject *py_funcname = 0; + #if PY_MAJOR_VERSION < 3 + py_srcfile = PyString_FromString(filename); + #else + py_srcfile = PyUnicode_FromString(filename); + #endif + 
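/* Fabricate a minimal code object whose filename, function name and line number point at the original .pyx (or C) source, so the traceback entry added below reports Cython locations rather than the generated C. */ +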
if (!py_srcfile) goto bad; + if (c_line) { + #if PY_MAJOR_VERSION < 3 + py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); + #else + py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); + #endif + } + else { + #if PY_MAJOR_VERSION < 3 + py_funcname = PyString_FromString(funcname); + #else + py_funcname = PyUnicode_FromString(funcname); + #endif + } + if (!py_funcname) goto bad; + py_code = __Pyx_PyCode_New( + 0, + 0, + 0, + 0, + 0, + __pyx_empty_bytes, /*PyObject *code,*/ + __pyx_empty_tuple, /*PyObject *consts,*/ + __pyx_empty_tuple, /*PyObject *names,*/ + __pyx_empty_tuple, /*PyObject *varnames,*/ + __pyx_empty_tuple, /*PyObject *freevars,*/ + __pyx_empty_tuple, /*PyObject *cellvars,*/ + py_srcfile, /*PyObject *filename,*/ + py_funcname, /*PyObject *name,*/ + py_line, + __pyx_empty_bytes /*PyObject *lnotab*/ + ); + Py_DECREF(py_srcfile); + Py_DECREF(py_funcname); + return py_code; +bad: + Py_XDECREF(py_srcfile); + Py_XDECREF(py_funcname); + return NULL; +} +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename) { + PyCodeObject *py_code = 0; + PyFrameObject *py_frame = 0; + PyThreadState *tstate = __Pyx_PyThreadState_Current; + if (c_line) { + c_line = __Pyx_CLineForTraceback(tstate, c_line); + } + py_code = __pyx_find_code_object(c_line ? -c_line : py_line); + if (!py_code) { + py_code = __Pyx_CreateCodeObjectForTraceback( + funcname, c_line, py_line, filename); + if (!py_code) goto bad; + __pyx_insert_code_object(c_line ? -c_line : py_line, py_code); + } + py_frame = PyFrame_New( + tstate, /*PyThreadState *tstate,*/ + py_code, /*PyCodeObject *code,*/ + __pyx_d, /*PyObject *globals,*/ + 0 /*PyObject *locals*/ + ); + if (!py_frame) goto bad; + __Pyx_PyFrame_SetLineNumber(py_frame, py_line); + PyTraceBack_Here(py_frame); +bad: + Py_XDECREF(py_code); + Py_XDECREF(py_frame); +} + +#if PY_MAJOR_VERSION < 3 +static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { + if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); + if (__Pyx_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags); + PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); + return -1; +} +static void __Pyx_ReleaseBuffer(Py_buffer *view) { + PyObject *obj = view->obj; + if (!obj) return; + if (PyObject_CheckBuffer(obj)) { + PyBuffer_Release(view); + return; + } + if ((0)) {} + else if (__Pyx_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) __pyx_pw_5numpy_7ndarray_3__releasebuffer__(obj, view); + view->obj = NULL; + Py_DECREF(obj); +} +#endif + + + /* CIntToPy */ + static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { + const int neg_one = (int) -1, const_zero = (int) 0; + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(int) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(int) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(int) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + 
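/* Portable fallback: probe endianness at run time (the first byte of the int 1 is 1 only on little-endian machines) and let _PyLong_FromByteArray convert the raw bytes of 'value' accordingly. */ +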
unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(int), + little, !is_unsigned); + } +} + +/* CIntFromPyVerify */ + #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) +#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) +#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ + {\ + func_type value = func_value;\ + if (sizeof(target_type) < sizeof(func_type)) {\ + if (unlikely(value != (func_type) (target_type) value)) {\ + func_type zero = 0;\ + if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ + return (target_type) -1;\ + if (is_unsigned && unlikely(value < zero))\ + goto raise_neg_overflow;\ + else\ + goto raise_overflow;\ + }\ + }\ + return (target_type) value;\ + } + +/* Declarations */ + #if CYTHON_CCOMPLEX + #ifdef __cplusplus + static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { + return ::std::complex< float >(x, y); + } + #else + static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { + return x + y*(__pyx_t_float_complex)_Complex_I; + } + #endif +#else + static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { + __pyx_t_float_complex z; + z.real = x; + z.imag = y; + return z; + } +#endif + +/* Arithmetic */ + #if CYTHON_CCOMPLEX +#else + static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + return (a.real == b.real) && (a.imag == b.imag); + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + __pyx_t_float_complex z; + z.real = a.real + b.real; + z.imag = a.imag + b.imag; + return z; + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + __pyx_t_float_complex z; + z.real = a.real - b.real; + z.imag = a.imag - b.imag; + return z; + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + __pyx_t_float_complex z; + z.real = a.real * b.real - a.imag * b.imag; + z.imag = a.real * b.imag + a.imag * b.real; + return z; + } + #if 1 + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + if (b.imag == 0) { + return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real); + } else if (fabsf(b.real) >= fabsf(b.imag)) { + if (b.real == 0 && b.imag == 0) { + return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.imag); + } else { + float r = b.imag / b.real; + float s = 1.0 / (b.real + b.imag * r); + return __pyx_t_float_complex_from_parts( + (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); + } + } else { + float r = b.real / b.imag; + float s = 1.0 / (b.imag + b.real * r); + return __pyx_t_float_complex_from_parts( + (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); + } + } + #else + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + if (b.imag == 0) { + return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real); + } else { + float denom = b.real * b.real + b.imag * b.imag; + return __pyx_t_float_complex_from_parts( + (a.real * b.real + a.imag * b.imag) / denom, + (a.imag * b.real - a.real * b.imag) / denom); + } + } 
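+ /* The enabled (#if 1) variant of __Pyx_c_quot_double above uses Smith-style scaling by the larger-magnitude component of b to avoid overflow/underflow in the naive denominator computed by this disabled fallback. */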
+ #endif + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex a) { + __pyx_t_float_complex z; + z.real = -a.real; + z.imag = -a.imag; + return z; + } + static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex a) { + return (a.real == 0) && (a.imag == 0); + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex a) { + __pyx_t_float_complex z; + z.real = a.real; + z.imag = -a.imag; + return z; + } + #if 1 + static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex z) { + #if !defined(HAVE_HYPOT) || defined(_MSC_VER) + return sqrtf(z.real*z.real + z.imag*z.imag); + #else + return hypotf(z.real, z.imag); + #endif + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + __pyx_t_float_complex z; + float r, lnr, theta, z_r, z_theta; + if (b.imag == 0 && b.real == (int)b.real) { + if (b.real < 0) { + float denom = a.real * a.real + a.imag * a.imag; + a.real = a.real / denom; + a.imag = -a.imag / denom; + b.real = -b.real; + } + switch ((int)b.real) { + case 0: + z.real = 1; + z.imag = 0; + return z; + case 1: + return a; + case 2: + z = __Pyx_c_prod_float(a, a); + return __Pyx_c_prod_float(a, a); + case 3: + z = __Pyx_c_prod_float(a, a); + return __Pyx_c_prod_float(z, a); + case 4: + z = __Pyx_c_prod_float(a, a); + return __Pyx_c_prod_float(z, z); + } + } + if (a.imag == 0) { + if (a.real == 0) { + return a; + } else if (b.imag == 0) { + z.real = powf(a.real, b.real); + z.imag = 0; + return z; + } else if (a.real > 0) { + r = a.real; + theta = 0; + } else { + r = -a.real; + theta = atan2f(0, -1); + } + } else { + r = __Pyx_c_abs_float(a); + theta = atan2f(a.imag, a.real); + } + lnr = logf(r); + z_r = expf(lnr * b.real - theta * b.imag); + z_theta = theta * b.real + lnr * b.imag; + z.real = z_r * cosf(z_theta); + z.imag = z_r * sinf(z_theta); + return z; + } + #endif +#endif + +/* Declarations */ + #if CYTHON_CCOMPLEX + #ifdef __cplusplus + static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { + return ::std::complex< double >(x, y); + } + #else + static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { + return x + y*(__pyx_t_double_complex)_Complex_I; + } + #endif +#else + static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { + __pyx_t_double_complex z; + z.real = x; + z.imag = y; + return z; + } +#endif + +/* Arithmetic */ + #if CYTHON_CCOMPLEX +#else + static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + return (a.real == b.real) && (a.imag == b.imag); + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + __pyx_t_double_complex z; + z.real = a.real + b.real; + z.imag = a.imag + b.imag; + return z; + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + __pyx_t_double_complex z; + z.real = a.real - b.real; + z.imag = a.imag - b.imag; + return z; + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + __pyx_t_double_complex z; + z.real = a.real * b.real - a.imag * b.imag; + z.imag = a.real * b.imag + a.imag * b.real; + return z; + } + #if 1 + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + if 
(b.imag == 0) { + return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real); + } else if (fabs(b.real) >= fabs(b.imag)) { + if (b.real == 0 && b.imag == 0) { + return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.imag); + } else { + double r = b.imag / b.real; + double s = 1.0 / (b.real + b.imag * r); + return __pyx_t_double_complex_from_parts( + (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); + } + } else { + double r = b.real / b.imag; + double s = 1.0 / (b.imag + b.real * r); + return __pyx_t_double_complex_from_parts( + (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); + } + } + #else + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + if (b.imag == 0) { + return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real); + } else { + double denom = b.real * b.real + b.imag * b.imag; + return __pyx_t_double_complex_from_parts( + (a.real * b.real + a.imag * b.imag) / denom, + (a.imag * b.real - a.real * b.imag) / denom); + } + } + #endif + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex a) { + __pyx_t_double_complex z; + z.real = -a.real; + z.imag = -a.imag; + return z; + } + static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex a) { + return (a.real == 0) && (a.imag == 0); + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex a) { + __pyx_t_double_complex z; + z.real = a.real; + z.imag = -a.imag; + return z; + } + #if 1 + static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex z) { + #if !defined(HAVE_HYPOT) || defined(_MSC_VER) + return sqrt(z.real*z.real + z.imag*z.imag); + #else + return hypot(z.real, z.imag); + #endif + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + __pyx_t_double_complex z; + double r, lnr, theta, z_r, z_theta; + if (b.imag == 0 && b.real == (int)b.real) { + if (b.real < 0) { + double denom = a.real * a.real + a.imag * a.imag; + a.real = a.real / denom; + a.imag = -a.imag / denom; + b.real = -b.real; + } + switch ((int)b.real) { + case 0: + z.real = 1; + z.imag = 0; + return z; + case 1: + return a; + case 2: + z = __Pyx_c_prod_double(a, a); + return __Pyx_c_prod_double(a, a); + case 3: + z = __Pyx_c_prod_double(a, a); + return __Pyx_c_prod_double(z, a); + case 4: + z = __Pyx_c_prod_double(a, a); + return __Pyx_c_prod_double(z, z); + } + } + if (a.imag == 0) { + if (a.real == 0) { + return a; + } else if (b.imag == 0) { + z.real = pow(a.real, b.real); + z.imag = 0; + return z; + } else if (a.real > 0) { + r = a.real; + theta = 0; + } else { + r = -a.real; + theta = atan2(0, -1); + } + } else { + r = __Pyx_c_abs_double(a); + theta = atan2(a.imag, a.real); + } + lnr = log(r); + z_r = exp(lnr * b.real - theta * b.imag); + z_theta = theta * b.real + lnr * b.imag; + z.real = z_r * cos(z_theta); + z.imag = z_r * sin(z_theta); + return z; + } + #endif +#endif + +/* CIntToPy */ + static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value) { + const enum NPY_TYPES neg_one = (enum NPY_TYPES) -1, const_zero = (enum NPY_TYPES) 0; + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(enum NPY_TYPES) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(enum 
NPY_TYPES) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(enum NPY_TYPES) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(enum NPY_TYPES) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(enum NPY_TYPES), + little, !is_unsigned); + } +} + +/* CIntFromPy */ + static CYTHON_INLINE unsigned int __Pyx_PyInt_As_unsigned_int(PyObject *x) { + const unsigned int neg_one = (unsigned int) -1, const_zero = (unsigned int) 0; + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(unsigned int) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(unsigned int, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (unsigned int) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (unsigned int) 0; + case 1: __PYX_VERIFY_RETURN_INT(unsigned int, digit, digits[0]) + case 2: + if (8 * sizeof(unsigned int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) >= 2 * PyLong_SHIFT) { + return (unsigned int) (((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(unsigned int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) >= 3 * PyLong_SHIFT) { + return (unsigned int) (((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(unsigned int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) >= 4 * PyLong_SHIFT) { + return (unsigned int) (((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (unsigned int) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(unsigned int) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(unsigned int, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(unsigned int) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(unsigned int, 
unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (unsigned int) 0; + case -1: __PYX_VERIFY_RETURN_INT(unsigned int, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(unsigned int, digit, +digits[0]) + case -2: + if (8 * sizeof(unsigned int) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT) { + return (unsigned int) (((unsigned int)-1)*(((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(unsigned int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT) { + return (unsigned int) ((((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT) { + return (unsigned int) (((unsigned int)-1)*(((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(unsigned int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT) { + return (unsigned int) ((((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 4 * PyLong_SHIFT) { + return (unsigned int) (((unsigned int)-1)*(((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(unsigned int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 4 * PyLong_SHIFT) { + return (unsigned int) ((((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned 
int)digits[0]))); + } + } + break; + } +#endif + if (sizeof(unsigned int) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(unsigned int, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(unsigned int) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(unsigned int, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + unsigned int val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (unsigned int) -1; + } + } else { + unsigned int val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (unsigned int) -1; + val = __Pyx_PyInt_As_unsigned_int(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to unsigned int"); + return (unsigned int) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to unsigned int"); + return (unsigned int) -1; +} + +/* CIntFromPy */ + static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { + const int neg_one = (int) -1, const_zero = (int) 0; + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(int) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (int) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (int) 0; + case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) + case 2: + if (8 * sizeof(int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { + return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { + return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { + return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << 
PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (int) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(int) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (int) 0; + case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) + case -2: + if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned 
long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { + return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + } +#endif + if (sizeof(int) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + int val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (int) -1; + } + } else { + int val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (int) -1; + val = __Pyx_PyInt_As_int(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to int"); + return (int) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to int"); + return (int) -1; +} + +/* CIntToPy */ + static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { + const long neg_one = (long) -1, const_zero = (long) 0; + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(long) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(long) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(long) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(long), + little, !is_unsigned); + } +} + +/* CIntFromPy */ + static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { + const long neg_one = (long) -1, const_zero = (long) 0; + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(long) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (long) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (long) 0; + case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) + case 2: + if (8 * sizeof(long) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << 
PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { + return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(long) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { + return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(long) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { + return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (long) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(long) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (long) 0; + case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) + case -2: + if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(long) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(long) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << 
PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(long) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + } +#endif + if (sizeof(long) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + long val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (long) -1; + } + } else { + long val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (long) -1; + val = __Pyx_PyInt_As_long(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to long"); + return (long) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to long"); + return (long) -1; +} + +/* FastTypeChecks */ + #if CYTHON_COMPILING_IN_CPYTHON +static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { + while (a) { + a = a->tp_base; + if (a == b) + return 1; + } + return b == &PyBaseObject_Type; +} +static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { + PyObject *mro; + if (a == b) return 1; + mro = a->tp_mro; + if (likely(mro)) { + Py_ssize_t i, n; + n = PyTuple_GET_SIZE(mro); + for (i = 0; i < n; i++) { + if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) + return 1; + } + return 0; + } + return __Pyx_InBases(a, b); +} +#if PY_MAJOR_VERSION == 2 +static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { + PyObject *exception, *value, *tb; + int res; + 
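+ /* Note (added for clarity): PyObject_IsSubclass can itself raise, so the
+    probes below run with the live exception state saved via __Pyx_ErrFetch
+    and restored afterwards; a failing probe is reported through
+    PyErr_WriteUnraisable and treated as "no match". */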
__Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ErrFetch(&exception, &value, &tb); + res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; + if (unlikely(res == -1)) { + PyErr_WriteUnraisable(err); + res = 0; + } + if (!res) { + res = PyObject_IsSubclass(err, exc_type2); + if (unlikely(res == -1)) { + PyErr_WriteUnraisable(err); + res = 0; + } + } + __Pyx_ErrRestore(exception, value, tb); + return res; +} +#else +static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { + int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; + if (!res) { + res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); + } + return res; +} +#endif +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { + if (likely(err == exc_type)) return 1; + if (likely(PyExceptionClass_Check(err))) { + return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); + } + return PyErr_GivenExceptionMatches(err, exc_type); +} +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { + if (likely(err == exc_type1 || err == exc_type2)) return 1; + if (likely(PyExceptionClass_Check(err))) { + return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); + } + return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); +} +#endif + +/* CheckBinaryVersion */ + static int __Pyx_check_binary_version(void) { + char ctversion[4], rtversion[4]; + PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); + PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); + if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { + char message[200]; + PyOS_snprintf(message, sizeof(message), + "compiletime version %s of module '%.100s' " + "does not match runtime version %s", + ctversion, __Pyx_MODULE_NAME, rtversion); + return PyErr_WarnEx(NULL, message, 1); + } + return 0; +} + +/* ModuleImport */ + #ifndef __PYX_HAVE_RT_ImportModule +#define __PYX_HAVE_RT_ImportModule +static PyObject *__Pyx_ImportModule(const char *name) { + PyObject *py_name = 0; + PyObject *py_module = 0; + py_name = __Pyx_PyIdentifier_FromString(name); + if (!py_name) + goto bad; + py_module = PyImport_Import(py_name); + Py_DECREF(py_name); + return py_module; +bad: + Py_XDECREF(py_name); + return 0; +} +#endif + +/* TypeImport */ + #ifndef __PYX_HAVE_RT_ImportType +#define __PYX_HAVE_RT_ImportType +static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, + size_t size, int strict) +{ + PyObject *py_module = 0; + PyObject *result = 0; + PyObject *py_name = 0; + char warning[200]; + Py_ssize_t basicsize; +#ifdef Py_LIMITED_API + PyObject *py_basicsize; +#endif + py_module = __Pyx_ImportModule(module_name); + if (!py_module) + goto bad; + py_name = __Pyx_PyIdentifier_FromString(class_name); + if (!py_name) + goto bad; + result = PyObject_GetAttr(py_module, py_name); + Py_DECREF(py_name); + py_name = 0; + Py_DECREF(py_module); + py_module = 0; + if (!result) + goto bad; + if (!PyType_Check(result)) { + PyErr_Format(PyExc_TypeError, + "%.200s.%.200s is not a type object", + module_name, class_name); + goto bad; + } +#ifndef Py_LIMITED_API + basicsize = ((PyTypeObject *)result)->tp_basicsize; +#else + py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); + if (!py_basicsize) + goto bad; + basicsize = 
PyLong_AsSsize_t(py_basicsize); + Py_DECREF(py_basicsize); + py_basicsize = 0; + if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) + goto bad; +#endif + if (!strict && (size_t)basicsize > size) { + PyOS_snprintf(warning, sizeof(warning), + "%s.%s size changed, may indicate binary incompatibility. Expected %zd, got %zd", + module_name, class_name, basicsize, size); + if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; + } + else if ((size_t)basicsize != size) { + PyErr_Format(PyExc_ValueError, + "%.200s.%.200s has the wrong size, try recompiling. Expected %zd, got %zd", + module_name, class_name, basicsize, size); + goto bad; + } + return (PyTypeObject *)result; +bad: + Py_XDECREF(py_module); + Py_XDECREF(result); + return NULL; +} +#endif + +/* InitStrings */ + static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { + while (t->p) { + #if PY_MAJOR_VERSION < 3 + if (t->is_unicode) { + *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); + } else if (t->intern) { + *t->p = PyString_InternFromString(t->s); + } else { + *t->p = PyString_FromStringAndSize(t->s, t->n - 1); + } + #else + if (t->is_unicode | t->is_str) { + if (t->intern) { + *t->p = PyUnicode_InternFromString(t->s); + } else if (t->encoding) { + *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); + } else { + *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); + } + } else { + *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); + } + #endif + if (!*t->p) + return -1; + if (PyObject_Hash(*t->p) == -1) + PyErr_Clear(); + ++t; + } + return 0; +} + +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { + return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); +} +static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { + Py_ssize_t ignore; + return __Pyx_PyObject_AsStringAndSize(o, &ignore); +} +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT +#if !CYTHON_PEP393_ENABLED +static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { + char* defenc_c; + PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); + if (!defenc) return NULL; + defenc_c = PyBytes_AS_STRING(defenc); +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + { + char* end = defenc_c + PyBytes_GET_SIZE(defenc); + char* c; + for (c = defenc_c; c < end; c++) { + if ((unsigned char) (*c) >= 128) { + PyUnicode_AsASCIIString(o); + return NULL; + } + } + } +#endif + *length = PyBytes_GET_SIZE(defenc); + return defenc_c; +} +#else +static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { + if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + if (likely(PyUnicode_IS_ASCII(o))) { + *length = PyUnicode_GET_LENGTH(o); + return PyUnicode_AsUTF8(o); + } else { + PyUnicode_AsASCIIString(o); + return NULL; + } +#else + return PyUnicode_AsUTF8AndSize(o, length); +#endif +} +#endif +#endif +static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT + if ( +#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + __Pyx_sys_getdefaultencoding_not_ascii && +#endif + PyUnicode_Check(o)) { + return __Pyx_PyUnicode_AsStringAndSize(o, length); + } else +#endif +#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) + if (PyByteArray_Check(o)) { + *length = PyByteArray_GET_SIZE(o); + 
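+ /* returns the bytearray's internal buffer without copying; the pointer
+    is only valid while the object stays alive and unresized */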
return PyByteArray_AS_STRING(o); + } else +#endif + { + char* result; + int r = PyBytes_AsStringAndSize(o, &result, length); + if (unlikely(r < 0)) { + return NULL; + } else { + return result; + } + } +} +static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { + int is_true = x == Py_True; + if (is_true | (x == Py_False) | (x == Py_None)) return is_true; + else return PyObject_IsTrue(x); +} +static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { +#if PY_MAJOR_VERSION >= 3 + if (PyLong_Check(result)) { + if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, + "__int__ returned non-int (type %.200s). " + "The ability to return an instance of a strict subclass of int " + "is deprecated, and may be removed in a future version of Python.", + Py_TYPE(result)->tp_name)) { + Py_DECREF(result); + return NULL; + } + return result; + } +#endif + PyErr_Format(PyExc_TypeError, + "__%.4s__ returned non-%.4s (type %.200s)", + type_name, type_name, Py_TYPE(result)->tp_name); + Py_DECREF(result); + return NULL; +} +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { +#if CYTHON_USE_TYPE_SLOTS + PyNumberMethods *m; +#endif + const char *name = NULL; + PyObject *res = NULL; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x) || PyLong_Check(x))) +#else + if (likely(PyLong_Check(x))) +#endif + return __Pyx_NewRef(x); +#if CYTHON_USE_TYPE_SLOTS + m = Py_TYPE(x)->tp_as_number; + #if PY_MAJOR_VERSION < 3 + if (m && m->nb_int) { + name = "int"; + res = m->nb_int(x); + } + else if (m && m->nb_long) { + name = "long"; + res = m->nb_long(x); + } + #else + if (likely(m && m->nb_int)) { + name = "int"; + res = m->nb_int(x); + } + #endif +#else + if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { + res = PyNumber_Int(x); + } +#endif + if (likely(res)) { +#if PY_MAJOR_VERSION < 3 + if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { +#else + if (unlikely(!PyLong_CheckExact(res))) { +#endif + return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); + } + } + else if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_TypeError, + "an integer is required"); + } + return res; +} +static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { + Py_ssize_t ival; + PyObject *x; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_CheckExact(b))) { + if (sizeof(Py_ssize_t) >= sizeof(long)) + return PyInt_AS_LONG(b); + else + return PyInt_AsSsize_t(b); + } +#endif + if (likely(PyLong_CheckExact(b))) { + #if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)b)->ob_digit; + const Py_ssize_t size = Py_SIZE(b); + if (likely(__Pyx_sst_abs(size) <= 1)) { + ival = likely(size) ? 
digits[0] : 0; + if (size == -1) ival = -ival; + return ival; + } else { + switch (size) { + case 2: + if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { + return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -2: + if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case 3: + if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { + return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -3: + if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case 4: + if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { + return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -4: + if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + } + } + #endif + return PyLong_AsSsize_t(b); + } + x = PyNumber_Index(b); + if (!x) return -1; + ival = PyInt_AsSsize_t(x); + Py_DECREF(x); + return ival; +} +static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { + return PyInt_FromSize_t(ival); +} + + +#endif /* Py_PYTHON_H */ diff --git a/astropy/convolution/boundary_fill.pyx b/astropy/convolution/boundary_fill.pyx new file mode 100644 index 0000000..66df519 --- /dev/null +++ b/astropy/convolution/boundary_fill.pyx @@ -0,0 +1,192 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import division +import numpy as np +cimport numpy as np + + +DTYPE = np.float +ctypedef np.float_t DTYPE_t + +cdef extern from "numpy/npy_math.h" nogil: + bint npy_isnan(double x) + +cimport cython + + +@cython.boundscheck(False) # turn off bounds-checking for entire function +def convolve1d_boundary_fill(np.ndarray[DTYPE_t, ndim=1] f, + np.ndarray[DTYPE_t, ndim=1] g, + float fill_value, + bint normalize_by_kernel + ): + + if g.shape[0] % 2 != 1: + raise ValueError("Convolution kernel must have odd dimensions") + + assert f.dtype == DTYPE and g.dtype == DTYPE + + cdef int nx = f.shape[0] + cdef int nkx = g.shape[0] + cdef int wkx = nkx // 2 + cdef np.ndarray[DTYPE_t, ndim=1] conv = np.empty([nx], dtype=DTYPE) + cdef unsigned int i, iii + cdef int ii + + cdef int iimin, iimax + + cdef DTYPE_t top, bot, ker, val + + # release the GIL + with nogil: + + # Now run the proper convolution + for i in range(nx): + top = 0. + bot = 0. 
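+            # top accumulates the kernel-weighted sum and bot the weight of
+            # the non-NaN samples seen, so top / bot (the normalize_by_kernel
+            # branch below) renormalizes for values lost to NaN; indices
+            # outside the array read fill_value instead.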
+ iimin = i - wkx + iimax = i + wkx + 1 + for ii in range(iimin, iimax): + if ii < 0 or ii > nx - 1: + val = fill_value + else: + val = f[ii] + ker = g[(nkx - 1 - (wkx + ii - i))] + if not npy_isnan(val): + top += val * ker + bot += ker + if normalize_by_kernel: + if bot == 0: + conv[i] = f[i] + else: + conv[i] = top / bot + else: + conv[i] = top + # GIL acquired again here + return conv + + +@cython.boundscheck(False) # turn off bounds-checking for entire function +def convolve2d_boundary_fill(np.ndarray[DTYPE_t, ndim=2] f, + np.ndarray[DTYPE_t, ndim=2] g, + float fill_value, + bint normalize_by_kernel + ): + + if g.shape[0] % 2 != 1 or g.shape[1] % 2 != 1: + raise ValueError("Convolution kernel must have odd dimensions") + + assert f.dtype == DTYPE and g.dtype == DTYPE + + cdef int nx = f.shape[0] + cdef int ny = f.shape[1] + cdef int nkx = g.shape[0] + cdef int nky = g.shape[1] + cdef int wkx = nkx // 2 + cdef int wky = nky // 2 + cdef np.ndarray[DTYPE_t, ndim=2] conv = np.empty([nx, ny], dtype=DTYPE) + cdef unsigned int i, j, iii, jjj + cdef int ii, jj + + cdef int iimin, iimax, jjmin, jjmax + + cdef DTYPE_t top, bot, ker, val + + # release the GIL + with nogil: + + # now run the proper convolution + for i in range(nx): + for j in range(ny): + top = 0. + bot = 0. + iimin = i - wkx + iimax = i + wkx + 1 + jjmin = j - wky + jjmax = j + wky + 1 + for ii in range(iimin, iimax): + for jj in range(jjmin, jjmax): + if ii < 0 or ii > nx - 1 or jj < 0 or jj > ny - 1: + val = fill_value + else: + val = f[ii, jj] + ker = g[(nkx - 1 - (wkx + ii - i)), + (nky - 1 - (wky + jj - j))] + if not npy_isnan(val): + top += val * ker + bot += ker + if normalize_by_kernel: + if bot == 0: + conv[i, j] = f[i, j] + else: + conv[i, j] = top / bot + else: + conv[i, j] = top + # GIL acquired again here + return conv + + +@cython.boundscheck(False) # turn off bounds-checking for entire function +def convolve3d_boundary_fill(np.ndarray[DTYPE_t, ndim=3] f, + np.ndarray[DTYPE_t, ndim=3] g, + float fill_value, + bint normalize_by_kernel): + + if g.shape[0] % 2 != 1 or g.shape[1] % 2 != 1 or g.shape[2] % 2 != 1: + raise ValueError("Convolution kernel must have odd dimensions") + + assert f.dtype == DTYPE and g.dtype == DTYPE + + cdef int nx = f.shape[0] + cdef int ny = f.shape[1] + cdef int nz = f.shape[2] + cdef int nkx = g.shape[0] + cdef int nky = g.shape[1] + cdef int nkz = g.shape[2] + cdef int wkx = nkx // 2 + cdef int wky = nky // 2 + cdef int wkz = nkz // 2 + cdef np.ndarray[DTYPE_t, ndim=3] conv = np.empty([nx, ny, nz], dtype=DTYPE) + cdef unsigned int i, j, k, iii, jjj, kkk + cdef int ii, jj, kk + + cdef int iimin, iimax, jjmin, jjmax, kkmin, kkmax + + cdef DTYPE_t top, bot, ker, val + + # release the GIL + with nogil: + + # Now run the proper convolution + for i in range(nx): + for j in range(ny): + for k in range(nz): + top = 0. + bot = 0. 
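+                    # as in the 1-D/2-D cases: the full kernel window is
+                    # always traversed, out-of-bounds samples read
+                    # fill_value, and g is indexed back-to-front (a flipped
+                    # kernel), making this a true convolution rather than a
+                    # correlation.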
+ iimin = i - wkx + iimax = i + wkx + 1 + jjmin = j - wky + jjmax = j + wky + 1 + kkmin = k - wkz + kkmax = k + wkz + 1 + for ii in range(iimin, iimax): + for jj in range(jjmin, jjmax): + for kk in range(kkmin, kkmax): + if ii < 0 or ii > nx - 1 or jj < 0 or jj > ny - 1 or kk < 0 or kk > nz - 1: + val = fill_value + else: + val = f[ii, jj, kk] + ker = g[(nkx - 1 - (wkx + ii - i)), + (nky - 1 - (wky + jj - j)), + (nkz - 1 - (wkz + kk - k))] + if not npy_isnan(val): + top += val * ker + bot += ker + if normalize_by_kernel: + if bot == 0: + conv[i, j, k] = f[i, j, k] + else: + conv[i, j, k] = top / bot + else: + conv[i, j, k] = top + # GIL acquired again here + return conv diff --git a/astropy/convolution/boundary_none.c b/astropy/convolution/boundary_none.c new file mode 100644 index 0000000..71f62dc --- /dev/null +++ b/astropy/convolution/boundary_none.c @@ -0,0 +1,9678 @@ +/* Generated by Cython 0.27.3 */ + +#define PY_SSIZE_T_CLEAN +#include "Python.h" +#ifndef Py_PYTHON_H + #error Python headers needed to compile C extensions, please install development version of Python. +#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) + #error Cython requires Python 2.6+ or Python 3.3+. +#else +#define CYTHON_ABI "0_27_3" +#define CYTHON_FUTURE_DIVISION 1 +#include <stddef.h> +#ifndef offsetof + #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) +#endif +#if !defined(WIN32) && !defined(MS_WINDOWS) + #ifndef __stdcall + #define __stdcall + #endif + #ifndef __cdecl + #define __cdecl + #endif + #ifndef __fastcall + #define __fastcall + #endif +#endif +#ifndef DL_IMPORT + #define DL_IMPORT(t) t +#endif +#ifndef DL_EXPORT + #define DL_EXPORT(t) t +#endif +#define __PYX_COMMA , +#ifndef HAVE_LONG_LONG + #if PY_VERSION_HEX >= 0x02070000 + #define HAVE_LONG_LONG + #endif +#endif +#ifndef PY_LONG_LONG + #define PY_LONG_LONG LONG_LONG +#endif +#ifndef Py_HUGE_VAL + #define Py_HUGE_VAL HUGE_VAL +#endif +#ifdef PYPY_VERSION + #define CYTHON_COMPILING_IN_PYPY 1 + #define CYTHON_COMPILING_IN_PYSTON 0 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #undef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 0 + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #if PY_VERSION_HEX < 0x03050000 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #undef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 0 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #undef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 1 + #undef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 0 + #undef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 0 + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 +#elif defined(PYSTON_VERSION) + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_PYSTON 1 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #undef CYTHON_USE_ASYNC_SLOTS + 
#define CYTHON_USE_ASYNC_SLOTS 0 + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #ifndef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 +#else + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_PYSTON 0 + #define CYTHON_COMPILING_IN_CPYTHON 1 + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif + #if PY_VERSION_HEX < 0x02070000 + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) + #define CYTHON_USE_PYTYPE_LOOKUP 1 + #endif + #if PY_MAJOR_VERSION < 3 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #if PY_VERSION_HEX < 0x02070000 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #elif !defined(CYTHON_USE_PYLONG_INTERNALS) + #define CYTHON_USE_PYLONG_INTERNALS 1 + #endif + #ifndef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 1 + #endif + #ifndef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif + #if PY_VERSION_HEX < 0x030300F0 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #elif !defined(CYTHON_USE_UNICODE_WRITER) + #define CYTHON_USE_UNICODE_WRITER 1 + #endif + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #ifndef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 1 + #endif + #ifndef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 1 + #endif + #ifndef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT (0 && PY_VERSION_HEX >= 0x03050000) + #endif + #ifndef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) + #endif +#endif +#if !defined(CYTHON_FAST_PYCCALL) +#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) +#endif +#if CYTHON_USE_PYLONG_INTERNALS + #include "longintrepr.h" + #undef SHIFT + #undef BASE + #undef MASK +#endif +#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) + #define Py_OptimizeFlag 0 +#endif +#define __PYX_BUILD_PY_SSIZE_T "n" +#define CYTHON_FORMAT_SSIZE_T "z" +#if PY_MAJOR_VERSION < 3 + #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) + #define __Pyx_DefaultClassType PyClass_Type +#else + #define __Pyx_BUILTIN_MODULE_NAME "builtins" + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a, k, l, s, f, 
code, c, n, v, fv, cell, fn, name, fline, lnos) + #define __Pyx_DefaultClassType PyType_Type +#endif +#ifndef Py_TPFLAGS_CHECKTYPES + #define Py_TPFLAGS_CHECKTYPES 0 +#endif +#ifndef Py_TPFLAGS_HAVE_INDEX + #define Py_TPFLAGS_HAVE_INDEX 0 +#endif +#ifndef Py_TPFLAGS_HAVE_NEWBUFFER + #define Py_TPFLAGS_HAVE_NEWBUFFER 0 +#endif +#ifndef Py_TPFLAGS_HAVE_FINALIZE + #define Py_TPFLAGS_HAVE_FINALIZE 0 +#endif +#if PY_VERSION_HEX < 0x030700A0 || !defined(METH_FASTCALL) + #ifndef METH_FASTCALL + #define METH_FASTCALL 0x80 + #endif + typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject **args, Py_ssize_t nargs); + typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject **args, + Py_ssize_t nargs, PyObject *kwnames); +#else + #define __Pyx_PyCFunctionFast _PyCFunctionFast + #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords +#endif +#if CYTHON_FAST_PYCCALL +#define __Pyx_PyFastCFunction_Check(func)\ + ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS))))) +#else +#define __Pyx_PyFastCFunction_Check(func) 0 +#endif +#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 + #define __Pyx_PyThreadState_Current PyThreadState_GET() +#elif PY_VERSION_HEX >= 0x03060000 + #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() +#elif PY_VERSION_HEX >= 0x03000000 + #define __Pyx_PyThreadState_Current PyThreadState_GET() +#else + #define __Pyx_PyThreadState_Current _PyThreadState_Current +#endif +#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) +#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n)) +#else +#define __Pyx_PyDict_NewPresized(n) PyDict_New() +#endif +#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION + #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) +#else + #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) +#endif +#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) + #define CYTHON_PEP393_ENABLED 1 + #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ + 0 : _PyUnicode_Ready((PyObject *)(op))) + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) + #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) + #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) + #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) +#else + #define CYTHON_PEP393_ENABLED 0 + #define PyUnicode_1BYTE_KIND 1 + #define PyUnicode_2BYTE_KIND 2 + #define PyUnicode_4BYTE_KIND 4 + #define __Pyx_PyUnicode_READY(op) (0) + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 
65535 : 1114111) + #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) + #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) + #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) +#endif +#if CYTHON_COMPILING_IN_PYPY + #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) + #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) +#else + #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) + #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ + PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) + #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) + #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) + #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) + #define PyObject_Malloc(s) PyMem_Malloc(s) + #define PyObject_Free(p) PyMem_Free(p) + #define PyObject_Realloc(p) PyMem_Realloc(p) +#endif +#if CYTHON_COMPILING_IN_PYSTON + #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) + #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) +#else + #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) + #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) +#endif +#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) +#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) +#else + #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) +#endif +#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) + #define PyObject_ASCII(o) PyObject_Repr(o) +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyBaseString_Type PyUnicode_Type + #define PyStringObject PyUnicodeObject + #define PyString_Type PyUnicode_Type + #define PyString_Check PyUnicode_Check + #define PyString_CheckExact PyUnicode_CheckExact +#endif +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) + #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) +#else + #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) + #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) +#endif +#ifndef PySet_CheckExact + #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) +#endif +#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) +#if PY_MAJOR_VERSION >= 3 + #define PyIntObject PyLongObject + #define PyInt_Type PyLong_Type + #define PyInt_Check(op) PyLong_Check(op) + #define PyInt_CheckExact(op) PyLong_CheckExact(op) + #define PyInt_FromString PyLong_FromString + #define PyInt_FromUnicode PyLong_FromUnicode + #define PyInt_FromLong PyLong_FromLong + #define PyInt_FromSize_t PyLong_FromSize_t + #define PyInt_FromSsize_t PyLong_FromSsize_t + #define PyInt_AsLong PyLong_AsLong + #define PyInt_AS_LONG PyLong_AS_LONG + #define PyInt_AsSsize_t PyLong_AsSsize_t + #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask + #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask + #define PyNumber_Int PyNumber_Long +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyBoolObject PyLongObject +#endif +#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY + #ifndef PyUnicode_InternFromString + #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) + #endif +#endif +#if PY_VERSION_HEX < 0x030200A4 + typedef long Py_hash_t; + #define __Pyx_PyInt_FromHash_t PyInt_FromLong + #define __Pyx_PyInt_AsHash_t PyInt_AsLong +#else + #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t + #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t +#endif +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyMethod_New(func, self, klass) ((self) ? 
PyMethod_New(func, self) : PyInstanceMethod_New(func)) +#else + #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) +#endif +#ifndef __has_attribute + #define __has_attribute(x) 0 +#endif +#ifndef __has_cpp_attribute + #define __has_cpp_attribute(x) 0 +#endif +#if CYTHON_USE_ASYNC_SLOTS + #if PY_VERSION_HEX >= 0x030500B1 + #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods + #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) + #else + #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) + #endif +#else + #define __Pyx_PyType_AsAsync(obj) NULL +#endif +#ifndef __Pyx_PyAsyncMethodsStruct + typedef struct { + unaryfunc am_await; + unaryfunc am_aiter; + unaryfunc am_anext; + } __Pyx_PyAsyncMethodsStruct; +#endif +#ifndef CYTHON_RESTRICT + #if defined(__GNUC__) + #define CYTHON_RESTRICT __restrict__ + #elif defined(_MSC_VER) && _MSC_VER >= 1400 + #define CYTHON_RESTRICT __restrict + #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define CYTHON_RESTRICT restrict + #else + #define CYTHON_RESTRICT + #endif +#endif +#ifndef CYTHON_UNUSED +# if defined(__GNUC__) +# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) +# define CYTHON_UNUSED __attribute__ ((__unused__)) +# else +# define CYTHON_UNUSED +# endif +# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) +# define CYTHON_UNUSED __attribute__ ((__unused__)) +# else +# define CYTHON_UNUSED +# endif +#endif +#ifndef CYTHON_MAYBE_UNUSED_VAR +# if defined(__cplusplus) + template<typename T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } +# else +# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) +# endif +#endif +#ifndef CYTHON_NCP_UNUSED +# if CYTHON_COMPILING_IN_CPYTHON +# define CYTHON_NCP_UNUSED +# else +# define CYTHON_NCP_UNUSED CYTHON_UNUSED +# endif +#endif +#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) +#ifdef _MSC_VER + #ifndef _MSC_STDINT_H_ + #if _MSC_VER < 1300 + typedef unsigned char uint8_t; + typedef unsigned int uint32_t; + #else + typedef unsigned __int8 uint8_t; + typedef unsigned __int32 uint32_t; + #endif + #endif +#else + #include <stdint.h> +#endif +#ifndef CYTHON_FALLTHROUGH + #if defined(__cplusplus) && __cplusplus >= 201103L + #if __has_cpp_attribute(fallthrough) + #define CYTHON_FALLTHROUGH [[fallthrough]] + #elif __has_cpp_attribute(clang::fallthrough) + #define CYTHON_FALLTHROUGH [[clang::fallthrough]] + #elif __has_cpp_attribute(gnu::fallthrough) + #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] + #endif + #endif + #ifndef CYTHON_FALLTHROUGH + #if __has_attribute(fallthrough) + #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) + #else + #define CYTHON_FALLTHROUGH + #endif + #endif + #if defined(__clang__ ) && defined(__apple_build_version__) + #if __apple_build_version__ < 7000000 + #undef CYTHON_FALLTHROUGH + #define CYTHON_FALLTHROUGH + #endif + #endif +#endif + +#ifndef CYTHON_INLINE + #if defined(__clang__) + #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) + #elif defined(__GNUC__) + #define CYTHON_INLINE __inline__ + #elif defined(_MSC_VER) + #define CYTHON_INLINE __inline + #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define CYTHON_INLINE inline + #else + #define CYTHON_INLINE + #endif +#endif + +#if defined(WIN32) || defined(MS_WINDOWS) + #define _USE_MATH_DEFINES +#endif +#include <math.h> +#ifdef NAN +#define __PYX_NAN() ((float) NAN) +#else +static CYTHON_INLINE float __PYX_NAN() { + float value; + 
memset(&value, 0xFF, sizeof(value)); + return value; +} +#endif +#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) +#define __Pyx_truncl trunc +#else +#define __Pyx_truncl truncl +#endif + + +#define __PYX_ERR(f_index, lineno, Ln_error) \ +{ \ + __pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \ +} + +#ifndef __PYX_EXTERN_C + #ifdef __cplusplus + #define __PYX_EXTERN_C extern "C" + #else + #define __PYX_EXTERN_C extern + #endif +#endif + +#define __PYX_HAVE__astropy__convolution__boundary_none +#define __PYX_HAVE_API__astropy__convolution__boundary_none +#include <string.h> +#include <stdio.h> +#include "numpy/arrayobject.h" +#include "numpy/ufuncobject.h" +#include "numpy/npy_math.h" +#ifdef _OPENMP +#include <omp.h> +#endif /* _OPENMP */ + +#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) +#define CYTHON_WITHOUT_ASSERTIONS +#endif + +typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; + const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; + +#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 +#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0 +#define __PYX_DEFAULT_STRING_ENCODING "" +#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString +#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize +#define __Pyx_uchar_cast(c) ((unsigned char)c) +#define __Pyx_long_cast(x) ((long)x) +#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ + (sizeof(type) < sizeof(Py_ssize_t)) ||\ + (sizeof(type) > sizeof(Py_ssize_t) &&\ + likely(v < (type)PY_SSIZE_T_MAX ||\ + v == (type)PY_SSIZE_T_MAX) &&\ + (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ + v == (type)PY_SSIZE_T_MIN))) ||\ + (sizeof(type) == sizeof(Py_ssize_t) &&\ + (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ + v == (type)PY_SSIZE_T_MAX))) ) +#if defined (__cplusplus) && __cplusplus >= 201103L + #include <cstdlib> + #define __Pyx_sst_abs(value) std::abs(value) +#elif SIZEOF_INT >= SIZEOF_SIZE_T + #define __Pyx_sst_abs(value) abs(value) +#elif SIZEOF_LONG >= SIZEOF_SIZE_T + #define __Pyx_sst_abs(value) labs(value) +#elif defined (_MSC_VER) + #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) +#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define __Pyx_sst_abs(value) llabs(value) +#elif defined (__GNUC__) + #define __Pyx_sst_abs(value) __builtin_llabs(value) +#else + #define __Pyx_sst_abs(value) ((value<0) ? 
-value : value) +#endif +static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); +static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); +#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) +#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) +#define __Pyx_PyBytes_FromString PyBytes_FromString +#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); +#if PY_MAJOR_VERSION < 3 + #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString + #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize +#else + #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString + #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize +#endif +#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) +#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) +#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) +#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) +#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) +static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { + const Py_UNICODE *u_end = u; + while (*u_end++) ; + return (size_t)(u_end - u - 1); +} +#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) +#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode +#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode +#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) +#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) +#define __Pyx_PyBool_FromLong(b) ((b) ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False)) +static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); +#define __Pyx_PySequence_Tuple(obj)\ + (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) +static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); +static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); +#if CYTHON_ASSUME_SAFE_MACROS +#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) +#else +#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) +#endif +#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) +#if PY_MAJOR_VERSION >= 3 +#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? 
__Pyx_NewRef(x) : PyNumber_Long(x)) +#else +#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) +#endif +#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) +#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII +static int __Pyx_sys_getdefaultencoding_not_ascii; +static int __Pyx_init_sys_getdefaultencoding_params(void) { + PyObject* sys; + PyObject* default_encoding = NULL; + PyObject* ascii_chars_u = NULL; + PyObject* ascii_chars_b = NULL; + const char* default_encoding_c; + sys = PyImport_ImportModule("sys"); + if (!sys) goto bad; + default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); + Py_DECREF(sys); + if (!default_encoding) goto bad; + default_encoding_c = PyBytes_AsString(default_encoding); + if (!default_encoding_c) goto bad; + if (strcmp(default_encoding_c, "ascii") == 0) { + __Pyx_sys_getdefaultencoding_not_ascii = 0; + } else { + char ascii_chars[128]; + int c; + for (c = 0; c < 128; c++) { + ascii_chars[c] = c; + } + __Pyx_sys_getdefaultencoding_not_ascii = 1; + ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); + if (!ascii_chars_u) goto bad; + ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); + if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { + PyErr_Format( + PyExc_ValueError, + "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", + default_encoding_c); + goto bad; + } + Py_DECREF(ascii_chars_u); + Py_DECREF(ascii_chars_b); + } + Py_DECREF(default_encoding); + return 0; +bad: + Py_XDECREF(default_encoding); + Py_XDECREF(ascii_chars_u); + Py_XDECREF(ascii_chars_b); + return -1; +} +#endif +#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 +#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) +#else +#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) +#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT +static char* __PYX_DEFAULT_STRING_ENCODING; +static int __Pyx_init_sys_getdefaultencoding_params(void) { + PyObject* sys; + PyObject* default_encoding = NULL; + char* default_encoding_c; + sys = PyImport_ImportModule("sys"); + if (!sys) goto bad; + default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); + Py_DECREF(sys); + if (!default_encoding) goto bad; + default_encoding_c = PyBytes_AsString(default_encoding); + if (!default_encoding_c) goto bad; + __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); /* +1 for the NUL terminator written by strcpy below */ + if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; + strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); + Py_DECREF(default_encoding); + return 0; +bad: + Py_XDECREF(default_encoding); + return -1; +} +#endif +#endif + + +/* Test for GCC > 2.95 */ +#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) + #define likely(x) __builtin_expect(!!(x), 1) + #define unlikely(x) __builtin_expect(!!(x), 0) +#else /* !__GNUC__ or GCC < 2.95 */ + #define likely(x) (x) + #define unlikely(x) (x) +#endif /* __GNUC__ */ +static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } + +static PyObject *__pyx_m = NULL; +static PyObject *__pyx_d; +static PyObject *__pyx_b; +static PyObject *__pyx_cython_runtime; +static PyObject *__pyx_empty_tuple; +static PyObject 
*__pyx_empty_bytes; +static PyObject *__pyx_empty_unicode; +static int __pyx_lineno; +static int __pyx_clineno = 0; +static const char * __pyx_cfilenm= __FILE__; +static const char *__pyx_filename; + +/* Header.proto */ +#if !defined(CYTHON_CCOMPLEX) + #if defined(__cplusplus) + #define CYTHON_CCOMPLEX 1 + #elif defined(_Complex_I) + #define CYTHON_CCOMPLEX 1 + #else + #define CYTHON_CCOMPLEX 0 + #endif +#endif +#if CYTHON_CCOMPLEX + #ifdef __cplusplus + #include <complex> + #else + #include <complex.h> + #endif +#endif +#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) + #undef _Complex_I + #define _Complex_I 1.0fj +#endif + + +static const char *__pyx_f[] = { + "astropy/convolution/boundary_none.pyx", + "__init__.pxd", + "type.pxd", +}; +/* BufferFormatStructs.proto */ +#define IS_UNSIGNED(type) (((type) -1) > 0) +struct __Pyx_StructField_; +#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) +typedef struct { + const char* name; + struct __Pyx_StructField_* fields; + size_t size; + size_t arraysize[8]; + int ndim; + char typegroup; + char is_unsigned; + int flags; +} __Pyx_TypeInfo; +typedef struct __Pyx_StructField_ { + __Pyx_TypeInfo* type; + const char* name; + size_t offset; +} __Pyx_StructField; +typedef struct { + __Pyx_StructField* field; + size_t parent_offset; +} __Pyx_BufFmt_StackElem; +typedef struct { + __Pyx_StructField root; + __Pyx_BufFmt_StackElem* head; + size_t fmt_offset; + size_t new_count, enc_count; + size_t struct_alignment; + int is_complex; + char enc_type; + char new_packmode; + char enc_packmode; + char is_valid_array; +} __Pyx_BufFmt_Context; + +/* NoFastGil.proto */ +#define __Pyx_PyGILState_Ensure PyGILState_Ensure +#define __Pyx_PyGILState_Release PyGILState_Release +#define __Pyx_FastGIL_Remember() +#define __Pyx_FastGIL_Forget() +#define __Pyx_FastGilFuncInit() + +/* ForceInitThreads.proto */ +#ifndef __PYX_FORCE_INIT_THREADS + #define __PYX_FORCE_INIT_THREADS 0 +#endif + + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":743 + * # in Cython to enable them only on the right systems. 
+ * + * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< + * ctypedef npy_int16 int16_t + * ctypedef npy_int32 int32_t + */ +typedef npy_int8 __pyx_t_5numpy_int8_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":744 + * + * ctypedef npy_int8 int8_t + * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< + * ctypedef npy_int32 int32_t + * ctypedef npy_int64 int64_t + */ +typedef npy_int16 __pyx_t_5numpy_int16_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":745 + * ctypedef npy_int8 int8_t + * ctypedef npy_int16 int16_t + * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< + * ctypedef npy_int64 int64_t + * #ctypedef npy_int96 int96_t + */ +typedef npy_int32 __pyx_t_5numpy_int32_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":746 + * ctypedef npy_int16 int16_t + * ctypedef npy_int32 int32_t + * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< + * #ctypedef npy_int96 int96_t + * #ctypedef npy_int128 int128_t + */ +typedef npy_int64 __pyx_t_5numpy_int64_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":750 + * #ctypedef npy_int128 int128_t + * + * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< + * ctypedef npy_uint16 uint16_t + * ctypedef npy_uint32 uint32_t + */ +typedef npy_uint8 __pyx_t_5numpy_uint8_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":751 + * + * ctypedef npy_uint8 uint8_t + * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< + * ctypedef npy_uint32 uint32_t + * ctypedef npy_uint64 uint64_t + */ +typedef npy_uint16 __pyx_t_5numpy_uint16_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":752 + * ctypedef npy_uint8 uint8_t + * ctypedef npy_uint16 uint16_t + * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< + * ctypedef npy_uint64 uint64_t + * #ctypedef npy_uint96 uint96_t + */ +typedef npy_uint32 __pyx_t_5numpy_uint32_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":753 + * ctypedef npy_uint16 uint16_t + * ctypedef npy_uint32 uint32_t + * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< + * #ctypedef npy_uint96 uint96_t + * #ctypedef npy_uint128 uint128_t + */ +typedef npy_uint64 __pyx_t_5numpy_uint64_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":757 + * #ctypedef npy_uint128 uint128_t + * + * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< + * ctypedef npy_float64 float64_t + * #ctypedef npy_float80 float80_t + */ +typedef npy_float32 __pyx_t_5numpy_float32_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":758 + * + * ctypedef npy_float32 float32_t + * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< + * #ctypedef npy_float80 float80_t + * #ctypedef npy_float128 float128_t + */ +typedef npy_float64 __pyx_t_5numpy_float64_t; + +/* 
"../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":767 + * # The int types are mapped a bit surprising -- + * # numpy.int corresponds to 'l' and numpy.long to 'q' + * ctypedef npy_long int_t # <<<<<<<<<<<<<< + * ctypedef npy_longlong long_t + * ctypedef npy_longlong longlong_t + */ +typedef npy_long __pyx_t_5numpy_int_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":768 + * # numpy.int corresponds to 'l' and numpy.long to 'q' + * ctypedef npy_long int_t + * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< + * ctypedef npy_longlong longlong_t + * + */ +typedef npy_longlong __pyx_t_5numpy_long_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":769 + * ctypedef npy_long int_t + * ctypedef npy_longlong long_t + * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< + * + * ctypedef npy_ulong uint_t + */ +typedef npy_longlong __pyx_t_5numpy_longlong_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":771 + * ctypedef npy_longlong longlong_t + * + * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< + * ctypedef npy_ulonglong ulong_t + * ctypedef npy_ulonglong ulonglong_t + */ +typedef npy_ulong __pyx_t_5numpy_uint_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":772 + * + * ctypedef npy_ulong uint_t + * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< + * ctypedef npy_ulonglong ulonglong_t + * + */ +typedef npy_ulonglong __pyx_t_5numpy_ulong_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":773 + * ctypedef npy_ulong uint_t + * ctypedef npy_ulonglong ulong_t + * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< + * + * ctypedef npy_intp intp_t + */ +typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":775 + * ctypedef npy_ulonglong ulonglong_t + * + * ctypedef npy_intp intp_t # <<<<<<<<<<<<<< + * ctypedef npy_uintp uintp_t + * + */ +typedef npy_intp __pyx_t_5numpy_intp_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":776 + * + * ctypedef npy_intp intp_t + * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< + * + * ctypedef npy_double float_t + */ +typedef npy_uintp __pyx_t_5numpy_uintp_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":778 + * ctypedef npy_uintp uintp_t + * + * ctypedef npy_double float_t # <<<<<<<<<<<<<< + * ctypedef npy_double double_t + * ctypedef npy_longdouble longdouble_t + */ +typedef npy_double __pyx_t_5numpy_float_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":779 + * + * ctypedef npy_double float_t + * ctypedef npy_double double_t # <<<<<<<<<<<<<< + * ctypedef npy_longdouble longdouble_t + * + */ +typedef npy_double __pyx_t_5numpy_double_t; + +/* 
"../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":780 + * ctypedef npy_double float_t + * ctypedef npy_double double_t + * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< + * + * ctypedef npy_cfloat cfloat_t + */ +typedef npy_longdouble __pyx_t_5numpy_longdouble_t; + +/* "astropy/convolution/boundary_none.pyx":8 + * + * DTYPE = np.float + * ctypedef np.float_t DTYPE_t # <<<<<<<<<<<<<< + * + * cdef extern from "numpy/npy_math.h" nogil: + */ +typedef __pyx_t_5numpy_float_t __pyx_t_7astropy_11convolution_13boundary_none_DTYPE_t; +/* Declarations.proto */ +#if CYTHON_CCOMPLEX + #ifdef __cplusplus + typedef ::std::complex< float > __pyx_t_float_complex; + #else + typedef float _Complex __pyx_t_float_complex; + #endif +#else + typedef struct { float real, imag; } __pyx_t_float_complex; +#endif +static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); + +/* Declarations.proto */ +#if CYTHON_CCOMPLEX + #ifdef __cplusplus + typedef ::std::complex< double > __pyx_t_double_complex; + #else + typedef double _Complex __pyx_t_double_complex; + #endif +#else + typedef struct { double real, imag; } __pyx_t_double_complex; +#endif +static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); + + +/*--- Type declarations ---*/ + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":782 + * ctypedef npy_longdouble longdouble_t + * + * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< + * ctypedef npy_cdouble cdouble_t + * ctypedef npy_clongdouble clongdouble_t + */ +typedef npy_cfloat __pyx_t_5numpy_cfloat_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":783 + * + * ctypedef npy_cfloat cfloat_t + * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< + * ctypedef npy_clongdouble clongdouble_t + * + */ +typedef npy_cdouble __pyx_t_5numpy_cdouble_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":784 + * ctypedef npy_cfloat cfloat_t + * ctypedef npy_cdouble cdouble_t + * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< + * + * ctypedef npy_cdouble complex_t + */ +typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":786 + * ctypedef npy_clongdouble clongdouble_t + * + * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew1(a): + */ +typedef npy_cdouble __pyx_t_5numpy_complex_t; + +/* --- Runtime support code (head) --- */ +/* Refnanny.proto */ +#ifndef CYTHON_REFNANNY + #define CYTHON_REFNANNY 0 +#endif +#if CYTHON_REFNANNY + typedef struct { + void (*INCREF)(void*, PyObject*, int); + void (*DECREF)(void*, PyObject*, int); + void (*GOTREF)(void*, PyObject*, int); + void (*GIVEREF)(void*, PyObject*, int); + void* (*SetupContext)(const char*, int, const char*); + void (*FinishContext)(void**); + } __Pyx_RefNannyAPIStruct; + static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; + static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); + #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; +#ifdef WITH_THREAD + #define __Pyx_RefNannySetupContext(name, 
acquire_gil)\ + if (acquire_gil) {\ + PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ + PyGILState_Release(__pyx_gilstate_save);\ + } else {\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ + } +#else + #define __Pyx_RefNannySetupContext(name, acquire_gil)\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) +#endif + #define __Pyx_RefNannyFinishContext()\ + __Pyx_RefNanny->FinishContext(&__pyx_refnanny) + #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) + #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) + #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) + #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) +#else + #define __Pyx_RefNannyDeclarations + #define __Pyx_RefNannySetupContext(name, acquire_gil) + #define __Pyx_RefNannyFinishContext() + #define __Pyx_INCREF(r) Py_INCREF(r) + #define __Pyx_DECREF(r) Py_DECREF(r) + #define __Pyx_GOTREF(r) + #define __Pyx_GIVEREF(r) + #define __Pyx_XINCREF(r) Py_XINCREF(r) + #define __Pyx_XDECREF(r) Py_XDECREF(r) + #define __Pyx_XGOTREF(r) + #define __Pyx_XGIVEREF(r) +#endif +#define __Pyx_XDECREF_SET(r, v) do {\ + PyObject *tmp = (PyObject *) r;\ + r = v; __Pyx_XDECREF(tmp);\ + } while (0) +#define __Pyx_DECREF_SET(r, v) do {\ + PyObject *tmp = (PyObject *) r;\ + r = v; __Pyx_DECREF(tmp);\ + } while (0) +#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) +#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) + +/* PyObjectGetAttrStr.proto */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { + PyTypeObject* tp = Py_TYPE(obj); + if (likely(tp->tp_getattro)) + return tp->tp_getattro(obj, attr_name); +#if PY_MAJOR_VERSION < 3 + if (likely(tp->tp_getattr)) + return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); +#endif + return PyObject_GetAttr(obj, attr_name); +} +#else +#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) +#endif + +/* GetBuiltinName.proto */ +static PyObject *__Pyx_GetBuiltinName(PyObject *name); + +/* RaiseArgTupleInvalid.proto */ +static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, + Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); + +/* RaiseDoubleKeywords.proto */ +static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); + +/* ParseKeywords.proto */ +static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ + PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ + const char* function_name); + +/* ArgTypeTest.proto */ +#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\ + ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 
1 :\ + __Pyx__ArgTypeTest(obj, type, name, exact)) +static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); + +/* IsLittleEndian.proto */ +static CYTHON_INLINE int __Pyx_Is_Little_Endian(void); + +/* BufferFormatCheck.proto */ +static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); +static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, + __Pyx_BufFmt_StackElem* stack, + __Pyx_TypeInfo* type); + +/* BufferGetAndValidate.proto */ +#define __Pyx_GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack)\ + ((obj == Py_None || obj == NULL) ?\ + (__Pyx_ZeroBuffer(buf), 0) :\ + __Pyx__GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack)) +static int __Pyx__GetBufferAndValidate(Py_buffer* buf, PyObject* obj, + __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); +static void __Pyx_ZeroBuffer(Py_buffer* buf); +static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); +static Py_ssize_t __Pyx_minusones[] = { -1, -1, -1, -1, -1, -1, -1, -1 }; +static Py_ssize_t __Pyx_zeros[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; + +/* None.proto */ +static CYTHON_INLINE long __Pyx_mod_long(long, long); + +/* PyObjectCall.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); +#else +#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) +#endif + +/* PyThreadStateGet.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; +#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; +#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type +#else +#define __Pyx_PyThreadState_declare +#define __Pyx_PyThreadState_assign +#define __Pyx_PyErr_Occurred() PyErr_Occurred() +#endif + +/* PyErrFetchRestore.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) +#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) +#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) +#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) +#else +#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) +#endif +#else +#define __Pyx_PyErr_Clear() PyErr_Clear() +#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) +#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) +#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) +#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) +#endif + +/* RaiseException.proto */ +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); + 
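+/* Illustrative sketch (not emitted by Cython; added as documentation and
+   guarded with #if 0 so it is never compiled): how the __Pyx_ErrFetch /
+   __Pyx_ErrRestore pair declared above is typically used to stash the live
+   exception around work that might clobber the error indicator. Under
+   CYTHON_FAST_THREAD_STATE the macros operate on the cached thread state;
+   otherwise they fall back to PyErr_Fetch / PyErr_Restore. The function name
+   __pyx_example_stash_exception is hypothetical. */
+#if 0
+static void __pyx_example_stash_exception(void) {
+    __Pyx_PyThreadState_declare
+    PyObject *etype, *evalue, *etb;
+    __Pyx_PyThreadState_assign
+    __Pyx_ErrFetch(&etype, &evalue, &etb);  /* take ownership of the raised exception */
+    /* ... cleanup that may itself set or clear the error indicator ... */
+    __Pyx_ErrRestore(etype, evalue, etb);   /* re-raise; the references are consumed */
+}
+#endif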
+/* GetModuleGlobalName.proto */ +static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name); + +/* None.proto */ +static CYTHON_INLINE long __Pyx_div_long(long, long); + +/* ExtTypeTest.proto */ +static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); + +#define __Pyx_BufPtrStrided1d(type, buf, i0, s0) (type)((char*)buf + i0 * s0) +#define __Pyx_BufPtrStrided2d(type, buf, i0, s0, i1, s1) (type)((char*)buf + i0 * s0 + i1 * s1) +#define __Pyx_BufPtrStrided3d(type, buf, i0, s0, i1, s1, i2, s2) (type)((char*)buf + i0 * s0 + i1 * s1 + i2 * s2) +/* DictGetItem.proto */ +#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY +static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) { + PyObject *value; + value = PyDict_GetItemWithError(d, key); + if (unlikely(!value)) { + if (!PyErr_Occurred()) { + PyObject* args = PyTuple_Pack(1, key); + if (likely(args)) + PyErr_SetObject(PyExc_KeyError, args); + Py_XDECREF(args); + } + return NULL; + } + Py_INCREF(value); + return value; +} +#else + #define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key) +#endif + +/* RaiseTooManyValuesToUnpack.proto */ +static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); + +/* RaiseNeedMoreValuesToUnpack.proto */ +static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); + +/* RaiseNoneIterError.proto */ +static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); + +/* SaveResetException.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); +#else +#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) +#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) +#endif + +/* PyErrExceptionMatches.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) +static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); +#else +#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) +#endif + +/* GetException.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) +static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#else +static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); +#endif + +/* Import.proto */ +static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); + +/* CLineInTraceback.proto */ +#ifdef CYTHON_CLINE_IN_TRACEBACK +#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? 
c_line : 0) +#else +static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); +#endif + +/* CodeObjectCache.proto */ +typedef struct { + PyCodeObject* code_object; + int code_line; +} __Pyx_CodeObjectCacheEntry; +struct __Pyx_CodeObjectCache { + int count; + int max_count; + __Pyx_CodeObjectCacheEntry* entries; +}; +static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; +static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); +static PyCodeObject *__pyx_find_code_object(int code_line); +static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); + +/* AddTraceback.proto */ +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename); + +/* BufferStructDeclare.proto */ +typedef struct { + Py_ssize_t shape, strides, suboffsets; +} __Pyx_Buf_DimInfo; +typedef struct { + size_t refcount; + Py_buffer pybuffer; +} __Pyx_Buffer; +typedef struct { + __Pyx_Buffer *rcbuffer; + char *data; + __Pyx_Buf_DimInfo diminfo[8]; +} __Pyx_LocalBuf_ND; + +#if PY_MAJOR_VERSION < 3 + static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); + static void __Pyx_ReleaseBuffer(Py_buffer *view); +#else + #define __Pyx_GetBuffer PyObject_GetBuffer + #define __Pyx_ReleaseBuffer PyBuffer_Release +#endif + + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_unsigned_int(unsigned int value); + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); + +/* RealImag.proto */ +#if CYTHON_CCOMPLEX + #ifdef __cplusplus + #define __Pyx_CREAL(z) ((z).real()) + #define __Pyx_CIMAG(z) ((z).imag()) + #else + #define __Pyx_CREAL(z) (__real__(z)) + #define __Pyx_CIMAG(z) (__imag__(z)) + #endif +#else + #define __Pyx_CREAL(z) ((z).real) + #define __Pyx_CIMAG(z) ((z).imag) +#endif +#if defined(__cplusplus) && CYTHON_CCOMPLEX\ + && (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103) + #define __Pyx_SET_CREAL(z,x) ((z).real(x)) + #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) +#else + #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) + #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) +#endif + +/* Arithmetic.proto */ +#if CYTHON_CCOMPLEX + #define __Pyx_c_eq_float(a, b) ((a)==(b)) + #define __Pyx_c_sum_float(a, b) ((a)+(b)) + #define __Pyx_c_diff_float(a, b) ((a)-(b)) + #define __Pyx_c_prod_float(a, b) ((a)*(b)) + #define __Pyx_c_quot_float(a, b) ((a)/(b)) + #define __Pyx_c_neg_float(a) (-(a)) + #ifdef __cplusplus + #define __Pyx_c_is_zero_float(z) ((z)==(float)0) + #define __Pyx_c_conj_float(z) (::std::conj(z)) + #if 1 + #define __Pyx_c_abs_float(z) (::std::abs(z)) + #define __Pyx_c_pow_float(a, b) (::std::pow(a, b)) + #endif + #else + #define __Pyx_c_is_zero_float(z) ((z)==0) + #define __Pyx_c_conj_float(z) (conjf(z)) + #if 1 + #define __Pyx_c_abs_float(z) (cabsf(z)) + #define __Pyx_c_pow_float(a, b) (cpowf(a, b)) + #endif + #endif +#else + static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex, __pyx_t_float_complex); + static 
CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex); + static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex); + #if 1 + static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex, __pyx_t_float_complex); + #endif +#endif + +/* Arithmetic.proto */ +#if CYTHON_CCOMPLEX + #define __Pyx_c_eq_double(a, b) ((a)==(b)) + #define __Pyx_c_sum_double(a, b) ((a)+(b)) + #define __Pyx_c_diff_double(a, b) ((a)-(b)) + #define __Pyx_c_prod_double(a, b) ((a)*(b)) + #define __Pyx_c_quot_double(a, b) ((a)/(b)) + #define __Pyx_c_neg_double(a) (-(a)) + #ifdef __cplusplus + #define __Pyx_c_is_zero_double(z) ((z)==(double)0) + #define __Pyx_c_conj_double(z) (::std::conj(z)) + #if 1 + #define __Pyx_c_abs_double(z) (::std::abs(z)) + #define __Pyx_c_pow_double(a, b) (::std::pow(a, b)) + #endif + #else + #define __Pyx_c_is_zero_double(z) ((z)==0) + #define __Pyx_c_conj_double(z) (conj(z)) + #if 1 + #define __Pyx_c_abs_double(z) (cabs(z)) + #define __Pyx_c_pow_double(a, b) (cpow(a, b)) + #endif + #endif +#else + static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex); + static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex); + #if 1 + static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex, __pyx_t_double_complex); + #endif +#endif + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value); + +/* CIntFromPy.proto */ +static CYTHON_INLINE unsigned int __Pyx_PyInt_As_unsigned_int(PyObject *); + +/* CIntFromPy.proto */ +static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); + +/* CIntFromPy.proto */ +static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); + +/* FastTypeChecks.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) +static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); +#else +#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) +#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) +#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) +#endif + +/* CheckBinaryVersion.proto */ +static int 
__Pyx_check_binary_version(void); + +/* PyIdentifierFromString.proto */ +#if !defined(__Pyx_PyIdentifier_FromString) +#if PY_MAJOR_VERSION < 3 + #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s) +#else + #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s) +#endif +#endif + +/* ModuleImport.proto */ +static PyObject *__Pyx_ImportModule(const char *name); + +/* TypeImport.proto */ +static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); + +/* InitStrings.proto */ +static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); + + +/* Module declarations from 'cpython.buffer' */ + +/* Module declarations from 'libc.string' */ + +/* Module declarations from 'libc.stdio' */ + +/* Module declarations from '__builtin__' */ + +/* Module declarations from 'cpython.type' */ +static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; + +/* Module declarations from 'cpython' */ + +/* Module declarations from 'cpython.object' */ + +/* Module declarations from 'cpython.ref' */ + +/* Module declarations from 'cpython.mem' */ + +/* Module declarations from 'numpy' */ + +/* Module declarations from 'numpy' */ +static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; +static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; +static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; +static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; +static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; +static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ + +/* Module declarations from 'cython' */ + +/* Module declarations from 'astropy.convolution.boundary_none' */ +static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_7astropy_11convolution_13boundary_none_DTYPE_t = { "DTYPE_t", NULL, sizeof(__pyx_t_7astropy_11convolution_13boundary_none_DTYPE_t), { 0 }, 0, 'R', 0, 0 }; +#define __Pyx_MODULE_NAME "astropy.convolution.boundary_none" +extern int __pyx_module_is_main_astropy__convolution__boundary_none; +int __pyx_module_is_main_astropy__convolution__boundary_none = 0; + +/* Implementation of 'astropy.convolution.boundary_none' */ +static PyObject *__pyx_builtin_ValueError; +static PyObject *__pyx_builtin_range; +static PyObject *__pyx_builtin_RuntimeError; +static PyObject *__pyx_builtin_ImportError; +static const char __pyx_k_f[] = "f"; +static const char __pyx_k_g[] = "g"; +static const char __pyx_k_i[] = "i"; +static const char __pyx_k_j[] = "j"; +static const char __pyx_k_k[] = "k"; +static const char __pyx_k_ii[] = "ii"; +static const char __pyx_k_jj[] = "jj"; +static const char __pyx_k_kk[] = "kk"; +static const char __pyx_k_np[] = "np"; +static const char __pyx_k_nx[] = "nx"; +static const char __pyx_k_ny[] = "ny"; +static const char __pyx_k_nz[] = "nz"; +static const char __pyx_k_bot[] = "bot"; +static const char __pyx_k_ker[] = "ker"; +static const char __pyx_k_nkx[] = "nkx"; +static const char __pyx_k_nky[] = "nky"; +static const char __pyx_k_nkz[] = "nkz"; +static const char __pyx_k_top[] = "top"; +static const char __pyx_k_val[] = "val"; +static const char __pyx_k_wkx[] = "wkx"; +static const char __pyx_k_wky[] = "wky"; +static const char __pyx_k_wkz[] = "wkz"; +static const char __pyx_k_conv[] = "conv"; +static const char __pyx_k_main[] = "__main__"; +static const char __pyx_k_test[] = "__test__"; +static const char __pyx_k_DTYPE[] = "DTYPE"; +static const char __pyx_k_dtype[] = "dtype"; +static const char __pyx_k_float[] = "float"; +static const char __pyx_k_iimax[] = "iimax"; +static const char 
__pyx_k_iimin[] = "iimin"; +static const char __pyx_k_jjmax[] = "jjmax"; +static const char __pyx_k_jjmin[] = "jjmin"; +static const char __pyx_k_kkmax[] = "kkmax"; +static const char __pyx_k_kkmin[] = "kkmin"; +static const char __pyx_k_numpy[] = "numpy"; +static const char __pyx_k_range[] = "range"; +static const char __pyx_k_zeros[] = "zeros"; +static const char __pyx_k_import[] = "__import__"; +static const char __pyx_k_ValueError[] = "ValueError"; +static const char __pyx_k_ImportError[] = "ImportError"; +static const char __pyx_k_RuntimeError[] = "RuntimeError"; +static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; +static const char __pyx_k_normalize_by_kernel[] = "normalize_by_kernel"; +static const char __pyx_k_convolve1d_boundary_none[] = "convolve1d_boundary_none"; +static const char __pyx_k_convolve2d_boundary_none[] = "convolve2d_boundary_none"; +static const char __pyx_k_convolve3d_boundary_none[] = "convolve3d_boundary_none"; +static const char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous"; +static const char __pyx_k_numpy_core_multiarray_failed_to[] = "numpy.core.multiarray failed to import"; +static const char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)"; +static const char __pyx_k_Convolution_kernel_must_have_odd[] = "Convolution kernel must have odd dimensions"; +static const char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd"; +static const char __pyx_k_Non_native_byte_order_not_suppor[] = "Non-native byte order not supported"; +static const char __pyx_k_astropy_convolution_boundary_non[] = "astropy/convolution/boundary_none.pyx"; +static const char __pyx_k_ndarray_is_not_Fortran_contiguou[] = "ndarray is not Fortran contiguous"; +static const char __pyx_k_numpy_core_umath_failed_to_impor[] = "numpy.core.umath failed to import"; +static const char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short."; +static const char __pyx_k_astropy_convolution_boundary_non_2[] = "astropy.convolution.boundary_none"; +static PyObject *__pyx_kp_s_Convolution_kernel_must_have_odd; +static PyObject *__pyx_n_s_DTYPE; +static PyObject *__pyx_kp_u_Format_string_allocated_too_shor; +static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2; +static PyObject *__pyx_n_s_ImportError; +static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor; +static PyObject *__pyx_n_s_RuntimeError; +static PyObject *__pyx_n_s_ValueError; +static PyObject *__pyx_kp_s_astropy_convolution_boundary_non; +static PyObject *__pyx_n_s_astropy_convolution_boundary_non_2; +static PyObject *__pyx_n_s_bot; +static PyObject *__pyx_n_s_cline_in_traceback; +static PyObject *__pyx_n_s_conv; +static PyObject *__pyx_n_s_convolve1d_boundary_none; +static PyObject *__pyx_n_s_convolve2d_boundary_none; +static PyObject *__pyx_n_s_convolve3d_boundary_none; +static PyObject *__pyx_n_s_dtype; +static PyObject *__pyx_n_s_f; +static PyObject *__pyx_n_s_float; +static PyObject *__pyx_n_s_g; +static PyObject *__pyx_n_s_i; +static PyObject *__pyx_n_s_ii; +static PyObject *__pyx_n_s_iimax; +static PyObject *__pyx_n_s_iimin; +static PyObject *__pyx_n_s_import; +static PyObject *__pyx_n_s_j; +static PyObject *__pyx_n_s_jj; +static PyObject *__pyx_n_s_jjmax; +static PyObject *__pyx_n_s_jjmin; +static PyObject *__pyx_n_s_k; +static PyObject *__pyx_n_s_ker; +static PyObject *__pyx_n_s_kk; +static PyObject *__pyx_n_s_kkmax; +static PyObject *__pyx_n_s_kkmin; +static 
PyObject *__pyx_n_s_main; +static PyObject *__pyx_kp_u_ndarray_is_not_C_contiguous; +static PyObject *__pyx_kp_u_ndarray_is_not_Fortran_contiguou; +static PyObject *__pyx_n_s_nkx; +static PyObject *__pyx_n_s_nky; +static PyObject *__pyx_n_s_nkz; +static PyObject *__pyx_n_s_normalize_by_kernel; +static PyObject *__pyx_n_s_np; +static PyObject *__pyx_n_s_numpy; +static PyObject *__pyx_kp_s_numpy_core_multiarray_failed_to; +static PyObject *__pyx_kp_s_numpy_core_umath_failed_to_impor; +static PyObject *__pyx_n_s_nx; +static PyObject *__pyx_n_s_ny; +static PyObject *__pyx_n_s_nz; +static PyObject *__pyx_n_s_range; +static PyObject *__pyx_n_s_test; +static PyObject *__pyx_n_s_top; +static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd; +static PyObject *__pyx_n_s_val; +static PyObject *__pyx_n_s_wkx; +static PyObject *__pyx_n_s_wky; +static PyObject *__pyx_n_s_wkz; +static PyObject *__pyx_n_s_zeros; +static PyObject *__pyx_pf_7astropy_11convolution_13boundary_none_convolve1d_boundary_none(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_f, PyArrayObject *__pyx_v_g, int __pyx_v_normalize_by_kernel); /* proto */ +static PyObject *__pyx_pf_7astropy_11convolution_13boundary_none_2convolve2d_boundary_none(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_f, PyArrayObject *__pyx_v_g, int __pyx_v_normalize_by_kernel); /* proto */ +static PyObject *__pyx_pf_7astropy_11convolution_13boundary_none_4convolve3d_boundary_none(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_f, PyArrayObject *__pyx_v_g, int __pyx_v_normalize_by_kernel); /* proto */ +static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ +static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */ +static PyObject *__pyx_tuple_; +static PyObject *__pyx_tuple__2; +static PyObject *__pyx_tuple__3; +static PyObject *__pyx_tuple__4; +static PyObject *__pyx_tuple__5; +static PyObject *__pyx_tuple__6; +static PyObject *__pyx_tuple__7; +static PyObject *__pyx_tuple__8; +static PyObject *__pyx_tuple__9; +static PyObject *__pyx_tuple__10; +static PyObject *__pyx_tuple__11; +static PyObject *__pyx_tuple__12; +static PyObject *__pyx_tuple__13; +static PyObject *__pyx_tuple__15; +static PyObject *__pyx_tuple__17; +static PyObject *__pyx_codeobj__14; +static PyObject *__pyx_codeobj__16; +static PyObject *__pyx_codeobj__18; + +/* "astropy/convolution/boundary_none.pyx":17 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve1d_boundary_none(np.ndarray[DTYPE_t, ndim=1] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=1] g, + * bint normalize_by_kernel): + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7astropy_11convolution_13boundary_none_1convolve1d_boundary_none(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyMethodDef __pyx_mdef_7astropy_11convolution_13boundary_none_1convolve1d_boundary_none = {"convolve1d_boundary_none", (PyCFunction)__pyx_pw_7astropy_11convolution_13boundary_none_1convolve1d_boundary_none, METH_VARARGS|METH_KEYWORDS, 0}; +static PyObject *__pyx_pw_7astropy_11convolution_13boundary_none_1convolve1d_boundary_none(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyArrayObject *__pyx_v_f = 0; + PyArrayObject *__pyx_v_g = 0; + int __pyx_v_normalize_by_kernel; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + 
__Pyx_RefNannySetupContext("convolve1d_boundary_none (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_f,&__pyx_n_s_g,&__pyx_n_s_normalize_by_kernel,0}; + PyObject* values[3] = {0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_f)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_g)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("convolve1d_boundary_none", 1, 3, 3, 1); __PYX_ERR(0, 17, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_normalize_by_kernel)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("convolve1d_boundary_none", 1, 3, 3, 2); __PYX_ERR(0, 17, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "convolve1d_boundary_none") < 0)) __PYX_ERR(0, 17, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + } + __pyx_v_f = ((PyArrayObject *)values[0]); + __pyx_v_g = ((PyArrayObject *)values[1]); + __pyx_v_normalize_by_kernel = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_normalize_by_kernel == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 19, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("convolve1d_boundary_none", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 17, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("astropy.convolution.boundary_none.convolve1d_boundary_none", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_f), __pyx_ptype_5numpy_ndarray, 1, "f", 0))) __PYX_ERR(0, 17, __pyx_L1_error) + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_g), __pyx_ptype_5numpy_ndarray, 1, "g", 0))) __PYX_ERR(0, 18, __pyx_L1_error) + __pyx_r = __pyx_pf_7astropy_11convolution_13boundary_none_convolve1d_boundary_none(__pyx_self, __pyx_v_f, __pyx_v_g, __pyx_v_normalize_by_kernel); + + /* function exit code */ + goto __pyx_L0; + __pyx_L1_error:; + __pyx_r = NULL; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7astropy_11convolution_13boundary_none_convolve1d_boundary_none(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_f, PyArrayObject *__pyx_v_g, int __pyx_v_normalize_by_kernel) { + int __pyx_v_nx; + int __pyx_v_nkx; + int __pyx_v_wkx; + PyArrayObject *__pyx_v_conv = 0; + unsigned int __pyx_v_i; + unsigned int __pyx_v_ii; + __pyx_t_7astropy_11convolution_13boundary_none_DTYPE_t __pyx_v_top; + __pyx_t_7astropy_11convolution_13boundary_none_DTYPE_t __pyx_v_bot; + 
__pyx_t_7astropy_11convolution_13boundary_none_DTYPE_t __pyx_v_ker; + __pyx_t_7astropy_11convolution_13boundary_none_DTYPE_t __pyx_v_val; + __Pyx_LocalBuf_ND __pyx_pybuffernd_conv; + __Pyx_Buffer __pyx_pybuffer_conv; + __Pyx_LocalBuf_ND __pyx_pybuffernd_f; + __Pyx_Buffer __pyx_pybuffer_f; + __Pyx_LocalBuf_ND __pyx_pybuffernd_g; + __Pyx_Buffer __pyx_pybuffer_g; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + int __pyx_t_5; + PyObject *__pyx_t_6 = NULL; + PyArrayObject *__pyx_t_7 = NULL; + int __pyx_t_8; + unsigned int __pyx_t_9; + long __pyx_t_10; + unsigned int __pyx_t_11; + size_t __pyx_t_12; + size_t __pyx_t_13; + size_t __pyx_t_14; + size_t __pyx_t_15; + size_t __pyx_t_16; + size_t __pyx_t_17; + __Pyx_RefNannySetupContext("convolve1d_boundary_none", 0); + __pyx_pybuffer_conv.pybuffer.buf = NULL; + __pyx_pybuffer_conv.refcount = 0; + __pyx_pybuffernd_conv.data = NULL; + __pyx_pybuffernd_conv.rcbuffer = &__pyx_pybuffer_conv; + __pyx_pybuffer_f.pybuffer.buf = NULL; + __pyx_pybuffer_f.refcount = 0; + __pyx_pybuffernd_f.data = NULL; + __pyx_pybuffernd_f.rcbuffer = &__pyx_pybuffer_f; + __pyx_pybuffer_g.pybuffer.buf = NULL; + __pyx_pybuffer_g.refcount = 0; + __pyx_pybuffernd_g.data = NULL; + __pyx_pybuffernd_g.rcbuffer = &__pyx_pybuffer_g; + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_f.rcbuffer->pybuffer, (PyObject*)__pyx_v_f, &__Pyx_TypeInfo_nn___pyx_t_7astropy_11convolution_13boundary_none_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) __PYX_ERR(0, 17, __pyx_L1_error) + } + __pyx_pybuffernd_f.diminfo[0].strides = __pyx_pybuffernd_f.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_f.diminfo[0].shape = __pyx_pybuffernd_f.rcbuffer->pybuffer.shape[0]; + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_g.rcbuffer->pybuffer, (PyObject*)__pyx_v_g, &__Pyx_TypeInfo_nn___pyx_t_7astropy_11convolution_13boundary_none_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) __PYX_ERR(0, 17, __pyx_L1_error) + } + __pyx_pybuffernd_g.diminfo[0].strides = __pyx_pybuffernd_g.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_g.diminfo[0].shape = __pyx_pybuffernd_g.rcbuffer->pybuffer.shape[0]; + + /* "astropy/convolution/boundary_none.pyx":21 + * bint normalize_by_kernel): + * + * if g.shape[0] % 2 != 1: # <<<<<<<<<<<<<< + * raise ValueError("Convolution kernel must have odd dimensions") + * + */ + __pyx_t_1 = ((__Pyx_mod_long((__pyx_v_g->dimensions[0]), 2) != 1) != 0); + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_none.pyx":22 + * + * if g.shape[0] % 2 != 1: + * raise ValueError("Convolution kernel must have odd dimensions") # <<<<<<<<<<<<<< + * + * assert f.dtype == DTYPE and g.dtype == DTYPE + */ + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 22, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_Raise(__pyx_t_2, 0, 0, 0); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __PYX_ERR(0, 22, __pyx_L1_error) + + /* "astropy/convolution/boundary_none.pyx":21 + * bint normalize_by_kernel): + * + * if g.shape[0] % 2 != 1: # <<<<<<<<<<<<<< + * raise ValueError("Convolution kernel must have odd dimensions") + * + */ + } + + /* "astropy/convolution/boundary_none.pyx":24 + * raise ValueError("Convolution kernel must have odd dimensions") + * + * assert f.dtype == DTYPE and g.dtype == DTYPE # 
<<<<<<<<<<<<<< + * + * cdef int nx = f.shape[0] + */ + #ifndef CYTHON_WITHOUT_ASSERTIONS + if (unlikely(!Py_OptimizeFlag)) { + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_f), __pyx_n_s_dtype); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_t_2, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 24, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_5) { + } else { + __pyx_t_1 = __pyx_t_5; + goto __pyx_L4_bool_binop_done; + } + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_g), __pyx_n_s_dtype); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_2 = PyObject_RichCompare(__pyx_t_4, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 24, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_1 = __pyx_t_5; + __pyx_L4_bool_binop_done:; + if (unlikely(!__pyx_t_1)) { + PyErr_SetNone(PyExc_AssertionError); + __PYX_ERR(0, 24, __pyx_L1_error) + } + } + #endif + + /* "astropy/convolution/boundary_none.pyx":26 + * assert f.dtype == DTYPE and g.dtype == DTYPE + * + * cdef int nx = f.shape[0] # <<<<<<<<<<<<<< + * cdef int nkx = g.shape[0] + * cdef int wkx = nkx // 2 + */ + __pyx_v_nx = (__pyx_v_f->dimensions[0]); + + /* "astropy/convolution/boundary_none.pyx":27 + * + * cdef int nx = f.shape[0] + * cdef int nkx = g.shape[0] # <<<<<<<<<<<<<< + * cdef int wkx = nkx // 2 + * + */ + __pyx_v_nkx = (__pyx_v_g->dimensions[0]); + + /* "astropy/convolution/boundary_none.pyx":28 + * cdef int nx = f.shape[0] + * cdef int nkx = g.shape[0] + * cdef int wkx = nkx // 2 # <<<<<<<<<<<<<< + * + * # The following need to be set to zeros rather than empty because the + */ + __pyx_v_wkx = __Pyx_div_long(__pyx_v_nkx, 2); + + /* "astropy/convolution/boundary_none.pyx":32 + * # The following need to be set to zeros rather than empty because the + * # boundary does not get reset. 
+ * cdef np.ndarray[DTYPE_t, ndim=1] conv = np.zeros([nx], dtype=DTYPE) # <<<<<<<<<<<<<< + * + * cdef unsigned int i, ii + */ + __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 32, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_zeros); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 32, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_nx); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 32, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_4 = PyList_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 32, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_GIVEREF(__pyx_t_2); + PyList_SET_ITEM(__pyx_t_4, 0, __pyx_t_2); + __pyx_t_2 = 0; + __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 32, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_GIVEREF(__pyx_t_4); + PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4); + __pyx_t_4 = 0; + __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 32, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 32, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_6) < 0) __PYX_ERR(0, 32, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_2, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 32, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (!(likely(((__pyx_t_6) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_6, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 32, __pyx_L1_error) + __pyx_t_7 = ((PyArrayObject *)__pyx_t_6); + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_conv.rcbuffer->pybuffer, (PyObject*)__pyx_t_7, &__Pyx_TypeInfo_nn___pyx_t_7astropy_11convolution_13boundary_none_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) { + __pyx_v_conv = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf = NULL; + __PYX_ERR(0, 32, __pyx_L1_error) + } else {__pyx_pybuffernd_conv.diminfo[0].strides = __pyx_pybuffernd_conv.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_conv.diminfo[0].shape = __pyx_pybuffernd_conv.rcbuffer->pybuffer.shape[0]; + } + } + __pyx_t_7 = 0; + __pyx_v_conv = ((PyArrayObject *)__pyx_t_6); + __pyx_t_6 = 0; + + /* "astropy/convolution/boundary_none.pyx":41 + * + * # release the GIL + * with nogil: # <<<<<<<<<<<<<< + * + * # Now run the proper convolution + */ + { + #ifdef WITH_THREAD + PyThreadState *_save; + Py_UNBLOCK_THREADS + __Pyx_FastGIL_Remember(); + #endif + /*try:*/ { + + /* "astropy/convolution/boundary_none.pyx":44 + * + * # Now run the proper convolution + * for i in range(wkx, nx - wkx): # <<<<<<<<<<<<<< + * top = 0. + * bot = 0. + */ + __pyx_t_8 = (__pyx_v_nx - __pyx_v_wkx); + for (__pyx_t_9 = __pyx_v_wkx; __pyx_t_9 < __pyx_t_8; __pyx_t_9+=1) { + __pyx_v_i = __pyx_t_9; + + /* "astropy/convolution/boundary_none.pyx":45 + * # Now run the proper convolution + * for i in range(wkx, nx - wkx): + * top = 0. # <<<<<<<<<<<<<< + * bot = 0. + * for ii in range(i - wkx, i + wkx + 1): + */ + __pyx_v_top = 0.; + + /* "astropy/convolution/boundary_none.pyx":46 + * for i in range(wkx, nx - wkx): + * top = 0. 
+ * bot = 0. # <<<<<<<<<<<<<< + * for ii in range(i - wkx, i + wkx + 1): + * val = f[ii] + */ + __pyx_v_bot = 0.; + + /* "astropy/convolution/boundary_none.pyx":47 + * top = 0. + * bot = 0. + * for ii in range(i - wkx, i + wkx + 1): # <<<<<<<<<<<<<< + * val = f[ii] + * ker = g[(nkx - 1 - (wkx + ii - i))] + */ + __pyx_t_10 = ((__pyx_v_i + __pyx_v_wkx) + 1); + for (__pyx_t_11 = (__pyx_v_i - __pyx_v_wkx); __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) { + __pyx_v_ii = __pyx_t_11; + + /* "astropy/convolution/boundary_none.pyx":48 + * bot = 0. + * for ii in range(i - wkx, i + wkx + 1): + * val = f[ii] # <<<<<<<<<<<<<< + * ker = g[(nkx - 1 - (wkx + ii - i))] + * if not npy_isnan(val): + */ + __pyx_t_12 = __pyx_v_ii; + __pyx_v_val = (*__Pyx_BufPtrStrided1d(__pyx_t_7astropy_11convolution_13boundary_none_DTYPE_t *, __pyx_pybuffernd_f.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_f.diminfo[0].strides)); + + /* "astropy/convolution/boundary_none.pyx":49 + * for ii in range(i - wkx, i + wkx + 1): + * val = f[ii] + * ker = g[(nkx - 1 - (wkx + ii - i))] # <<<<<<<<<<<<<< + * if not npy_isnan(val): + * top += val * ker + */ + __pyx_t_13 = ((unsigned int)((__pyx_v_nkx - 1) - ((__pyx_v_wkx + __pyx_v_ii) - __pyx_v_i))); + __pyx_v_ker = (*__Pyx_BufPtrStrided1d(__pyx_t_7astropy_11convolution_13boundary_none_DTYPE_t *, __pyx_pybuffernd_g.rcbuffer->pybuffer.buf, __pyx_t_13, __pyx_pybuffernd_g.diminfo[0].strides)); + + /* "astropy/convolution/boundary_none.pyx":50 + * val = f[ii] + * ker = g[(nkx - 1 - (wkx + ii - i))] + * if not npy_isnan(val): # <<<<<<<<<<<<<< + * top += val * ker + * bot += ker + */ + __pyx_t_1 = ((!(npy_isnan(__pyx_v_val) != 0)) != 0); + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_none.pyx":51 + * ker = g[(nkx - 1 - (wkx + ii - i))] + * if not npy_isnan(val): + * top += val * ker # <<<<<<<<<<<<<< + * bot += ker + * if normalize_by_kernel: + */ + __pyx_v_top = (__pyx_v_top + (__pyx_v_val * __pyx_v_ker)); + + /* "astropy/convolution/boundary_none.pyx":52 + * if not npy_isnan(val): + * top += val * ker + * bot += ker # <<<<<<<<<<<<<< + * if normalize_by_kernel: + * if bot == 0: + */ + __pyx_v_bot = (__pyx_v_bot + __pyx_v_ker); + + /* "astropy/convolution/boundary_none.pyx":50 + * val = f[ii] + * ker = g[(nkx - 1 - (wkx + ii - i))] + * if not npy_isnan(val): # <<<<<<<<<<<<<< + * top += val * ker + * bot += ker + */ + } + } + + /* "astropy/convolution/boundary_none.pyx":53 + * top += val * ker + * bot += ker + * if normalize_by_kernel: # <<<<<<<<<<<<<< + * if bot == 0: + * conv[i] = f[i] + */ + __pyx_t_1 = (__pyx_v_normalize_by_kernel != 0); + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_none.pyx":54 + * bot += ker + * if normalize_by_kernel: + * if bot == 0: # <<<<<<<<<<<<<< + * conv[i] = f[i] + * else: + */ + __pyx_t_1 = ((__pyx_v_bot == 0.0) != 0); + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_none.pyx":55 + * if normalize_by_kernel: + * if bot == 0: + * conv[i] = f[i] # <<<<<<<<<<<<<< + * else: + * conv[i] = top / bot + */ + __pyx_t_14 = __pyx_v_i; + __pyx_t_15 = __pyx_v_i; + *__Pyx_BufPtrStrided1d(__pyx_t_7astropy_11convolution_13boundary_none_DTYPE_t *, __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_conv.diminfo[0].strides) = (*__Pyx_BufPtrStrided1d(__pyx_t_7astropy_11convolution_13boundary_none_DTYPE_t *, __pyx_pybuffernd_f.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_f.diminfo[0].strides)); + + /* "astropy/convolution/boundary_none.pyx":54 + * bot += ker + * if normalize_by_kernel: + * if bot == 0: # <<<<<<<<<<<<<< + * 
conv[i] = f[i] + * else: + */ + goto __pyx_L15; + } + + /* "astropy/convolution/boundary_none.pyx":57 + * conv[i] = f[i] + * else: + * conv[i] = top / bot # <<<<<<<<<<<<<< + * else: + * conv[i] = top + */ + /*else*/ { + if (unlikely(__pyx_v_bot == 0)) { + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + PyErr_SetString(PyExc_ZeroDivisionError, "float division"); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + __PYX_ERR(0, 57, __pyx_L7_error) + } + __pyx_t_16 = __pyx_v_i; + *__Pyx_BufPtrStrided1d(__pyx_t_7astropy_11convolution_13boundary_none_DTYPE_t *, __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_conv.diminfo[0].strides) = (__pyx_v_top / __pyx_v_bot); + } + __pyx_L15:; + + /* "astropy/convolution/boundary_none.pyx":53 + * top += val * ker + * bot += ker + * if normalize_by_kernel: # <<<<<<<<<<<<<< + * if bot == 0: + * conv[i] = f[i] + */ + goto __pyx_L14; + } + + /* "astropy/convolution/boundary_none.pyx":59 + * conv[i] = top / bot + * else: + * conv[i] = top # <<<<<<<<<<<<<< + * # GIL acquired again here + * return conv + */ + /*else*/ { + __pyx_t_17 = __pyx_v_i; + *__Pyx_BufPtrStrided1d(__pyx_t_7astropy_11convolution_13boundary_none_DTYPE_t *, __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_conv.diminfo[0].strides) = __pyx_v_top; + } + __pyx_L14:; + } + } + + /* "astropy/convolution/boundary_none.pyx":41 + * + * # release the GIL + * with nogil: # <<<<<<<<<<<<<< + * + * # Now run the proper convolution + */ + /*finally:*/ { + /*normal exit:*/{ + #ifdef WITH_THREAD + __Pyx_FastGIL_Forget(); + Py_BLOCK_THREADS + #endif + goto __pyx_L8; + } + __pyx_L7_error: { + #ifdef WITH_THREAD + __Pyx_FastGIL_Forget(); + Py_BLOCK_THREADS + #endif + goto __pyx_L1_error; + } + __pyx_L8:; + } + } + + /* "astropy/convolution/boundary_none.pyx":61 + * conv[i] = top + * # GIL acquired again here + * return conv # <<<<<<<<<<<<<< + * + * + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject *)__pyx_v_conv)); + __pyx_r = ((PyObject *)__pyx_v_conv); + goto __pyx_L0; + + /* "astropy/convolution/boundary_none.pyx":17 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve1d_boundary_none(np.ndarray[DTYPE_t, ndim=1] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=1] g, + * bint normalize_by_kernel): + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_6); + { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_conv.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_f.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_g.rcbuffer->pybuffer); + __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} + __Pyx_AddTraceback("astropy.convolution.boundary_none.convolve1d_boundary_none", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + goto __pyx_L2; + __pyx_L0:; + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_conv.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_f.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_g.rcbuffer->pybuffer); + __pyx_L2:; + __Pyx_XDECREF((PyObject *)__pyx_v_conv); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* 
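+ * Editor's note (reconstruction, not part of the imported patch): the
+ * convolve1d_boundary_none body above is easier to follow in consolidated
+ * .pyx form; this sketch is assembled only from the boundary_none.pyx
+ * fragments that Cython quotes in the comments of this file:
+ *
+ *     @cython.boundscheck(False)  # turn off bounds-checking for entire function
+ *     def convolve1d_boundary_none(np.ndarray[DTYPE_t, ndim=1] f,
+ *                                  np.ndarray[DTYPE_t, ndim=1] g,
+ *                                  bint normalize_by_kernel):
+ *         if g.shape[0] % 2 != 1:
+ *             raise ValueError("Convolution kernel must have odd dimensions")
+ *         assert f.dtype == DTYPE and g.dtype == DTYPE
+ *         cdef int nx = f.shape[0]
+ *         cdef int nkx = g.shape[0]
+ *         cdef int wkx = nkx // 2
+ *         # zeros rather than empty: the boundary cells are never rewritten
+ *         cdef np.ndarray[DTYPE_t, ndim=1] conv = np.zeros([nx], dtype=DTYPE)
+ *         cdef unsigned int i, ii
+ *         with nogil:  # release the GIL for the convolution loops
+ *             for i in range(wkx, nx - wkx):            # interior pixels only
+ *                 top = 0.
+ *                 bot = 0.
+ *                 for ii in range(i - wkx, i + wkx + 1):
+ *                     val = f[ii]
+ *                     ker = g[(nkx - 1 - (wkx + ii - i))]  # flipped kernel
+ *                     if not npy_isnan(val):               # skip NaN inputs
+ *                         top += val * ker
+ *                         bot += ker
+ *                 if normalize_by_kernel:
+ *                     if bot == 0:
+ *                         conv[i] = f[i]
+ *                     else:
+ *                         conv[i] = top / bot
+ *                 else:
+ *                     conv[i] = top
+ *         # GIL acquired again here
+ *         return conv
+ *
+ * The same structure repeats below for the 2-d and 3-d variants, with one
+ * extra loop nest per added dimension.
+ *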
"astropy/convolution/boundary_none.pyx":65 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve2d_boundary_none(np.ndarray[DTYPE_t, ndim=2] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=2] g, + * bint normalize_by_kernel): + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7astropy_11convolution_13boundary_none_3convolve2d_boundary_none(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyMethodDef __pyx_mdef_7astropy_11convolution_13boundary_none_3convolve2d_boundary_none = {"convolve2d_boundary_none", (PyCFunction)__pyx_pw_7astropy_11convolution_13boundary_none_3convolve2d_boundary_none, METH_VARARGS|METH_KEYWORDS, 0}; +static PyObject *__pyx_pw_7astropy_11convolution_13boundary_none_3convolve2d_boundary_none(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyArrayObject *__pyx_v_f = 0; + PyArrayObject *__pyx_v_g = 0; + int __pyx_v_normalize_by_kernel; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("convolve2d_boundary_none (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_f,&__pyx_n_s_g,&__pyx_n_s_normalize_by_kernel,0}; + PyObject* values[3] = {0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_f)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_g)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("convolve2d_boundary_none", 1, 3, 3, 1); __PYX_ERR(0, 65, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_normalize_by_kernel)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("convolve2d_boundary_none", 1, 3, 3, 2); __PYX_ERR(0, 65, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "convolve2d_boundary_none") < 0)) __PYX_ERR(0, 65, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + } + __pyx_v_f = ((PyArrayObject *)values[0]); + __pyx_v_g = ((PyArrayObject *)values[1]); + __pyx_v_normalize_by_kernel = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_normalize_by_kernel == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 67, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("convolve2d_boundary_none", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 65, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("astropy.convolution.boundary_none.convolve2d_boundary_none", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_f), 
__pyx_ptype_5numpy_ndarray, 1, "f", 0))) __PYX_ERR(0, 65, __pyx_L1_error) + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_g), __pyx_ptype_5numpy_ndarray, 1, "g", 0))) __PYX_ERR(0, 66, __pyx_L1_error) + __pyx_r = __pyx_pf_7astropy_11convolution_13boundary_none_2convolve2d_boundary_none(__pyx_self, __pyx_v_f, __pyx_v_g, __pyx_v_normalize_by_kernel); + + /* function exit code */ + goto __pyx_L0; + __pyx_L1_error:; + __pyx_r = NULL; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7astropy_11convolution_13boundary_none_2convolve2d_boundary_none(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_f, PyArrayObject *__pyx_v_g, int __pyx_v_normalize_by_kernel) { + int __pyx_v_nx; + int __pyx_v_ny; + int __pyx_v_nkx; + int __pyx_v_nky; + int __pyx_v_wkx; + int __pyx_v_wky; + PyArrayObject *__pyx_v_conv = 0; + unsigned int __pyx_v_i; + unsigned int __pyx_v_j; + unsigned int __pyx_v_ii; + unsigned int __pyx_v_jj; + __pyx_t_7astropy_11convolution_13boundary_none_DTYPE_t __pyx_v_top; + __pyx_t_7astropy_11convolution_13boundary_none_DTYPE_t __pyx_v_bot; + __pyx_t_7astropy_11convolution_13boundary_none_DTYPE_t __pyx_v_ker; + __pyx_t_7astropy_11convolution_13boundary_none_DTYPE_t __pyx_v_val; + __Pyx_LocalBuf_ND __pyx_pybuffernd_conv; + __Pyx_Buffer __pyx_pybuffer_conv; + __Pyx_LocalBuf_ND __pyx_pybuffernd_f; + __Pyx_Buffer __pyx_pybuffer_f; + __Pyx_LocalBuf_ND __pyx_pybuffernd_g; + __Pyx_Buffer __pyx_pybuffer_g; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyArrayObject *__pyx_t_7 = NULL; + int __pyx_t_8; + unsigned int __pyx_t_9; + int __pyx_t_10; + unsigned int __pyx_t_11; + long __pyx_t_12; + unsigned int __pyx_t_13; + long __pyx_t_14; + unsigned int __pyx_t_15; + size_t __pyx_t_16; + size_t __pyx_t_17; + size_t __pyx_t_18; + size_t __pyx_t_19; + size_t __pyx_t_20; + size_t __pyx_t_21; + size_t __pyx_t_22; + size_t __pyx_t_23; + size_t __pyx_t_24; + size_t __pyx_t_25; + size_t __pyx_t_26; + size_t __pyx_t_27; + __Pyx_RefNannySetupContext("convolve2d_boundary_none", 0); + __pyx_pybuffer_conv.pybuffer.buf = NULL; + __pyx_pybuffer_conv.refcount = 0; + __pyx_pybuffernd_conv.data = NULL; + __pyx_pybuffernd_conv.rcbuffer = &__pyx_pybuffer_conv; + __pyx_pybuffer_f.pybuffer.buf = NULL; + __pyx_pybuffer_f.refcount = 0; + __pyx_pybuffernd_f.data = NULL; + __pyx_pybuffernd_f.rcbuffer = &__pyx_pybuffer_f; + __pyx_pybuffer_g.pybuffer.buf = NULL; + __pyx_pybuffer_g.refcount = 0; + __pyx_pybuffernd_g.data = NULL; + __pyx_pybuffernd_g.rcbuffer = &__pyx_pybuffer_g; + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_f.rcbuffer->pybuffer, (PyObject*)__pyx_v_f, &__Pyx_TypeInfo_nn___pyx_t_7astropy_11convolution_13boundary_none_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 65, __pyx_L1_error) + } + __pyx_pybuffernd_f.diminfo[0].strides = __pyx_pybuffernd_f.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_f.diminfo[0].shape = __pyx_pybuffernd_f.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_f.diminfo[1].strides = __pyx_pybuffernd_f.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_f.diminfo[1].shape = __pyx_pybuffernd_f.rcbuffer->pybuffer.shape[1]; + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_g.rcbuffer->pybuffer, (PyObject*)__pyx_v_g, 
&__Pyx_TypeInfo_nn___pyx_t_7astropy_11convolution_13boundary_none_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 65, __pyx_L1_error) + } + __pyx_pybuffernd_g.diminfo[0].strides = __pyx_pybuffernd_g.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_g.diminfo[0].shape = __pyx_pybuffernd_g.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_g.diminfo[1].strides = __pyx_pybuffernd_g.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_g.diminfo[1].shape = __pyx_pybuffernd_g.rcbuffer->pybuffer.shape[1]; + + /* "astropy/convolution/boundary_none.pyx":69 + * bint normalize_by_kernel): + * + * if g.shape[0] % 2 != 1 or g.shape[1] % 2 != 1: # <<<<<<<<<<<<<< + * raise ValueError("Convolution kernel must have odd dimensions") + * + */ + __pyx_t_2 = ((__Pyx_mod_long((__pyx_v_g->dimensions[0]), 2) != 1) != 0); + if (!__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L4_bool_binop_done; + } + __pyx_t_2 = ((__Pyx_mod_long((__pyx_v_g->dimensions[1]), 2) != 1) != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L4_bool_binop_done:; + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_none.pyx":70 + * + * if g.shape[0] % 2 != 1 or g.shape[1] % 2 != 1: + * raise ValueError("Convolution kernel must have odd dimensions") # <<<<<<<<<<<<<< + * + * assert f.dtype == DTYPE and g.dtype == DTYPE + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 70, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(0, 70, __pyx_L1_error) + + /* "astropy/convolution/boundary_none.pyx":69 + * bint normalize_by_kernel): + * + * if g.shape[0] % 2 != 1 or g.shape[1] % 2 != 1: # <<<<<<<<<<<<<< + * raise ValueError("Convolution kernel must have odd dimensions") + * + */ + } + + /* "astropy/convolution/boundary_none.pyx":72 + * raise ValueError("Convolution kernel must have odd dimensions") + * + * assert f.dtype == DTYPE and g.dtype == DTYPE # <<<<<<<<<<<<<< + * + * cdef int nx = f.shape[0] + */ + #ifndef CYTHON_WITHOUT_ASSERTIONS + if (unlikely(!Py_OptimizeFlag)) { + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_f), __pyx_n_s_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 72, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 72, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_5 = PyObject_RichCompare(__pyx_t_3, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 72, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 72, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L6_bool_binop_done; + } + __pyx_t_5 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_g), __pyx_n_s_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 72, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 72, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_t_5, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 72, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_2 < 0)) 
__PYX_ERR(0, 72, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_1 = __pyx_t_2; + __pyx_L6_bool_binop_done:; + if (unlikely(!__pyx_t_1)) { + PyErr_SetNone(PyExc_AssertionError); + __PYX_ERR(0, 72, __pyx_L1_error) + } + } + #endif + + /* "astropy/convolution/boundary_none.pyx":74 + * assert f.dtype == DTYPE and g.dtype == DTYPE + * + * cdef int nx = f.shape[0] # <<<<<<<<<<<<<< + * cdef int ny = f.shape[1] + * cdef int nkx = g.shape[0] + */ + __pyx_v_nx = (__pyx_v_f->dimensions[0]); + + /* "astropy/convolution/boundary_none.pyx":75 + * + * cdef int nx = f.shape[0] + * cdef int ny = f.shape[1] # <<<<<<<<<<<<<< + * cdef int nkx = g.shape[0] + * cdef int nky = g.shape[1] + */ + __pyx_v_ny = (__pyx_v_f->dimensions[1]); + + /* "astropy/convolution/boundary_none.pyx":76 + * cdef int nx = f.shape[0] + * cdef int ny = f.shape[1] + * cdef int nkx = g.shape[0] # <<<<<<<<<<<<<< + * cdef int nky = g.shape[1] + * cdef int wkx = nkx // 2 + */ + __pyx_v_nkx = (__pyx_v_g->dimensions[0]); + + /* "astropy/convolution/boundary_none.pyx":77 + * cdef int ny = f.shape[1] + * cdef int nkx = g.shape[0] + * cdef int nky = g.shape[1] # <<<<<<<<<<<<<< + * cdef int wkx = nkx // 2 + * cdef int wky = nky // 2 + */ + __pyx_v_nky = (__pyx_v_g->dimensions[1]); + + /* "astropy/convolution/boundary_none.pyx":78 + * cdef int nkx = g.shape[0] + * cdef int nky = g.shape[1] + * cdef int wkx = nkx // 2 # <<<<<<<<<<<<<< + * cdef int wky = nky // 2 + * + */ + __pyx_v_wkx = __Pyx_div_long(__pyx_v_nkx, 2); + + /* "astropy/convolution/boundary_none.pyx":79 + * cdef int nky = g.shape[1] + * cdef int wkx = nkx // 2 + * cdef int wky = nky // 2 # <<<<<<<<<<<<<< + * + * # The following need to be set to zeros rather than empty because the + */ + __pyx_v_wky = __Pyx_div_long(__pyx_v_nky, 2); + + /* "astropy/convolution/boundary_none.pyx":83 + * # The following need to be set to zeros rather than empty because the + * # boundary does not get reset. 
+ * cdef np.ndarray[DTYPE_t, ndim=2] conv = np.zeros([nx, ny], dtype=DTYPE) # <<<<<<<<<<<<<< + * + * cdef unsigned int i, j, ii, jj + */ + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 83, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_zeros); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 83, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_nx); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 83, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_ny); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 83, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 83, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GIVEREF(__pyx_t_3); + PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_3); + __Pyx_GIVEREF(__pyx_t_5); + PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_5); + __pyx_t_3 = 0; + __pyx_t_5 = 0; + __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 83, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_GIVEREF(__pyx_t_6); + PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_6); + __pyx_t_6 = 0; + __pyx_t_6 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 83, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 83, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + if (PyDict_SetItem(__pyx_t_6, __pyx_n_s_dtype, __pyx_t_3) < 0) __PYX_ERR(0, 83, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_5, __pyx_t_6); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 83, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 83, __pyx_L1_error) + __pyx_t_7 = ((PyArrayObject *)__pyx_t_3); + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_conv.rcbuffer->pybuffer, (PyObject*)__pyx_t_7, &__Pyx_TypeInfo_nn___pyx_t_7astropy_11convolution_13boundary_none_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) { + __pyx_v_conv = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf = NULL; + __PYX_ERR(0, 83, __pyx_L1_error) + } else {__pyx_pybuffernd_conv.diminfo[0].strides = __pyx_pybuffernd_conv.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_conv.diminfo[0].shape = __pyx_pybuffernd_conv.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_conv.diminfo[1].strides = __pyx_pybuffernd_conv.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_conv.diminfo[1].shape = __pyx_pybuffernd_conv.rcbuffer->pybuffer.shape[1]; + } + } + __pyx_t_7 = 0; + __pyx_v_conv = ((PyArrayObject *)__pyx_t_3); + __pyx_t_3 = 0; + + /* "astropy/convolution/boundary_none.pyx":92 + * + * # release the GIL + * with nogil: # <<<<<<<<<<<<<< + * + * # Now run the proper convolution + */ + { + #ifdef WITH_THREAD + PyThreadState *_save; + Py_UNBLOCK_THREADS + __Pyx_FastGIL_Remember(); + #endif + /*try:*/ { + + /* "astropy/convolution/boundary_none.pyx":95 + * + * # Now run the proper convolution + * for i in range(wkx, nx - wkx): # <<<<<<<<<<<<<< + * for j in range(wky, ny - wky): + * top = 0. 
+ */ + __pyx_t_8 = (__pyx_v_nx - __pyx_v_wkx); + for (__pyx_t_9 = __pyx_v_wkx; __pyx_t_9 < __pyx_t_8; __pyx_t_9+=1) { + __pyx_v_i = __pyx_t_9; + + /* "astropy/convolution/boundary_none.pyx":96 + * # Now run the proper convolution + * for i in range(wkx, nx - wkx): + * for j in range(wky, ny - wky): # <<<<<<<<<<<<<< + * top = 0. + * bot = 0. + */ + __pyx_t_10 = (__pyx_v_ny - __pyx_v_wky); + for (__pyx_t_11 = __pyx_v_wky; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) { + __pyx_v_j = __pyx_t_11; + + /* "astropy/convolution/boundary_none.pyx":97 + * for i in range(wkx, nx - wkx): + * for j in range(wky, ny - wky): + * top = 0. # <<<<<<<<<<<<<< + * bot = 0. + * for ii in range(i - wkx, i + wkx + 1): + */ + __pyx_v_top = 0.; + + /* "astropy/convolution/boundary_none.pyx":98 + * for j in range(wky, ny - wky): + * top = 0. + * bot = 0. # <<<<<<<<<<<<<< + * for ii in range(i - wkx, i + wkx + 1): + * for jj in range(j - wky, j + wky + 1): + */ + __pyx_v_bot = 0.; + + /* "astropy/convolution/boundary_none.pyx":99 + * top = 0. + * bot = 0. + * for ii in range(i - wkx, i + wkx + 1): # <<<<<<<<<<<<<< + * for jj in range(j - wky, j + wky + 1): + * val = f[ii, jj] + */ + __pyx_t_12 = ((__pyx_v_i + __pyx_v_wkx) + 1); + for (__pyx_t_13 = (__pyx_v_i - __pyx_v_wkx); __pyx_t_13 < __pyx_t_12; __pyx_t_13+=1) { + __pyx_v_ii = __pyx_t_13; + + /* "astropy/convolution/boundary_none.pyx":100 + * bot = 0. + * for ii in range(i - wkx, i + wkx + 1): + * for jj in range(j - wky, j + wky + 1): # <<<<<<<<<<<<<< + * val = f[ii, jj] + * ker = g[(nkx - 1 - (wkx + ii - i)), + */ + __pyx_t_14 = ((__pyx_v_j + __pyx_v_wky) + 1); + for (__pyx_t_15 = (__pyx_v_j - __pyx_v_wky); __pyx_t_15 < __pyx_t_14; __pyx_t_15+=1) { + __pyx_v_jj = __pyx_t_15; + + /* "astropy/convolution/boundary_none.pyx":101 + * for ii in range(i - wkx, i + wkx + 1): + * for jj in range(j - wky, j + wky + 1): + * val = f[ii, jj] # <<<<<<<<<<<<<< + * ker = g[(nkx - 1 - (wkx + ii - i)), + * (nky - 1 - (wky + jj - j))] + */ + __pyx_t_16 = __pyx_v_ii; + __pyx_t_17 = __pyx_v_jj; + __pyx_v_val = (*__Pyx_BufPtrStrided2d(__pyx_t_7astropy_11convolution_13boundary_none_DTYPE_t *, __pyx_pybuffernd_f.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_f.diminfo[0].strides, __pyx_t_17, __pyx_pybuffernd_f.diminfo[1].strides)); + + /* "astropy/convolution/boundary_none.pyx":102 + * for jj in range(j - wky, j + wky + 1): + * val = f[ii, jj] + * ker = g[(nkx - 1 - (wkx + ii - i)), # <<<<<<<<<<<<<< + * (nky - 1 - (wky + jj - j))] + * if not npy_isnan(val): + */ + __pyx_t_18 = ((unsigned int)((__pyx_v_nkx - 1) - ((__pyx_v_wkx + __pyx_v_ii) - __pyx_v_i))); + __pyx_t_19 = ((unsigned int)((__pyx_v_nky - 1) - ((__pyx_v_wky + __pyx_v_jj) - __pyx_v_j))); + __pyx_v_ker = (*__Pyx_BufPtrStrided2d(__pyx_t_7astropy_11convolution_13boundary_none_DTYPE_t *, __pyx_pybuffernd_g.rcbuffer->pybuffer.buf, __pyx_t_18, __pyx_pybuffernd_g.diminfo[0].strides, __pyx_t_19, __pyx_pybuffernd_g.diminfo[1].strides)); + + /* "astropy/convolution/boundary_none.pyx":104 + * ker = g[(nkx - 1 - (wkx + ii - i)), + * (nky - 1 - (wky + jj - j))] + * if not npy_isnan(val): # <<<<<<<<<<<<<< + * top += val * ker + * bot += ker + */ + __pyx_t_1 = ((!(npy_isnan(__pyx_v_val) != 0)) != 0); + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_none.pyx":105 + * (nky - 1 - (wky + jj - j))] + * if not npy_isnan(val): + * top += val * ker # <<<<<<<<<<<<<< + * bot += ker + * if normalize_by_kernel: + */ + __pyx_v_top = (__pyx_v_top + (__pyx_v_val * __pyx_v_ker)); + + /* "astropy/convolution/boundary_none.pyx":106 + * if not 
npy_isnan(val): + * top += val * ker + * bot += ker # <<<<<<<<<<<<<< + * if normalize_by_kernel: + * if bot == 0: + */ + __pyx_v_bot = (__pyx_v_bot + __pyx_v_ker); + + /* "astropy/convolution/boundary_none.pyx":104 + * ker = g[(nkx - 1 - (wkx + ii - i)), + * (nky - 1 - (wky + jj - j))] + * if not npy_isnan(val): # <<<<<<<<<<<<<< + * top += val * ker + * bot += ker + */ + } + } + } + + /* "astropy/convolution/boundary_none.pyx":107 + * top += val * ker + * bot += ker + * if normalize_by_kernel: # <<<<<<<<<<<<<< + * if bot == 0: + * conv[i, j] = f[i, j] + */ + __pyx_t_1 = (__pyx_v_normalize_by_kernel != 0); + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_none.pyx":108 + * bot += ker + * if normalize_by_kernel: + * if bot == 0: # <<<<<<<<<<<<<< + * conv[i, j] = f[i, j] + * else: + */ + __pyx_t_1 = ((__pyx_v_bot == 0.0) != 0); + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_none.pyx":109 + * if normalize_by_kernel: + * if bot == 0: + * conv[i, j] = f[i, j] # <<<<<<<<<<<<<< + * else: + * conv[i, j] = top / bot + */ + __pyx_t_20 = __pyx_v_i; + __pyx_t_21 = __pyx_v_j; + __pyx_t_22 = __pyx_v_i; + __pyx_t_23 = __pyx_v_j; + *__Pyx_BufPtrStrided2d(__pyx_t_7astropy_11convolution_13boundary_none_DTYPE_t *, __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf, __pyx_t_22, __pyx_pybuffernd_conv.diminfo[0].strides, __pyx_t_23, __pyx_pybuffernd_conv.diminfo[1].strides) = (*__Pyx_BufPtrStrided2d(__pyx_t_7astropy_11convolution_13boundary_none_DTYPE_t *, __pyx_pybuffernd_f.rcbuffer->pybuffer.buf, __pyx_t_20, __pyx_pybuffernd_f.diminfo[0].strides, __pyx_t_21, __pyx_pybuffernd_f.diminfo[1].strides)); + + /* "astropy/convolution/boundary_none.pyx":108 + * bot += ker + * if normalize_by_kernel: + * if bot == 0: # <<<<<<<<<<<<<< + * conv[i, j] = f[i, j] + * else: + */ + goto __pyx_L21; + } + + /* "astropy/convolution/boundary_none.pyx":111 + * conv[i, j] = f[i, j] + * else: + * conv[i, j] = top / bot # <<<<<<<<<<<<<< + * else: + * conv[i, j] = top + */ + /*else*/ { + if (unlikely(__pyx_v_bot == 0)) { + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + PyErr_SetString(PyExc_ZeroDivisionError, "float division"); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + __PYX_ERR(0, 111, __pyx_L9_error) + } + __pyx_t_24 = __pyx_v_i; + __pyx_t_25 = __pyx_v_j; + *__Pyx_BufPtrStrided2d(__pyx_t_7astropy_11convolution_13boundary_none_DTYPE_t *, __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf, __pyx_t_24, __pyx_pybuffernd_conv.diminfo[0].strides, __pyx_t_25, __pyx_pybuffernd_conv.diminfo[1].strides) = (__pyx_v_top / __pyx_v_bot); + } + __pyx_L21:; + + /* "astropy/convolution/boundary_none.pyx":107 + * top += val * ker + * bot += ker + * if normalize_by_kernel: # <<<<<<<<<<<<<< + * if bot == 0: + * conv[i, j] = f[i, j] + */ + goto __pyx_L20; + } + + /* "astropy/convolution/boundary_none.pyx":113 + * conv[i, j] = top / bot + * else: + * conv[i, j] = top # <<<<<<<<<<<<<< + * # GIL acquired again here + * return conv + */ + /*else*/ { + __pyx_t_26 = __pyx_v_i; + __pyx_t_27 = __pyx_v_j; + *__Pyx_BufPtrStrided2d(__pyx_t_7astropy_11convolution_13boundary_none_DTYPE_t *, __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf, __pyx_t_26, __pyx_pybuffernd_conv.diminfo[0].strides, __pyx_t_27, __pyx_pybuffernd_conv.diminfo[1].strides) = __pyx_v_top; + } + __pyx_L20:; + } + } + } + + /* "astropy/convolution/boundary_none.pyx":92 + * + * # release the GIL + * with nogil: # <<<<<<<<<<<<<< + * + * # Now run the proper convolution + */ + /*finally:*/ { + /*normal exit:*/{ + 
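+                /* Editor's note (descriptive, not in the imported source): Cython
+                   expands `with nogil:` into this try/finally. The loops above ran
+                   with the GIL released (Py_UNBLOCK_THREADS); both this normal-exit
+                   path and the error path below re-acquire it via Py_BLOCK_THREADS
+                   before control leaves the block. */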
#ifdef WITH_THREAD + __Pyx_FastGIL_Forget(); + Py_BLOCK_THREADS + #endif + goto __pyx_L10; + } + __pyx_L9_error: { + #ifdef WITH_THREAD + __Pyx_FastGIL_Forget(); + Py_BLOCK_THREADS + #endif + goto __pyx_L1_error; + } + __pyx_L10:; + } + } + + /* "astropy/convolution/boundary_none.pyx":115 + * conv[i, j] = top + * # GIL acquired again here + * return conv # <<<<<<<<<<<<<< + * + * + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject *)__pyx_v_conv)); + __pyx_r = ((PyObject *)__pyx_v_conv); + goto __pyx_L0; + + /* "astropy/convolution/boundary_none.pyx":65 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve2d_boundary_none(np.ndarray[DTYPE_t, ndim=2] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=2] g, + * bint normalize_by_kernel): + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_conv.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_f.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_g.rcbuffer->pybuffer); + __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} + __Pyx_AddTraceback("astropy.convolution.boundary_none.convolve2d_boundary_none", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + goto __pyx_L2; + __pyx_L0:; + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_conv.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_f.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_g.rcbuffer->pybuffer); + __pyx_L2:; + __Pyx_XDECREF((PyObject *)__pyx_v_conv); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "astropy/convolution/boundary_none.pyx":119 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve3d_boundary_none(np.ndarray[DTYPE_t, ndim=3] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=3] g, + * bint normalize_by_kernel): + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7astropy_11convolution_13boundary_none_5convolve3d_boundary_none(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyMethodDef __pyx_mdef_7astropy_11convolution_13boundary_none_5convolve3d_boundary_none = {"convolve3d_boundary_none", (PyCFunction)__pyx_pw_7astropy_11convolution_13boundary_none_5convolve3d_boundary_none, METH_VARARGS|METH_KEYWORDS, 0}; +static PyObject *__pyx_pw_7astropy_11convolution_13boundary_none_5convolve3d_boundary_none(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyArrayObject *__pyx_v_f = 0; + PyArrayObject *__pyx_v_g = 0; + int __pyx_v_normalize_by_kernel; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("convolve3d_boundary_none (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_f,&__pyx_n_s_g,&__pyx_n_s_normalize_by_kernel,0}; + PyObject* values[3] = {0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto 
__pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_f)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_g)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("convolve3d_boundary_none", 1, 3, 3, 1); __PYX_ERR(0, 119, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_normalize_by_kernel)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("convolve3d_boundary_none", 1, 3, 3, 2); __PYX_ERR(0, 119, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "convolve3d_boundary_none") < 0)) __PYX_ERR(0, 119, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + } + __pyx_v_f = ((PyArrayObject *)values[0]); + __pyx_v_g = ((PyArrayObject *)values[1]); + __pyx_v_normalize_by_kernel = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_normalize_by_kernel == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 121, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("convolve3d_boundary_none", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 119, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("astropy.convolution.boundary_none.convolve3d_boundary_none", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_f), __pyx_ptype_5numpy_ndarray, 1, "f", 0))) __PYX_ERR(0, 119, __pyx_L1_error) + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_g), __pyx_ptype_5numpy_ndarray, 1, "g", 0))) __PYX_ERR(0, 120, __pyx_L1_error) + __pyx_r = __pyx_pf_7astropy_11convolution_13boundary_none_4convolve3d_boundary_none(__pyx_self, __pyx_v_f, __pyx_v_g, __pyx_v_normalize_by_kernel); + + /* function exit code */ + goto __pyx_L0; + __pyx_L1_error:; + __pyx_r = NULL; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7astropy_11convolution_13boundary_none_4convolve3d_boundary_none(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_f, PyArrayObject *__pyx_v_g, int __pyx_v_normalize_by_kernel) { + int __pyx_v_nx; + int __pyx_v_ny; + int __pyx_v_nz; + int __pyx_v_nkx; + int __pyx_v_nky; + int __pyx_v_nkz; + int __pyx_v_wkx; + int __pyx_v_wky; + int __pyx_v_wkz; + PyArrayObject *__pyx_v_conv = 0; + unsigned int __pyx_v_i; + unsigned int __pyx_v_j; + unsigned int __pyx_v_k; + unsigned int __pyx_v_ii; + unsigned int __pyx_v_jj; + unsigned int __pyx_v_kk; + __pyx_t_7astropy_11convolution_13boundary_none_DTYPE_t __pyx_v_top; + __pyx_t_7astropy_11convolution_13boundary_none_DTYPE_t __pyx_v_bot; + __pyx_t_7astropy_11convolution_13boundary_none_DTYPE_t __pyx_v_ker; + __pyx_t_7astropy_11convolution_13boundary_none_DTYPE_t __pyx_v_val; + __Pyx_LocalBuf_ND __pyx_pybuffernd_conv; + __Pyx_Buffer __pyx_pybuffer_conv; + __Pyx_LocalBuf_ND __pyx_pybuffernd_f; + __Pyx_Buffer __pyx_pybuffer_f; + __Pyx_LocalBuf_ND __pyx_pybuffernd_g; + __Pyx_Buffer __pyx_pybuffer_g; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations 
+ int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyArrayObject *__pyx_t_8 = NULL; + int __pyx_t_9; + unsigned int __pyx_t_10; + int __pyx_t_11; + unsigned int __pyx_t_12; + int __pyx_t_13; + unsigned int __pyx_t_14; + long __pyx_t_15; + unsigned int __pyx_t_16; + long __pyx_t_17; + unsigned int __pyx_t_18; + long __pyx_t_19; + unsigned int __pyx_t_20; + size_t __pyx_t_21; + size_t __pyx_t_22; + size_t __pyx_t_23; + size_t __pyx_t_24; + size_t __pyx_t_25; + size_t __pyx_t_26; + size_t __pyx_t_27; + size_t __pyx_t_28; + size_t __pyx_t_29; + size_t __pyx_t_30; + size_t __pyx_t_31; + size_t __pyx_t_32; + size_t __pyx_t_33; + size_t __pyx_t_34; + size_t __pyx_t_35; + size_t __pyx_t_36; + size_t __pyx_t_37; + size_t __pyx_t_38; + __Pyx_RefNannySetupContext("convolve3d_boundary_none", 0); + __pyx_pybuffer_conv.pybuffer.buf = NULL; + __pyx_pybuffer_conv.refcount = 0; + __pyx_pybuffernd_conv.data = NULL; + __pyx_pybuffernd_conv.rcbuffer = &__pyx_pybuffer_conv; + __pyx_pybuffer_f.pybuffer.buf = NULL; + __pyx_pybuffer_f.refcount = 0; + __pyx_pybuffernd_f.data = NULL; + __pyx_pybuffernd_f.rcbuffer = &__pyx_pybuffer_f; + __pyx_pybuffer_g.pybuffer.buf = NULL; + __pyx_pybuffer_g.refcount = 0; + __pyx_pybuffernd_g.data = NULL; + __pyx_pybuffernd_g.rcbuffer = &__pyx_pybuffer_g; + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_f.rcbuffer->pybuffer, (PyObject*)__pyx_v_f, &__Pyx_TypeInfo_nn___pyx_t_7astropy_11convolution_13boundary_none_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 3, 0, __pyx_stack) == -1)) __PYX_ERR(0, 119, __pyx_L1_error) + } + __pyx_pybuffernd_f.diminfo[0].strides = __pyx_pybuffernd_f.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_f.diminfo[0].shape = __pyx_pybuffernd_f.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_f.diminfo[1].strides = __pyx_pybuffernd_f.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_f.diminfo[1].shape = __pyx_pybuffernd_f.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_f.diminfo[2].strides = __pyx_pybuffernd_f.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_f.diminfo[2].shape = __pyx_pybuffernd_f.rcbuffer->pybuffer.shape[2]; + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_g.rcbuffer->pybuffer, (PyObject*)__pyx_v_g, &__Pyx_TypeInfo_nn___pyx_t_7astropy_11convolution_13boundary_none_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 3, 0, __pyx_stack) == -1)) __PYX_ERR(0, 119, __pyx_L1_error) + } + __pyx_pybuffernd_g.diminfo[0].strides = __pyx_pybuffernd_g.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_g.diminfo[0].shape = __pyx_pybuffernd_g.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_g.diminfo[1].strides = __pyx_pybuffernd_g.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_g.diminfo[1].shape = __pyx_pybuffernd_g.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_g.diminfo[2].strides = __pyx_pybuffernd_g.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_g.diminfo[2].shape = __pyx_pybuffernd_g.rcbuffer->pybuffer.shape[2]; + + /* "astropy/convolution/boundary_none.pyx":123 + * bint normalize_by_kernel): + * + * if g.shape[0] % 2 != 1 or g.shape[1] % 2 != 1 or g.shape[2] % 2 != 1: # <<<<<<<<<<<<<< + * raise ValueError("Convolution kernel must have odd dimensions") + * + */ + __pyx_t_2 = ((__Pyx_mod_long((__pyx_v_g->dimensions[0]), 2) != 1) != 0); + if (!__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L4_bool_binop_done; + } + __pyx_t_2 = 
((__Pyx_mod_long((__pyx_v_g->dimensions[1]), 2) != 1) != 0); + if (!__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L4_bool_binop_done; + } + __pyx_t_2 = ((__Pyx_mod_long((__pyx_v_g->dimensions[2]), 2) != 1) != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L4_bool_binop_done:; + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_none.pyx":124 + * + * if g.shape[0] % 2 != 1 or g.shape[1] % 2 != 1 or g.shape[2] % 2 != 1: + * raise ValueError("Convolution kernel must have odd dimensions") # <<<<<<<<<<<<<< + * + * assert f.dtype == DTYPE and g.dtype == DTYPE + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 124, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(0, 124, __pyx_L1_error) + + /* "astropy/convolution/boundary_none.pyx":123 + * bint normalize_by_kernel): + * + * if g.shape[0] % 2 != 1 or g.shape[1] % 2 != 1 or g.shape[2] % 2 != 1: # <<<<<<<<<<<<<< + * raise ValueError("Convolution kernel must have odd dimensions") + * + */ + } + + /* "astropy/convolution/boundary_none.pyx":126 + * raise ValueError("Convolution kernel must have odd dimensions") + * + * assert f.dtype == DTYPE and g.dtype == DTYPE # <<<<<<<<<<<<<< + * + * cdef int nx = f.shape[0] + */ + #ifndef CYTHON_WITHOUT_ASSERTIONS + if (unlikely(!Py_OptimizeFlag)) { + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_f), __pyx_n_s_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 126, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 126, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_5 = PyObject_RichCompare(__pyx_t_3, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 126, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 126, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L7_bool_binop_done; + } + __pyx_t_5 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_g), __pyx_n_s_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 126, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 126, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_t_5, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 126, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 126, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_1 = __pyx_t_2; + __pyx_L7_bool_binop_done:; + if (unlikely(!__pyx_t_1)) { + PyErr_SetNone(PyExc_AssertionError); + __PYX_ERR(0, 126, __pyx_L1_error) + } + } + #endif + + /* "astropy/convolution/boundary_none.pyx":128 + * assert f.dtype == DTYPE and g.dtype == DTYPE + * + * cdef int nx = f.shape[0] # <<<<<<<<<<<<<< + * cdef int ny = f.shape[1] + * cdef int nz = f.shape[2] + */ + __pyx_v_nx = (__pyx_v_f->dimensions[0]); + + /* "astropy/convolution/boundary_none.pyx":129 + * + * cdef int nx = f.shape[0] + * cdef int ny = f.shape[1] # <<<<<<<<<<<<<< + * cdef int nz = f.shape[2] + * cdef int nkx = g.shape[0] + */ + __pyx_v_ny = 
(__pyx_v_f->dimensions[1]); + + /* "astropy/convolution/boundary_none.pyx":130 + * cdef int nx = f.shape[0] + * cdef int ny = f.shape[1] + * cdef int nz = f.shape[2] # <<<<<<<<<<<<<< + * cdef int nkx = g.shape[0] + * cdef int nky = g.shape[1] + */ + __pyx_v_nz = (__pyx_v_f->dimensions[2]); + + /* "astropy/convolution/boundary_none.pyx":131 + * cdef int ny = f.shape[1] + * cdef int nz = f.shape[2] + * cdef int nkx = g.shape[0] # <<<<<<<<<<<<<< + * cdef int nky = g.shape[1] + * cdef int nkz = g.shape[2] + */ + __pyx_v_nkx = (__pyx_v_g->dimensions[0]); + + /* "astropy/convolution/boundary_none.pyx":132 + * cdef int nz = f.shape[2] + * cdef int nkx = g.shape[0] + * cdef int nky = g.shape[1] # <<<<<<<<<<<<<< + * cdef int nkz = g.shape[2] + * cdef int wkx = nkx // 2 + */ + __pyx_v_nky = (__pyx_v_g->dimensions[1]); + + /* "astropy/convolution/boundary_none.pyx":133 + * cdef int nkx = g.shape[0] + * cdef int nky = g.shape[1] + * cdef int nkz = g.shape[2] # <<<<<<<<<<<<<< + * cdef int wkx = nkx // 2 + * cdef int wky = nky // 2 + */ + __pyx_v_nkz = (__pyx_v_g->dimensions[2]); + + /* "astropy/convolution/boundary_none.pyx":134 + * cdef int nky = g.shape[1] + * cdef int nkz = g.shape[2] + * cdef int wkx = nkx // 2 # <<<<<<<<<<<<<< + * cdef int wky = nky // 2 + * cdef int wkz = nkz // 2 + */ + __pyx_v_wkx = __Pyx_div_long(__pyx_v_nkx, 2); + + /* "astropy/convolution/boundary_none.pyx":135 + * cdef int nkz = g.shape[2] + * cdef int wkx = nkx // 2 + * cdef int wky = nky // 2 # <<<<<<<<<<<<<< + * cdef int wkz = nkz // 2 + * + */ + __pyx_v_wky = __Pyx_div_long(__pyx_v_nky, 2); + + /* "astropy/convolution/boundary_none.pyx":136 + * cdef int wkx = nkx // 2 + * cdef int wky = nky // 2 + * cdef int wkz = nkz // 2 # <<<<<<<<<<<<<< + * + * # The following need to be set to zeros rather than empty because the + */ + __pyx_v_wkz = __Pyx_div_long(__pyx_v_nkz, 2); + + /* "astropy/convolution/boundary_none.pyx":140 + * # The following need to be set to zeros rather than empty because the + * # boundary does not get reset. 
+ * cdef np.ndarray[DTYPE_t, ndim=3] conv = np.zeros([nx, ny, nz], dtype=DTYPE) # <<<<<<<<<<<<<< + * + * cdef unsigned int i, j, k, ii, jj, kk + */ + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 140, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_zeros); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 140, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_nx); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 140, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_ny); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 140, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_6 = __Pyx_PyInt_From_int(__pyx_v_nz); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 140, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_7 = PyList_New(3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 140, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __Pyx_GIVEREF(__pyx_t_3); + PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_3); + __Pyx_GIVEREF(__pyx_t_5); + PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_5); + __Pyx_GIVEREF(__pyx_t_6); + PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_6); + __pyx_t_3 = 0; + __pyx_t_5 = 0; + __pyx_t_6 = 0; + __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 140, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GIVEREF(__pyx_t_7); + PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_7); + __pyx_t_7 = 0; + __pyx_t_7 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 140, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 140, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + if (PyDict_SetItem(__pyx_t_7, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 140, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_6, __pyx_t_7); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 140, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 140, __pyx_L1_error) + __pyx_t_8 = ((PyArrayObject *)__pyx_t_5); + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_conv.rcbuffer->pybuffer, (PyObject*)__pyx_t_8, &__Pyx_TypeInfo_nn___pyx_t_7astropy_11convolution_13boundary_none_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 3, 0, __pyx_stack) == -1)) { + __pyx_v_conv = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf = NULL; + __PYX_ERR(0, 140, __pyx_L1_error) + } else {__pyx_pybuffernd_conv.diminfo[0].strides = __pyx_pybuffernd_conv.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_conv.diminfo[0].shape = __pyx_pybuffernd_conv.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_conv.diminfo[1].strides = __pyx_pybuffernd_conv.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_conv.diminfo[1].shape = __pyx_pybuffernd_conv.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_conv.diminfo[2].strides = __pyx_pybuffernd_conv.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_conv.diminfo[2].shape = __pyx_pybuffernd_conv.rcbuffer->pybuffer.shape[2]; + } + } + __pyx_t_8 = 0; + __pyx_v_conv = ((PyArrayObject *)__pyx_t_5); + __pyx_t_5 = 0; + + /* "astropy/convolution/boundary_none.pyx":149 + * + * # 
release the GIL + * with nogil: # <<<<<<<<<<<<<< + * + * # Now run the proper convolution + */ + { + #ifdef WITH_THREAD + PyThreadState *_save; + Py_UNBLOCK_THREADS + __Pyx_FastGIL_Remember(); + #endif + /*try:*/ { + + /* "astropy/convolution/boundary_none.pyx":152 + * + * # Now run the proper convolution + * for i in range(wkx, nx - wkx): # <<<<<<<<<<<<<< + * for j in range(wky, ny - wky): + * for k in range(wkz, nz - wkz): + */ + __pyx_t_9 = (__pyx_v_nx - __pyx_v_wkx); + for (__pyx_t_10 = __pyx_v_wkx; __pyx_t_10 < __pyx_t_9; __pyx_t_10+=1) { + __pyx_v_i = __pyx_t_10; + + /* "astropy/convolution/boundary_none.pyx":153 + * # Now run the proper convolution + * for i in range(wkx, nx - wkx): + * for j in range(wky, ny - wky): # <<<<<<<<<<<<<< + * for k in range(wkz, nz - wkz): + * top = 0. + */ + __pyx_t_11 = (__pyx_v_ny - __pyx_v_wky); + for (__pyx_t_12 = __pyx_v_wky; __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) { + __pyx_v_j = __pyx_t_12; + + /* "astropy/convolution/boundary_none.pyx":154 + * for i in range(wkx, nx - wkx): + * for j in range(wky, ny - wky): + * for k in range(wkz, nz - wkz): # <<<<<<<<<<<<<< + * top = 0. + * bot = 0. + */ + __pyx_t_13 = (__pyx_v_nz - __pyx_v_wkz); + for (__pyx_t_14 = __pyx_v_wkz; __pyx_t_14 < __pyx_t_13; __pyx_t_14+=1) { + __pyx_v_k = __pyx_t_14; + + /* "astropy/convolution/boundary_none.pyx":155 + * for j in range(wky, ny - wky): + * for k in range(wkz, nz - wkz): + * top = 0. # <<<<<<<<<<<<<< + * bot = 0. + * for ii in range(i - wkx, i + wkx + 1): + */ + __pyx_v_top = 0.; + + /* "astropy/convolution/boundary_none.pyx":156 + * for k in range(wkz, nz - wkz): + * top = 0. + * bot = 0. # <<<<<<<<<<<<<< + * for ii in range(i - wkx, i + wkx + 1): + * for jj in range(j - wky, j + wky + 1): + */ + __pyx_v_bot = 0.; + + /* "astropy/convolution/boundary_none.pyx":157 + * top = 0. + * bot = 0. + * for ii in range(i - wkx, i + wkx + 1): # <<<<<<<<<<<<<< + * for jj in range(j - wky, j + wky + 1): + * for kk in range(k - wkz, k + wkz + 1): + */ + __pyx_t_15 = ((__pyx_v_i + __pyx_v_wkx) + 1); + for (__pyx_t_16 = (__pyx_v_i - __pyx_v_wkx); __pyx_t_16 < __pyx_t_15; __pyx_t_16+=1) { + __pyx_v_ii = __pyx_t_16; + + /* "astropy/convolution/boundary_none.pyx":158 + * bot = 0. 
+ * for ii in range(i - wkx, i + wkx + 1): + * for jj in range(j - wky, j + wky + 1): # <<<<<<<<<<<<<< + * for kk in range(k - wkz, k + wkz + 1): + * val = f[ii, jj, kk] + */ + __pyx_t_17 = ((__pyx_v_j + __pyx_v_wky) + 1); + for (__pyx_t_18 = (__pyx_v_j - __pyx_v_wky); __pyx_t_18 < __pyx_t_17; __pyx_t_18+=1) { + __pyx_v_jj = __pyx_t_18; + + /* "astropy/convolution/boundary_none.pyx":159 + * for ii in range(i - wkx, i + wkx + 1): + * for jj in range(j - wky, j + wky + 1): + * for kk in range(k - wkz, k + wkz + 1): # <<<<<<<<<<<<<< + * val = f[ii, jj, kk] + * ker = g[(nkx - 1 - (wkx + ii - i)), + */ + __pyx_t_19 = ((__pyx_v_k + __pyx_v_wkz) + 1); + for (__pyx_t_20 = (__pyx_v_k - __pyx_v_wkz); __pyx_t_20 < __pyx_t_19; __pyx_t_20+=1) { + __pyx_v_kk = __pyx_t_20; + + /* "astropy/convolution/boundary_none.pyx":160 + * for jj in range(j - wky, j + wky + 1): + * for kk in range(k - wkz, k + wkz + 1): + * val = f[ii, jj, kk] # <<<<<<<<<<<<<< + * ker = g[(nkx - 1 - (wkx + ii - i)), + * (nky - 1 - (wky + jj - j)), + */ + __pyx_t_21 = __pyx_v_ii; + __pyx_t_22 = __pyx_v_jj; + __pyx_t_23 = __pyx_v_kk; + __pyx_v_val = (*__Pyx_BufPtrStrided3d(__pyx_t_7astropy_11convolution_13boundary_none_DTYPE_t *, __pyx_pybuffernd_f.rcbuffer->pybuffer.buf, __pyx_t_21, __pyx_pybuffernd_f.diminfo[0].strides, __pyx_t_22, __pyx_pybuffernd_f.diminfo[1].strides, __pyx_t_23, __pyx_pybuffernd_f.diminfo[2].strides)); + + /* "astropy/convolution/boundary_none.pyx":161 + * for kk in range(k - wkz, k + wkz + 1): + * val = f[ii, jj, kk] + * ker = g[(nkx - 1 - (wkx + ii - i)), # <<<<<<<<<<<<<< + * (nky - 1 - (wky + jj - j)), + * (nkz - 1 - (wkz + kk - k))] + */ + __pyx_t_24 = ((unsigned int)((__pyx_v_nkx - 1) - ((__pyx_v_wkx + __pyx_v_ii) - __pyx_v_i))); + __pyx_t_25 = ((unsigned int)((__pyx_v_nky - 1) - ((__pyx_v_wky + __pyx_v_jj) - __pyx_v_j))); + __pyx_t_26 = ((unsigned int)((__pyx_v_nkz - 1) - ((__pyx_v_wkz + __pyx_v_kk) - __pyx_v_k))); + __pyx_v_ker = (*__Pyx_BufPtrStrided3d(__pyx_t_7astropy_11convolution_13boundary_none_DTYPE_t *, __pyx_pybuffernd_g.rcbuffer->pybuffer.buf, __pyx_t_24, __pyx_pybuffernd_g.diminfo[0].strides, __pyx_t_25, __pyx_pybuffernd_g.diminfo[1].strides, __pyx_t_26, __pyx_pybuffernd_g.diminfo[2].strides)); + + /* "astropy/convolution/boundary_none.pyx":164 + * (nky - 1 - (wky + jj - j)), + * (nkz - 1 - (wkz + kk - k))] + * if not npy_isnan(val): # <<<<<<<<<<<<<< + * top += val * ker + * bot += ker + */ + __pyx_t_1 = ((!(npy_isnan(__pyx_v_val) != 0)) != 0); + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_none.pyx":165 + * (nkz - 1 - (wkz + kk - k))] + * if not npy_isnan(val): + * top += val * ker # <<<<<<<<<<<<<< + * bot += ker + * if normalize_by_kernel: + */ + __pyx_v_top = (__pyx_v_top + (__pyx_v_val * __pyx_v_ker)); + + /* "astropy/convolution/boundary_none.pyx":166 + * if not npy_isnan(val): + * top += val * ker + * bot += ker # <<<<<<<<<<<<<< + * if normalize_by_kernel: + * if bot == 0: + */ + __pyx_v_bot = (__pyx_v_bot + __pyx_v_ker); + + /* "astropy/convolution/boundary_none.pyx":164 + * (nky - 1 - (wky + jj - j)), + * (nkz - 1 - (wkz + kk - k))] + * if not npy_isnan(val): # <<<<<<<<<<<<<< + * top += val * ker + * bot += ker + */ + } + } + } + } + + /* "astropy/convolution/boundary_none.pyx":167 + * top += val * ker + * bot += ker + * if normalize_by_kernel: # <<<<<<<<<<<<<< + * if bot == 0: + * conv[i, j, k] = f[i, j, k] + */ + __pyx_t_1 = (__pyx_v_normalize_by_kernel != 0); + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_none.pyx":168 + * bot += ker + * if normalize_by_kernel: + * if 
bot == 0: # <<<<<<<<<<<<<< + * conv[i, j, k] = f[i, j, k] + * else: + */ + __pyx_t_1 = ((__pyx_v_bot == 0.0) != 0); + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_none.pyx":169 + * if normalize_by_kernel: + * if bot == 0: + * conv[i, j, k] = f[i, j, k] # <<<<<<<<<<<<<< + * else: + * conv[i, j, k] = top / bot + */ + __pyx_t_27 = __pyx_v_i; + __pyx_t_28 = __pyx_v_j; + __pyx_t_29 = __pyx_v_k; + __pyx_t_30 = __pyx_v_i; + __pyx_t_31 = __pyx_v_j; + __pyx_t_32 = __pyx_v_k; + *__Pyx_BufPtrStrided3d(__pyx_t_7astropy_11convolution_13boundary_none_DTYPE_t *, __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf, __pyx_t_30, __pyx_pybuffernd_conv.diminfo[0].strides, __pyx_t_31, __pyx_pybuffernd_conv.diminfo[1].strides, __pyx_t_32, __pyx_pybuffernd_conv.diminfo[2].strides) = (*__Pyx_BufPtrStrided3d(__pyx_t_7astropy_11convolution_13boundary_none_DTYPE_t *, __pyx_pybuffernd_f.rcbuffer->pybuffer.buf, __pyx_t_27, __pyx_pybuffernd_f.diminfo[0].strides, __pyx_t_28, __pyx_pybuffernd_f.diminfo[1].strides, __pyx_t_29, __pyx_pybuffernd_f.diminfo[2].strides)); + + /* "astropy/convolution/boundary_none.pyx":168 + * bot += ker + * if normalize_by_kernel: + * if bot == 0: # <<<<<<<<<<<<<< + * conv[i, j, k] = f[i, j, k] + * else: + */ + goto __pyx_L26; + } + + /* "astropy/convolution/boundary_none.pyx":171 + * conv[i, j, k] = f[i, j, k] + * else: + * conv[i, j, k] = top / bot # <<<<<<<<<<<<<< + * else: + * conv[i, j, k] = top + */ + /*else*/ { + if (unlikely(__pyx_v_bot == 0)) { + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + PyErr_SetString(PyExc_ZeroDivisionError, "float division"); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + __PYX_ERR(0, 171, __pyx_L10_error) + } + __pyx_t_33 = __pyx_v_i; + __pyx_t_34 = __pyx_v_j; + __pyx_t_35 = __pyx_v_k; + *__Pyx_BufPtrStrided3d(__pyx_t_7astropy_11convolution_13boundary_none_DTYPE_t *, __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf, __pyx_t_33, __pyx_pybuffernd_conv.diminfo[0].strides, __pyx_t_34, __pyx_pybuffernd_conv.diminfo[1].strides, __pyx_t_35, __pyx_pybuffernd_conv.diminfo[2].strides) = (__pyx_v_top / __pyx_v_bot); + } + __pyx_L26:; + + /* "astropy/convolution/boundary_none.pyx":167 + * top += val * ker + * bot += ker + * if normalize_by_kernel: # <<<<<<<<<<<<<< + * if bot == 0: + * conv[i, j, k] = f[i, j, k] + */ + goto __pyx_L25; + } + + /* "astropy/convolution/boundary_none.pyx":173 + * conv[i, j, k] = top / bot + * else: + * conv[i, j, k] = top # <<<<<<<<<<<<<< + * # GIL acquired again here + * return conv + */ + /*else*/ { + __pyx_t_36 = __pyx_v_i; + __pyx_t_37 = __pyx_v_j; + __pyx_t_38 = __pyx_v_k; + *__Pyx_BufPtrStrided3d(__pyx_t_7astropy_11convolution_13boundary_none_DTYPE_t *, __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf, __pyx_t_36, __pyx_pybuffernd_conv.diminfo[0].strides, __pyx_t_37, __pyx_pybuffernd_conv.diminfo[1].strides, __pyx_t_38, __pyx_pybuffernd_conv.diminfo[2].strides) = __pyx_v_top; + } + __pyx_L25:; + } + } + } + } + + /* "astropy/convolution/boundary_none.pyx":149 + * + * # release the GIL + * with nogil: # <<<<<<<<<<<<<< + * + * # Now run the proper convolution + */ + /*finally:*/ { + /*normal exit:*/{ + #ifdef WITH_THREAD + __Pyx_FastGIL_Forget(); + Py_BLOCK_THREADS + #endif + goto __pyx_L11; + } + __pyx_L10_error: { + #ifdef WITH_THREAD + __Pyx_FastGIL_Forget(); + Py_BLOCK_THREADS + #endif + goto __pyx_L1_error; + } + __pyx_L11:; + } + } + + /* "astropy/convolution/boundary_none.pyx":175 + * conv[i, j, k] = top + * # GIL acquired again here + * return conv # 
<<<<<<<<<<<<<< + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject *)__pyx_v_conv)); + __pyx_r = ((PyObject *)__pyx_v_conv); + goto __pyx_L0; + + /* "astropy/convolution/boundary_none.pyx":119 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve3d_boundary_none(np.ndarray[DTYPE_t, ndim=3] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=3] g, + * bint normalize_by_kernel): + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_conv.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_f.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_g.rcbuffer->pybuffer); + __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} + __Pyx_AddTraceback("astropy.convolution.boundary_none.convolve3d_boundary_none", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + goto __pyx_L2; + __pyx_L0:; + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_conv.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_f.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_g.rcbuffer->pybuffer); + __pyx_L2:; + __Pyx_XDECREF((PyObject *)__pyx_v_conv); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":214 + * # experimental exception made for __getbuffer__ and __releasebuffer__ + * # -- the details of this may change. + * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< + * # This implementation of getbuffer is geared towards Cython + * # requirements, and does not yet fullfill the PEP. 
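+ */
+
+/* Added commentary, not part of the Cython-generated output: the
+ * convolve3d_boundary_none function above only visits the fully
+ * overlapping interior (i in [wkx, nx - wkx), and likewise for j and
+ * k), which is what boundary "none" means -- cells whose kernel
+ * footprint would extend past the array edge keep the zeros that conv
+ * was initialised with. The kernel g is read back-to-front so the loop
+ * computes a true convolution rather than a correlation. A minimal
+ * sketch of that index arithmetic for one axis (hypothetical helper,
+ * not part of the module):
+ */
+static CYTHON_UNUSED int __pyx_sketch_flipped_kernel_index(int nk, int wk, int i, int ii) {
+    /* For ii running over the window [i - wk, i + wk], this returns
+     * nk - 1 down to 0, i.e. the kernel is traversed in reverse. */
+    return (nk - 1) - ((wk + ii) - i);
+}
+
+/* End of added commentary; the generated code resumes below.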
+ */ + +/* Python wrapper */ +static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ +static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); + __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { + int __pyx_v_copy_shape; + int __pyx_v_i; + int __pyx_v_ndim; + int __pyx_v_endian_detector; + int __pyx_v_little_endian; + int __pyx_v_t; + char *__pyx_v_f; + PyArray_Descr *__pyx_v_descr = 0; + int __pyx_v_offset; + int __pyx_v_hasfields; + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + int __pyx_t_5; + PyObject *__pyx_t_6 = NULL; + char *__pyx_t_7; + __Pyx_RefNannySetupContext("__getbuffer__", 0); + if (__pyx_v_info != NULL) { + __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); + __Pyx_GIVEREF(__pyx_v_info->obj); + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":220 + * # of flags + * + * if info == NULL: return # <<<<<<<<<<<<<< + * + * cdef int copy_shape, i, ndim + */ + __pyx_t_1 = ((__pyx_v_info == NULL) != 0); + if (__pyx_t_1) { + __pyx_r = 0; + goto __pyx_L0; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":223 + * + * cdef int copy_shape, i, ndim + * cdef int endian_detector = 1 # <<<<<<<<<<<<<< + * cdef bint little_endian = ((&endian_detector)[0] != 0) + * + */ + __pyx_v_endian_detector = 1; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":224 + * cdef int copy_shape, i, ndim + * cdef int endian_detector = 1 + * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< + * + * ndim = PyArray_NDIM(self) + */ + __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":226 + * cdef bint little_endian = ((&endian_detector)[0] != 0) + * + * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< + * + * if sizeof(npy_intp) != sizeof(Py_ssize_t): + */ + __pyx_v_ndim = PyArray_NDIM(__pyx_v_self); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":228 + * ndim = PyArray_NDIM(self) + * + * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< + * copy_shape = 1 + * else: + */ + __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); + if (__pyx_t_1) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":229 + * + * if sizeof(npy_intp) != sizeof(Py_ssize_t): + * copy_shape = 1 # <<<<<<<<<<<<<< + * else: + * copy_shape = 0 + */ + __pyx_v_copy_shape = 1; + + /* 
"../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":228 + * ndim = PyArray_NDIM(self) + * + * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< + * copy_shape = 1 + * else: + */ + goto __pyx_L4; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":231 + * copy_shape = 1 + * else: + * copy_shape = 0 # <<<<<<<<<<<<<< + * + * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) + */ + /*else*/ { + __pyx_v_copy_shape = 0; + } + __pyx_L4:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":233 + * copy_shape = 0 + * + * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< + * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): + * raise ValueError(u"ndarray is not C contiguous") + */ + __pyx_t_2 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0); + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L6_bool_binop_done; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":234 + * + * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) + * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< + * raise ValueError(u"ndarray is not C contiguous") + * + */ + __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS) != 0)) != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L6_bool_binop_done:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":233 + * copy_shape = 0 + * + * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< + * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): + * raise ValueError(u"ndarray is not C contiguous") + */ + if (__pyx_t_1) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":235 + * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) + * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): + * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< + * + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 235, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 235, __pyx_L1_error) + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":233 + * copy_shape = 0 + * + * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< + * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): + * raise ValueError(u"ndarray is not C contiguous") + */ + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":237 + * raise ValueError(u"ndarray is not C contiguous") + * + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< + * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): + * raise ValueError(u"ndarray is 
not Fortran contiguous") + */ + __pyx_t_2 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0); + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L9_bool_binop_done; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":238 + * + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) + * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< + * raise ValueError(u"ndarray is not Fortran contiguous") + * + */ + __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS) != 0)) != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L9_bool_binop_done:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":237 + * raise ValueError(u"ndarray is not C contiguous") + * + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< + * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): + * raise ValueError(u"ndarray is not Fortran contiguous") + */ + if (__pyx_t_1) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":239 + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) + * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): + * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< + * + * info.buf = PyArray_DATA(self) + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 239, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 239, __pyx_L1_error) + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":237 + * raise ValueError(u"ndarray is not C contiguous") + * + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< + * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): + * raise ValueError(u"ndarray is not Fortran contiguous") + */ + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":241 + * raise ValueError(u"ndarray is not Fortran contiguous") + * + * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< + * info.ndim = ndim + * if copy_shape: + */ + __pyx_v_info->buf = PyArray_DATA(__pyx_v_self); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":242 + * + * info.buf = PyArray_DATA(self) + * info.ndim = ndim # <<<<<<<<<<<<<< + * if copy_shape: + * # Allocate new buffer for strides and shape info. + */ + __pyx_v_info->ndim = __pyx_v_ndim; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":243 + * info.buf = PyArray_DATA(self) + * info.ndim = ndim + * if copy_shape: # <<<<<<<<<<<<<< + * # Allocate new buffer for strides and shape info. + * # This is allocated as one block, strides first. + */ + __pyx_t_1 = (__pyx_v_copy_shape != 0); + if (__pyx_t_1) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":246 + * # Allocate new buffer for strides and shape info. 
+ * # This is allocated as one block, strides first. + * info.strides = PyObject_Malloc(sizeof(Py_ssize_t) * 2 * ndim) # <<<<<<<<<<<<<< + * info.shape = info.strides + ndim + * for i in range(ndim): + */ + __pyx_v_info->strides = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * 2) * ((size_t)__pyx_v_ndim)))); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":247 + * # This is allocated as one block, strides first. + * info.strides = PyObject_Malloc(sizeof(Py_ssize_t) * 2 * ndim) + * info.shape = info.strides + ndim # <<<<<<<<<<<<<< + * for i in range(ndim): + * info.strides[i] = PyArray_STRIDES(self)[i] + */ + __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":248 + * info.strides = PyObject_Malloc(sizeof(Py_ssize_t) * 2 * ndim) + * info.shape = info.strides + ndim + * for i in range(ndim): # <<<<<<<<<<<<<< + * info.strides[i] = PyArray_STRIDES(self)[i] + * info.shape[i] = PyArray_DIMS(self)[i] + */ + __pyx_t_4 = __pyx_v_ndim; + for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { + __pyx_v_i = __pyx_t_5; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":249 + * info.shape = info.strides + ndim + * for i in range(ndim): + * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< + * info.shape[i] = PyArray_DIMS(self)[i] + * else: + */ + (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":250 + * for i in range(ndim): + * info.strides[i] = PyArray_STRIDES(self)[i] + * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< + * else: + * info.strides = PyArray_STRIDES(self) + */ + (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]); + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":243 + * info.buf = PyArray_DATA(self) + * info.ndim = ndim + * if copy_shape: # <<<<<<<<<<<<<< + * # Allocate new buffer for strides and shape info. + * # This is allocated as one block, strides first. 
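+ * (added note, not part of the quoted source: the single
+ * PyObject_Malloc block holds 2*ndim Py_ssize_t values, strides in the
+ * first half and shape in the second, which is why __releasebuffer__
+ * later frees both with one PyObject_Free(info.strides).)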
+ */ + goto __pyx_L11; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":252 + * info.shape[i] = PyArray_DIMS(self)[i] + * else: + * info.strides = PyArray_STRIDES(self) # <<<<<<<<<<<<<< + * info.shape = PyArray_DIMS(self) + * info.suboffsets = NULL + */ + /*else*/ { + __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self)); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":253 + * else: + * info.strides = PyArray_STRIDES(self) + * info.shape = PyArray_DIMS(self) # <<<<<<<<<<<<<< + * info.suboffsets = NULL + * info.itemsize = PyArray_ITEMSIZE(self) + */ + __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self)); + } + __pyx_L11:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":254 + * info.strides = PyArray_STRIDES(self) + * info.shape = PyArray_DIMS(self) + * info.suboffsets = NULL # <<<<<<<<<<<<<< + * info.itemsize = PyArray_ITEMSIZE(self) + * info.readonly = not PyArray_ISWRITEABLE(self) + */ + __pyx_v_info->suboffsets = NULL; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":255 + * info.shape = PyArray_DIMS(self) + * info.suboffsets = NULL + * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< + * info.readonly = not PyArray_ISWRITEABLE(self) + * + */ + __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":256 + * info.suboffsets = NULL + * info.itemsize = PyArray_ITEMSIZE(self) + * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< + * + * cdef int t + */ + __pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0)); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":259 + * + * cdef int t + * cdef char* f = NULL # <<<<<<<<<<<<<< + * cdef dtype descr = self.descr + * cdef int offset + */ + __pyx_v_f = NULL; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":260 + * cdef int t + * cdef char* f = NULL + * cdef dtype descr = self.descr # <<<<<<<<<<<<<< + * cdef int offset + * + */ + __pyx_t_3 = ((PyObject *)__pyx_v_self->descr); + __Pyx_INCREF(__pyx_t_3); + __pyx_v_descr = ((PyArray_Descr *)__pyx_t_3); + __pyx_t_3 = 0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":263 + * cdef int offset + * + * cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<< + * + * if not hasfields and not copy_shape: + */ + __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":265 + * cdef bint hasfields = PyDataType_HASFIELDS(descr) + * + * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< + * # do not call releasebuffer + * info.obj = None + */ + __pyx_t_2 = ((!(__pyx_v_hasfields != 0)) != 0); + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L15_bool_binop_done; + } + 
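+ /* Added commentary, not Cython output: this is Cython's usual
+  * short-circuit pattern for `not hasfields and not copy_shape` --
+  * each operand is evaluated into __pyx_t_2, and as soon as one
+  * operand decides the result, a goto to __pyx_L15_bool_binop_done
+  * skips the rest. */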
__pyx_t_2 = ((!(__pyx_v_copy_shape != 0)) != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L15_bool_binop_done:; + if (__pyx_t_1) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":267 + * if not hasfields and not copy_shape: + * # do not call releasebuffer + * info.obj = None # <<<<<<<<<<<<<< + * else: + * # need to call releasebuffer + */ + __Pyx_INCREF(Py_None); + __Pyx_GIVEREF(Py_None); + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); + __pyx_v_info->obj = Py_None; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":265 + * cdef bint hasfields = PyDataType_HASFIELDS(descr) + * + * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< + * # do not call releasebuffer + * info.obj = None + */ + goto __pyx_L14; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":270 + * else: + * # need to call releasebuffer + * info.obj = self # <<<<<<<<<<<<<< + * + * if not hasfields: + */ + /*else*/ { + __Pyx_INCREF(((PyObject *)__pyx_v_self)); + __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); + __pyx_v_info->obj = ((PyObject *)__pyx_v_self); + } + __pyx_L14:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":272 + * info.obj = self + * + * if not hasfields: # <<<<<<<<<<<<<< + * t = descr.type_num + * if ((descr.byteorder == c'>' and little_endian) or + */ + __pyx_t_1 = ((!(__pyx_v_hasfields != 0)) != 0); + if (__pyx_t_1) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":273 + * + * if not hasfields: + * t = descr.type_num # <<<<<<<<<<<<<< + * if ((descr.byteorder == c'>' and little_endian) or + * (descr.byteorder == c'<' and not little_endian)): + */ + __pyx_t_4 = __pyx_v_descr->type_num; + __pyx_v_t = __pyx_t_4; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":274 + * if not hasfields: + * t = descr.type_num + * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< + * (descr.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + */ + __pyx_t_2 = ((__pyx_v_descr->byteorder == '>') != 0); + if (!__pyx_t_2) { + goto __pyx_L20_next_or; + } else { + } + __pyx_t_2 = (__pyx_v_little_endian != 0); + if (!__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L19_bool_binop_done; + } + __pyx_L20_next_or:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":275 + * t = descr.type_num + * if ((descr.byteorder == c'>' and little_endian) or + * (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< + * raise ValueError(u"Non-native byte order not supported") + * if t == NPY_BYTE: f = "b" + */ + __pyx_t_2 = ((__pyx_v_descr->byteorder == '<') != 0); + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L19_bool_binop_done; + } + __pyx_t_2 = ((!(__pyx_v_little_endian != 0)) != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L19_bool_binop_done:; + + /* 
"../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":274 + * if not hasfields: + * t = descr.type_num + * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< + * (descr.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + */ + if (__pyx_t_1) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":276 + * if ((descr.byteorder == c'>' and little_endian) or + * (descr.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< + * if t == NPY_BYTE: f = "b" + * elif t == NPY_UBYTE: f = "B" + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 276, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 276, __pyx_L1_error) + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":274 + * if not hasfields: + * t = descr.type_num + * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< + * (descr.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + */ + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":277 + * (descr.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< + * elif t == NPY_UBYTE: f = "B" + * elif t == NPY_SHORT: f = "h" + */ + switch (__pyx_v_t) { + case NPY_BYTE: + __pyx_v_f = ((char *)"b"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":278 + * raise ValueError(u"Non-native byte order not supported") + * if t == NPY_BYTE: f = "b" + * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< + * elif t == NPY_SHORT: f = "h" + * elif t == NPY_USHORT: f = "H" + */ + case NPY_UBYTE: + __pyx_v_f = ((char *)"B"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":279 + * if t == NPY_BYTE: f = "b" + * elif t == NPY_UBYTE: f = "B" + * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< + * elif t == NPY_USHORT: f = "H" + * elif t == NPY_INT: f = "i" + */ + case NPY_SHORT: + __pyx_v_f = ((char *)"h"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":280 + * elif t == NPY_UBYTE: f = "B" + * elif t == NPY_SHORT: f = "h" + * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< + * elif t == NPY_INT: f = "i" + * elif t == NPY_UINT: f = "I" + */ + case NPY_USHORT: + __pyx_v_f = ((char *)"H"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":281 + * elif t == NPY_SHORT: f = "h" + * elif t == NPY_USHORT: f = "H" + * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< + * elif t == NPY_UINT: f = "I" + * elif t == NPY_LONG: f = "l" + */ + case NPY_INT: + __pyx_v_f = ((char *)"i"); + break; + + /* 
"../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":282 + * elif t == NPY_USHORT: f = "H" + * elif t == NPY_INT: f = "i" + * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< + * elif t == NPY_LONG: f = "l" + * elif t == NPY_ULONG: f = "L" + */ + case NPY_UINT: + __pyx_v_f = ((char *)"I"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":283 + * elif t == NPY_INT: f = "i" + * elif t == NPY_UINT: f = "I" + * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< + * elif t == NPY_ULONG: f = "L" + * elif t == NPY_LONGLONG: f = "q" + */ + case NPY_LONG: + __pyx_v_f = ((char *)"l"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":284 + * elif t == NPY_UINT: f = "I" + * elif t == NPY_LONG: f = "l" + * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< + * elif t == NPY_LONGLONG: f = "q" + * elif t == NPY_ULONGLONG: f = "Q" + */ + case NPY_ULONG: + __pyx_v_f = ((char *)"L"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":285 + * elif t == NPY_LONG: f = "l" + * elif t == NPY_ULONG: f = "L" + * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< + * elif t == NPY_ULONGLONG: f = "Q" + * elif t == NPY_FLOAT: f = "f" + */ + case NPY_LONGLONG: + __pyx_v_f = ((char *)"q"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":286 + * elif t == NPY_ULONG: f = "L" + * elif t == NPY_LONGLONG: f = "q" + * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< + * elif t == NPY_FLOAT: f = "f" + * elif t == NPY_DOUBLE: f = "d" + */ + case NPY_ULONGLONG: + __pyx_v_f = ((char *)"Q"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":287 + * elif t == NPY_LONGLONG: f = "q" + * elif t == NPY_ULONGLONG: f = "Q" + * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< + * elif t == NPY_DOUBLE: f = "d" + * elif t == NPY_LONGDOUBLE: f = "g" + */ + case NPY_FLOAT: + __pyx_v_f = ((char *)"f"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":288 + * elif t == NPY_ULONGLONG: f = "Q" + * elif t == NPY_FLOAT: f = "f" + * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< + * elif t == NPY_LONGDOUBLE: f = "g" + * elif t == NPY_CFLOAT: f = "Zf" + */ + case NPY_DOUBLE: + __pyx_v_f = ((char *)"d"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":289 + * elif t == NPY_FLOAT: f = "f" + * elif t == NPY_DOUBLE: f = "d" + * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< + * elif t == NPY_CFLOAT: f = "Zf" + * elif t == NPY_CDOUBLE: f = "Zd" + */ + case NPY_LONGDOUBLE: + __pyx_v_f = ((char *)"g"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":290 + * elif t == NPY_DOUBLE: f = "d" + * elif t == NPY_LONGDOUBLE: f = "g" + * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< + * elif t == NPY_CDOUBLE: f = "Zd" + * elif t == NPY_CLONGDOUBLE: f = "Zg" + */ 
+ case NPY_CFLOAT: + __pyx_v_f = ((char *)"Zf"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":291 + * elif t == NPY_LONGDOUBLE: f = "g" + * elif t == NPY_CFLOAT: f = "Zf" + * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< + * elif t == NPY_CLONGDOUBLE: f = "Zg" + * elif t == NPY_OBJECT: f = "O" + */ + case NPY_CDOUBLE: + __pyx_v_f = ((char *)"Zd"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":292 + * elif t == NPY_CFLOAT: f = "Zf" + * elif t == NPY_CDOUBLE: f = "Zd" + * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< + * elif t == NPY_OBJECT: f = "O" + * else: + */ + case NPY_CLONGDOUBLE: + __pyx_v_f = ((char *)"Zg"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":293 + * elif t == NPY_CDOUBLE: f = "Zd" + * elif t == NPY_CLONGDOUBLE: f = "Zg" + * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< + * else: + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) + */ + case NPY_OBJECT: + __pyx_v_f = ((char *)"O"); + break; + default: + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":295 + * elif t == NPY_OBJECT: f = "O" + * else: + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< + * info.format = f + * return + */ + __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 295, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_6 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_3); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 295, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 295, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_GIVEREF(__pyx_t_6); + PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6); + __pyx_t_6 = 0; + __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 295, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_Raise(__pyx_t_6, 0, 0, 0); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __PYX_ERR(1, 295, __pyx_L1_error) + break; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":296 + * else: + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) + * info.format = f # <<<<<<<<<<<<<< + * return + * else: + */ + __pyx_v_info->format = __pyx_v_f; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":297 + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) + * info.format = f + * return # <<<<<<<<<<<<<< + * else: + * info.format = PyObject_Malloc(_buffer_format_string_len) + */ + __pyx_r = 0; + goto __pyx_L0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":272 + * info.obj = self + * + * if not hasfields: # <<<<<<<<<<<<<< + * t = descr.type_num + * if ((descr.byteorder == c'>' and little_endian) or + */ + } + + /* 
"../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":299 + * return + * else: + * info.format = PyObject_Malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< + * info.format[0] = c'^' # Native data types, manual alignment + * offset = 0 + */ + /*else*/ { + __pyx_v_info->format = ((char *)PyObject_Malloc(0xFF)); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":300 + * else: + * info.format = PyObject_Malloc(_buffer_format_string_len) + * info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<< + * offset = 0 + * f = _util_dtypestring(descr, info.format + 1, + */ + (__pyx_v_info->format[0]) = '^'; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":301 + * info.format = PyObject_Malloc(_buffer_format_string_len) + * info.format[0] = c'^' # Native data types, manual alignment + * offset = 0 # <<<<<<<<<<<<<< + * f = _util_dtypestring(descr, info.format + 1, + * info.format + _buffer_format_string_len, + */ + __pyx_v_offset = 0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":302 + * info.format[0] = c'^' # Native data types, manual alignment + * offset = 0 + * f = _util_dtypestring(descr, info.format + 1, # <<<<<<<<<<<<<< + * info.format + _buffer_format_string_len, + * &offset) + */ + __pyx_t_7 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 0xFF), (&__pyx_v_offset)); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 302, __pyx_L1_error) + __pyx_v_f = __pyx_t_7; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":305 + * info.format + _buffer_format_string_len, + * &offset) + * f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<< + * + * def __releasebuffer__(ndarray self, Py_buffer* info): + */ + (__pyx_v_f[0]) = '\x00'; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":214 + * # experimental exception made for __getbuffer__ and __releasebuffer__ + * # -- the details of this may change. + * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< + * # This implementation of getbuffer is geared towards Cython + * # requirements, and does not yet fullfill the PEP. 
+ */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; + } + goto __pyx_L2; + __pyx_L0:; + if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { + __Pyx_GOTREF(Py_None); + __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; + } + __pyx_L2:; + __Pyx_XDECREF((PyObject *)__pyx_v_descr); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":307 + * f[0] = c'\0' # Terminate format string + * + * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< + * if PyArray_HASFIELDS(self): + * PyObject_Free(info.format) + */ + +/* Python wrapper */ +static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/ +static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0); + __pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) { + __Pyx_RefNannyDeclarations + int __pyx_t_1; + __Pyx_RefNannySetupContext("__releasebuffer__", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":308 + * + * def __releasebuffer__(ndarray self, Py_buffer* info): + * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< + * PyObject_Free(info.format) + * if sizeof(npy_intp) != sizeof(Py_ssize_t): + */ + __pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0); + if (__pyx_t_1) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":309 + * def __releasebuffer__(ndarray self, Py_buffer* info): + * if PyArray_HASFIELDS(self): + * PyObject_Free(info.format) # <<<<<<<<<<<<<< + * if sizeof(npy_intp) != sizeof(Py_ssize_t): + * PyObject_Free(info.strides) + */ + PyObject_Free(__pyx_v_info->format); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":308 + * + * def __releasebuffer__(ndarray self, Py_buffer* info): + * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< + * PyObject_Free(info.format) + * if sizeof(npy_intp) != sizeof(Py_ssize_t): + */ + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":310 + * if PyArray_HASFIELDS(self): + * PyObject_Free(info.format) + * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< + * PyObject_Free(info.strides) + * # info.shape was stored after info.strides in the same block + */ + __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); + if (__pyx_t_1) { + + /* 
"../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":311 + * PyObject_Free(info.format) + * if sizeof(npy_intp) != sizeof(Py_ssize_t): + * PyObject_Free(info.strides) # <<<<<<<<<<<<<< + * # info.shape was stored after info.strides in the same block + * + */ + PyObject_Free(__pyx_v_info->strides); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":310 + * if PyArray_HASFIELDS(self): + * PyObject_Free(info.format) + * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< + * PyObject_Free(info.strides) + * # info.shape was stored after info.strides in the same block + */ + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":307 + * f[0] = c'\0' # Terminate format string + * + * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< + * if PyArray_HASFIELDS(self): + * PyObject_Free(info.format) + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":788 + * ctypedef npy_cdouble complex_t + * + * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(1, a) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":789 + * + * cdef inline object PyArray_MultiIterNew1(a): + * return PyArray_MultiIterNew(1, a) # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew2(a, b): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 789, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":788 + * ctypedef npy_cdouble complex_t + * + * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(1, a) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":791 + * return PyArray_MultiIterNew(1, a) + * + * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(2, a, b) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); + + /* 
"../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":792 + * + * cdef inline object PyArray_MultiIterNew2(a, b): + * return PyArray_MultiIterNew(2, a, b) # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew3(a, b, c): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 792, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":791 + * return PyArray_MultiIterNew(1, a) + * + * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(2, a, b) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":794 + * return PyArray_MultiIterNew(2, a, b) + * + * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(3, a, b, c) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":795 + * + * cdef inline object PyArray_MultiIterNew3(a, b, c): + * return PyArray_MultiIterNew(3, a, b, c) # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew4(a, b, c, d): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 795, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":794 + * return PyArray_MultiIterNew(2, a, b) + * + * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(3, a, b, c) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":797 + * return PyArray_MultiIterNew(3, a, b, c) + * + * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(4, a, b, c, d) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + 
__Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":798 + * + * cdef inline object PyArray_MultiIterNew4(a, b, c, d): + * return PyArray_MultiIterNew(4, a, b, c, d) # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 798, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":797 + * return PyArray_MultiIterNew(3, a, b, c) + * + * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(4, a, b, c, d) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":800 + * return PyArray_MultiIterNew(4, a, b, c, d) + * + * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(5, a, b, c, d, e) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":801 + * + * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): + * return PyArray_MultiIterNew(5, a, b, c, d, e) # <<<<<<<<<<<<<< + * + * cdef inline tuple PyDataType_SHAPE(dtype d): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 801, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":800 + * return PyArray_MultiIterNew(4, a, b, c, d) + * + * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(5, a, b, c, d, e) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":803 + * return PyArray_MultiIterNew(5, a, b, c, d, e) + * + * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<< + * if PyDataType_HASSUBARRAY(d): + * 
return d.subarray.shape + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyDataType_SHAPE(PyArray_Descr *__pyx_v_d) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + __Pyx_RefNannySetupContext("PyDataType_SHAPE", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":804 + * + * cdef inline tuple PyDataType_SHAPE(dtype d): + * if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<< + * return d.subarray.shape + * else: + */ + __pyx_t_1 = (PyDataType_HASSUBARRAY(__pyx_v_d) != 0); + if (__pyx_t_1) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":805 + * cdef inline tuple PyDataType_SHAPE(dtype d): + * if PyDataType_HASSUBARRAY(d): + * return d.subarray.shape # <<<<<<<<<<<<<< + * else: + * return () + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject*)__pyx_v_d->subarray->shape)); + __pyx_r = ((PyObject*)__pyx_v_d->subarray->shape); + goto __pyx_L0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":804 + * + * cdef inline tuple PyDataType_SHAPE(dtype d): + * if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<< + * return d.subarray.shape + * else: + */ + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":807 + * return d.subarray.shape + * else: + * return () # <<<<<<<<<<<<<< + * + * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: + */ + /*else*/ { + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_empty_tuple); + __pyx_r = __pyx_empty_tuple; + goto __pyx_L0; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":803 + * return PyArray_MultiIterNew(5, a, b, c, d, e) + * + * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<< + * if PyDataType_HASSUBARRAY(d): + * return d.subarray.shape + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":809 + * return () + * + * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< + * # Recursive utility function used in __getbuffer__ to get format + * # string. The new location in the format string is returned. 
+ */ + +static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { + PyArray_Descr *__pyx_v_child = 0; + int __pyx_v_endian_detector; + int __pyx_v_little_endian; + PyObject *__pyx_v_fields = 0; + PyObject *__pyx_v_childname = NULL; + PyObject *__pyx_v_new_offset = NULL; + PyObject *__pyx_v_t = NULL; + char *__pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + Py_ssize_t __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + int __pyx_t_5; + int __pyx_t_6; + int __pyx_t_7; + long __pyx_t_8; + char *__pyx_t_9; + __Pyx_RefNannySetupContext("_util_dtypestring", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":814 + * + * cdef dtype child + * cdef int endian_detector = 1 # <<<<<<<<<<<<<< + * cdef bint little_endian = ((&endian_detector)[0] != 0) + * cdef tuple fields + */ + __pyx_v_endian_detector = 1; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":815 + * cdef dtype child + * cdef int endian_detector = 1 + * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< + * cdef tuple fields + * + */ + __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":818 + * cdef tuple fields + * + * for childname in descr.names: # <<<<<<<<<<<<<< + * fields = descr.fields[childname] + * child, new_offset = fields + */ + if (unlikely(__pyx_v_descr->names == Py_None)) { + PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); + __PYX_ERR(1, 818, __pyx_L1_error) + } + __pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; + for (;;) { + if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) __PYX_ERR(1, 818, __pyx_L1_error) + #else + __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 818, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + #endif + __Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3); + __pyx_t_3 = 0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":819 + * + * for childname in descr.names: + * fields = descr.fields[childname] # <<<<<<<<<<<<<< + * child, new_offset = fields + * + */ + if (unlikely(__pyx_v_descr->fields == Py_None)) { + PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); + __PYX_ERR(1, 819, __pyx_L1_error) + } + __pyx_t_3 = __Pyx_PyDict_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 819, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) __PYX_ERR(1, 819, __pyx_L1_error) + __Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3)); + __pyx_t_3 = 0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":820 + * for childname 
in descr.names: + * fields = descr.fields[childname] + * child, new_offset = fields # <<<<<<<<<<<<<< + * + * if (end - f) - (new_offset - offset[0]) < 15: + */ + if (likely(__pyx_v_fields != Py_None)) { + PyObject* sequence = __pyx_v_fields; + #if !CYTHON_COMPILING_IN_PYPY + Py_ssize_t size = Py_SIZE(sequence); + #else + Py_ssize_t size = PySequence_Size(sequence); + #endif + if (unlikely(size != 2)) { + if (size > 2) __Pyx_RaiseTooManyValuesError(2); + else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); + __PYX_ERR(1, 820, __pyx_L1_error) + } + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); + __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); + __Pyx_INCREF(__pyx_t_3); + __Pyx_INCREF(__pyx_t_4); + #else + __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 820, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 820, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + #endif + } else { + __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 820, __pyx_L1_error) + } + if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) __PYX_ERR(1, 820, __pyx_L1_error) + __Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3)); + __pyx_t_3 = 0; + __Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4); + __pyx_t_4 = 0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":822 + * child, new_offset = fields + * + * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") + * + */ + __pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 822, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 822, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 822, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0); + if (__pyx_t_6) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":823 + * + * if (end - f) - (new_offset - offset[0]) < 15: + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< + * + * if ((child.byteorder == c'>' and little_endian) or + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 823, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 823, __pyx_L1_error) + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":822 + * child, new_offset = fields + * + * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") + * + */ + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":825 + * raise 
RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") + * + * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< + * (child.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + */ + __pyx_t_7 = ((__pyx_v_child->byteorder == '>') != 0); + if (!__pyx_t_7) { + goto __pyx_L8_next_or; + } else { + } + __pyx_t_7 = (__pyx_v_little_endian != 0); + if (!__pyx_t_7) { + } else { + __pyx_t_6 = __pyx_t_7; + goto __pyx_L7_bool_binop_done; + } + __pyx_L8_next_or:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":826 + * + * if ((child.byteorder == c'>' and little_endian) or + * (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< + * raise ValueError(u"Non-native byte order not supported") + * # One could encode it in the format string and have Cython + */ + __pyx_t_7 = ((__pyx_v_child->byteorder == '<') != 0); + if (__pyx_t_7) { + } else { + __pyx_t_6 = __pyx_t_7; + goto __pyx_L7_bool_binop_done; + } + __pyx_t_7 = ((!(__pyx_v_little_endian != 0)) != 0); + __pyx_t_6 = __pyx_t_7; + __pyx_L7_bool_binop_done:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":825 + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") + * + * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< + * (child.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + */ + if (__pyx_t_6) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":827 + * if ((child.byteorder == c'>' and little_endian) or + * (child.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< + * # One could encode it in the format string and have Cython + * # complain instead, BUT: < and > in format strings also imply + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 827, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 827, __pyx_L1_error) + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":825 + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") + * + * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< + * (child.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + */ + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":837 + * + * # Output padding bytes + * while offset[0] < new_offset: # <<<<<<<<<<<<<< + * f[0] = 120 # "x"; pad byte + * f += 1 + */ + while (1) { + __pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 837, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 837, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if 
(unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 837, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (!__pyx_t_6) break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":838 + * # Output padding bytes + * while offset[0] < new_offset: + * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< + * f += 1 + * offset[0] += 1 + */ + (__pyx_v_f[0]) = 0x78; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":839 + * while offset[0] < new_offset: + * f[0] = 120 # "x"; pad byte + * f += 1 # <<<<<<<<<<<<<< + * offset[0] += 1 + * + */ + __pyx_v_f = (__pyx_v_f + 1); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":840 + * f[0] = 120 # "x"; pad byte + * f += 1 + * offset[0] += 1 # <<<<<<<<<<<<<< + * + * offset[0] += child.itemsize + */ + __pyx_t_8 = 0; + (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + 1); + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":842 + * offset[0] += 1 + * + * offset[0] += child.itemsize # <<<<<<<<<<<<<< + * + * if not PyDataType_HASFIELDS(child): + */ + __pyx_t_8 = 0; + (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + __pyx_v_child->elsize); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":844 + * offset[0] += child.itemsize + * + * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< + * t = child.type_num + * if end - f < 5: + */ + __pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0); + if (__pyx_t_6) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":845 + * + * if not PyDataType_HASFIELDS(child): + * t = child.type_num # <<<<<<<<<<<<<< + * if end - f < 5: + * raise RuntimeError(u"Format string allocated too short.") + */ + __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 845, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4); + __pyx_t_4 = 0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":846 + * if not PyDataType_HASFIELDS(child): + * t = child.type_num + * if end - f < 5: # <<<<<<<<<<<<<< + * raise RuntimeError(u"Format string allocated too short.") + * + */ + __pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0); + if (__pyx_t_6) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":847 + * t = child.type_num + * if end - f < 5: + * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< + * + * # Until ticket #99 is fixed, use integers to avoid warnings + */ + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 847, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_Raise(__pyx_t_4, 0, 0, 0); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __PYX_ERR(1, 847, __pyx_L1_error) + + /* 
"../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":846 + * if not PyDataType_HASFIELDS(child): + * t = child.type_num + * if end - f < 5: # <<<<<<<<<<<<<< + * raise RuntimeError(u"Format string allocated too short.") + * + */ + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":850 + * + * # Until ticket #99 is fixed, use integers to avoid warnings + * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< + * elif t == NPY_UBYTE: f[0] = 66 #"B" + * elif t == NPY_SHORT: f[0] = 104 #"h" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_BYTE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 850, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 850, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 850, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 98; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":851 + * # Until ticket #99 is fixed, use integers to avoid warnings + * if t == NPY_BYTE: f[0] = 98 #"b" + * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< + * elif t == NPY_SHORT: f[0] = 104 #"h" + * elif t == NPY_USHORT: f[0] = 72 #"H" + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UBYTE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 851, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 851, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 851, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 66; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":852 + * if t == NPY_BYTE: f[0] = 98 #"b" + * elif t == NPY_UBYTE: f[0] = 66 #"B" + * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< + * elif t == NPY_USHORT: f[0] = 72 #"H" + * elif t == NPY_INT: f[0] = 105 #"i" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_SHORT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 852, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 852, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 852, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x68; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":853 + * elif t == NPY_UBYTE: f[0] = 66 #"B" + * elif t == NPY_SHORT: f[0] = 104 #"h" + * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< + * elif t == NPY_INT: f[0] = 105 #"i" + * elif t == NPY_UINT: f[0] = 73 #"I" + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_USHORT); if 
(unlikely(!__pyx_t_3)) __PYX_ERR(1, 853, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 853, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 853, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 72; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":854 + * elif t == NPY_SHORT: f[0] = 104 #"h" + * elif t == NPY_USHORT: f[0] = 72 #"H" + * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< + * elif t == NPY_UINT: f[0] = 73 #"I" + * elif t == NPY_LONG: f[0] = 108 #"l" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_INT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 854, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 854, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 854, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x69; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":855 + * elif t == NPY_USHORT: f[0] = 72 #"H" + * elif t == NPY_INT: f[0] = 105 #"i" + * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< + * elif t == NPY_LONG: f[0] = 108 #"l" + * elif t == NPY_ULONG: f[0] = 76 #"L" + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UINT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 855, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 855, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 855, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 73; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":856 + * elif t == NPY_INT: f[0] = 105 #"i" + * elif t == NPY_UINT: f[0] = 73 #"I" + * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< + * elif t == NPY_ULONG: f[0] = 76 #"L" + * elif t == NPY_LONGLONG: f[0] = 113 #"q" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 856, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 856, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 856, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x6C; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":857 + * elif t == NPY_UINT: f[0] = 73 #"I" + * elif t == NPY_LONG: f[0] = 108 #"l" + * elif t == NPY_ULONG: f[0] = 76 #"L" # 
<<<<<<<<<<<<<< + * elif t == NPY_LONGLONG: f[0] = 113 #"q" + * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 857, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 857, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 857, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 76; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":858 + * elif t == NPY_LONG: f[0] = 108 #"l" + * elif t == NPY_ULONG: f[0] = 76 #"L" + * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< + * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" + * elif t == NPY_FLOAT: f[0] = 102 #"f" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 858, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 858, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 858, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x71; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":859 + * elif t == NPY_ULONG: f[0] = 76 #"L" + * elif t == NPY_LONGLONG: f[0] = 113 #"q" + * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< + * elif t == NPY_FLOAT: f[0] = 102 #"f" + * elif t == NPY_DOUBLE: f[0] = 100 #"d" + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 859, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 859, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 859, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 81; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":860 + * elif t == NPY_LONGLONG: f[0] = 113 #"q" + * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" + * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< + * elif t == NPY_DOUBLE: f[0] = 100 #"d" + * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_FLOAT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 860, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 860, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 860, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x66; + goto __pyx_L15; + } + + /* 
"../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":861 + * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" + * elif t == NPY_FLOAT: f[0] = 102 #"f" + * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< + * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" + * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 861, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 861, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 861, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x64; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":862 + * elif t == NPY_FLOAT: f[0] = 102 #"f" + * elif t == NPY_DOUBLE: f[0] = 100 #"d" + * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< + * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf + * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 862, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 862, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 862, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x67; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":863 + * elif t == NPY_DOUBLE: f[0] = 100 #"d" + * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" + * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< + * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd + * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 863, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 863, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 863, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 90; + (__pyx_v_f[1]) = 0x66; + __pyx_v_f = (__pyx_v_f + 1); + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":864 + * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" + * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf + * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< + * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg + * elif t == NPY_OBJECT: f[0] = 79 #"O" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 
864, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 864, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 864, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 90; + (__pyx_v_f[1]) = 0x64; + __pyx_v_f = (__pyx_v_f + 1); + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":865 + * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf + * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd + * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< + * elif t == NPY_OBJECT: f[0] = 79 #"O" + * else: + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 865, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 865, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 865, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 90; + (__pyx_v_f[1]) = 0x67; + __pyx_v_f = (__pyx_v_f + 1); + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":866 + * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd + * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg + * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< + * else: + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_OBJECT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 866, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 866, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 866, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 79; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":868 + * elif t == NPY_OBJECT: f[0] = 79 #"O" + * else: + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< + * f += 1 + * else: + */ + /*else*/ { + __pyx_t_3 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 868, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 868, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_GIVEREF(__pyx_t_3); + PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); + __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 868, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 868, __pyx_L1_error) + } + 
__pyx_L15:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":869 + * else: + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) + * f += 1 # <<<<<<<<<<<<<< + * else: + * # Cython ignores struct boundary information ("T{...}"), + */ + __pyx_v_f = (__pyx_v_f + 1); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":844 + * offset[0] += child.itemsize + * + * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< + * t = child.type_num + * if end - f < 5: + */ + goto __pyx_L13; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":873 + * # Cython ignores struct boundary information ("T{...}"), + * # so don't output it + * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< + * return f + * + */ + /*else*/ { + __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_9 == ((char *)NULL))) __PYX_ERR(1, 873, __pyx_L1_error) + __pyx_v_f = __pyx_t_9; + } + __pyx_L13:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":818 + * cdef tuple fields + * + * for childname in descr.names: # <<<<<<<<<<<<<< + * fields = descr.fields[childname] + * child, new_offset = fields + */ + } + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":874 + * # so don't output it + * f = _util_dtypestring(child, f, end, offset) + * return f # <<<<<<<<<<<<<< + * + * + */ + __pyx_r = __pyx_v_f; + goto __pyx_L0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":809 + * return () + * + * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< + * # Recursive utility function used in __getbuffer__ to get format + * # string. The new location in the format string is returned. 
+ */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_child); + __Pyx_XDECREF(__pyx_v_fields); + __Pyx_XDECREF(__pyx_v_childname); + __Pyx_XDECREF(__pyx_v_new_offset); + __Pyx_XDECREF(__pyx_v_t); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":990 + * + * + * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< + * cdef PyObject* baseptr + * if base is None: + */ + +static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { + PyObject *__pyx_v_baseptr; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + __Pyx_RefNannySetupContext("set_array_base", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":992 + * cdef inline void set_array_base(ndarray arr, object base): + * cdef PyObject* baseptr + * if base is None: # <<<<<<<<<<<<<< + * baseptr = NULL + * else: + */ + __pyx_t_1 = (__pyx_v_base == Py_None); + __pyx_t_2 = (__pyx_t_1 != 0); + if (__pyx_t_2) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":993 + * cdef PyObject* baseptr + * if base is None: + * baseptr = NULL # <<<<<<<<<<<<<< + * else: + * Py_INCREF(base) # important to do this before decref below! + */ + __pyx_v_baseptr = NULL; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":992 + * cdef inline void set_array_base(ndarray arr, object base): + * cdef PyObject* baseptr + * if base is None: # <<<<<<<<<<<<<< + * baseptr = NULL + * else: + */ + goto __pyx_L3; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":995 + * baseptr = NULL + * else: + * Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<< + * baseptr = base + * Py_XDECREF(arr.base) + */ + /*else*/ { + Py_INCREF(__pyx_v_base); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":996 + * else: + * Py_INCREF(base) # important to do this before decref below! + * baseptr = base # <<<<<<<<<<<<<< + * Py_XDECREF(arr.base) + * arr.base = baseptr + */ + __pyx_v_baseptr = ((PyObject *)__pyx_v_base); + } + __pyx_L3:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":997 + * Py_INCREF(base) # important to do this before decref below! 
+ * baseptr = base + * Py_XDECREF(arr.base) # <<<<<<<<<<<<<< + * arr.base = baseptr + * + */ + Py_XDECREF(__pyx_v_arr->base); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":998 + * baseptr = base + * Py_XDECREF(arr.base) + * arr.base = baseptr # <<<<<<<<<<<<<< + * + * cdef inline object get_array_base(ndarray arr): + */ + __pyx_v_arr->base = __pyx_v_baseptr; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":990 + * + * + * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< + * cdef PyObject* baseptr + * if base is None: + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1000 + * arr.base = baseptr + * + * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< + * if arr.base is NULL: + * return None + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + __Pyx_RefNannySetupContext("get_array_base", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1001 + * + * cdef inline object get_array_base(ndarray arr): + * if arr.base is NULL: # <<<<<<<<<<<<<< + * return None + * else: + */ + __pyx_t_1 = ((__pyx_v_arr->base == NULL) != 0); + if (__pyx_t_1) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1002 + * cdef inline object get_array_base(ndarray arr): + * if arr.base is NULL: + * return None # <<<<<<<<<<<<<< + * else: + * return arr.base + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(Py_None); + __pyx_r = Py_None; + goto __pyx_L0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1001 + * + * cdef inline object get_array_base(ndarray arr): + * if arr.base is NULL: # <<<<<<<<<<<<<< + * return None + * else: + */ + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1004 + * return None + * else: + * return arr.base # <<<<<<<<<<<<<< + * + * + */ + /*else*/ { + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); + __pyx_r = ((PyObject *)__pyx_v_arr->base); + goto __pyx_L0; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1000 + * arr.base = baseptr + * + * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< + * if arr.base is NULL: + * return None + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1009 + * # Versions of the import_* functions which are more suitable for + * # Cython code. 
+ * cdef inline int import_array() except -1: # <<<<<<<<<<<<<< + * try: + * _import_array() + */ + +static CYTHON_INLINE int __pyx_f_5numpy_import_array(void) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + __Pyx_RefNannySetupContext("import_array", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1010 + * # Cython code. + * cdef inline int import_array() except -1: + * try: # <<<<<<<<<<<<<< + * _import_array() + * except Exception: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_1); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + /*try:*/ { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1011 + * cdef inline int import_array() except -1: + * try: + * _import_array() # <<<<<<<<<<<<<< + * except Exception: + * raise ImportError("numpy.core.multiarray failed to import") + */ + __pyx_t_4 = _import_array(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1011, __pyx_L3_error) + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1010 + * # Cython code. + * cdef inline int import_array() except -1: + * try: # <<<<<<<<<<<<<< + * _import_array() + * except Exception: + */ + } + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + goto __pyx_L8_try_end; + __pyx_L3_error:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1012 + * try: + * _import_array() + * except Exception: # <<<<<<<<<<<<<< + * raise ImportError("numpy.core.multiarray failed to import") + * + */ + __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); + if (__pyx_t_4) { + __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1012, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GOTREF(__pyx_t_7); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1013 + * _import_array() + * except Exception: + * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<< + * + * cdef inline int import_umath() except -1: + */ + __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1013, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_Raise(__pyx_t_8, 0, 0, 0); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __PYX_ERR(1, 1013, __pyx_L5_except_error) + } + goto __pyx_L5_except_error; + __pyx_L5_except_error:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1010 + * # Cython code. 
+ * cdef inline int import_array() except -1: + * try: # <<<<<<<<<<<<<< + * _import_array() + * except Exception: + */ + __Pyx_XGIVEREF(__pyx_t_1); + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); + goto __pyx_L1_error; + __pyx_L8_try_end:; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1009 + * # Versions of the import_* functions which are more suitable for + * # Cython code. + * cdef inline int import_array() except -1: # <<<<<<<<<<<<<< + * try: + * _import_array() + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1015 + * raise ImportError("numpy.core.multiarray failed to import") + * + * cdef inline int import_umath() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + +static CYTHON_INLINE int __pyx_f_5numpy_import_umath(void) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + __Pyx_RefNannySetupContext("import_umath", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1016 + * + * cdef inline int import_umath() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_1); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + /*try:*/ { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1017 + * cdef inline int import_umath() except -1: + * try: + * _import_umath() # <<<<<<<<<<<<<< + * except Exception: + * raise ImportError("numpy.core.umath failed to import") + */ + __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1017, __pyx_L3_error) + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1016 + * + * cdef inline int import_umath() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + } + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + goto __pyx_L8_try_end; + __pyx_L3_error:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1018 + * try: + * _import_umath() + * except Exception: # <<<<<<<<<<<<<< + * raise ImportError("numpy.core.umath failed to import") + * + */ + __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); + if (__pyx_t_4) { + __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, 
__pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1018, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GOTREF(__pyx_t_7); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1019 + * _import_umath() + * except Exception: + * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< + * + * cdef inline int import_ufunc() except -1: + */ + __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1019, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_Raise(__pyx_t_8, 0, 0, 0); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __PYX_ERR(1, 1019, __pyx_L5_except_error) + } + goto __pyx_L5_except_error; + __pyx_L5_except_error:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1016 + * + * cdef inline int import_umath() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + __Pyx_XGIVEREF(__pyx_t_1); + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); + goto __pyx_L1_error; + __pyx_L8_try_end:; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1015 + * raise ImportError("numpy.core.multiarray failed to import") + * + * cdef inline int import_umath() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1021 + * raise ImportError("numpy.core.umath failed to import") + * + * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + +static CYTHON_INLINE int __pyx_f_5numpy_import_ufunc(void) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + __Pyx_RefNannySetupContext("import_ufunc", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1022 + * + * cdef inline int import_ufunc() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_1); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + /*try:*/ { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1023 + * cdef inline int import_ufunc() except -1: + * try: + * _import_umath() # <<<<<<<<<<<<<< + * except Exception: + * raise ImportError("numpy.core.umath failed to 
import") + */ + __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1023, __pyx_L3_error) + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1022 + * + * cdef inline int import_ufunc() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + } + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + goto __pyx_L8_try_end; + __pyx_L3_error:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1024 + * try: + * _import_umath() + * except Exception: # <<<<<<<<<<<<<< + * raise ImportError("numpy.core.umath failed to import") + */ + __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); + if (__pyx_t_4) { + __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1024, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GOTREF(__pyx_t_7); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1025 + * _import_umath() + * except Exception: + * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< + */ + __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1025, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_Raise(__pyx_t_8, 0, 0, 0); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __PYX_ERR(1, 1025, __pyx_L5_except_error) + } + goto __pyx_L5_except_error; + __pyx_L5_except_error:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1022 + * + * cdef inline int import_ufunc() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + __Pyx_XGIVEREF(__pyx_t_1); + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); + goto __pyx_L1_error; + __pyx_L8_try_end:; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1021 + * raise ImportError("numpy.core.umath failed to import") + * + * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyMethodDef __pyx_methods[] = { + {0, 0, 0, 0} +}; + +#if PY_MAJOR_VERSION >= 3 +#if CYTHON_PEP489_MULTI_PHASE_INIT +static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ +static int __pyx_pymod_exec_boundary_none(PyObject* module); /*proto*/ +static PyModuleDef_Slot __pyx_moduledef_slots[] = { + {Py_mod_create, (void*)__pyx_pymod_create}, + {Py_mod_exec, (void*)__pyx_pymod_exec_boundary_none}, + {0, NULL} +}; +#endif + +static struct PyModuleDef __pyx_moduledef = { + 
PyModuleDef_HEAD_INIT, + "boundary_none", + 0, /* m_doc */ + #if CYTHON_PEP489_MULTI_PHASE_INIT + 0, /* m_size */ + #else + -1, /* m_size */ + #endif + __pyx_methods /* m_methods */, + #if CYTHON_PEP489_MULTI_PHASE_INIT + __pyx_moduledef_slots, /* m_slots */ + #else + NULL, /* m_reload */ + #endif + NULL, /* m_traverse */ + NULL, /* m_clear */ + NULL /* m_free */ +}; +#endif + +static __Pyx_StringTabEntry __pyx_string_tab[] = { + {&__pyx_kp_s_Convolution_kernel_must_have_odd, __pyx_k_Convolution_kernel_must_have_odd, sizeof(__pyx_k_Convolution_kernel_must_have_odd), 0, 0, 1, 0}, + {&__pyx_n_s_DTYPE, __pyx_k_DTYPE, sizeof(__pyx_k_DTYPE), 0, 0, 1, 1}, + {&__pyx_kp_u_Format_string_allocated_too_shor, __pyx_k_Format_string_allocated_too_shor, sizeof(__pyx_k_Format_string_allocated_too_shor), 0, 1, 0, 0}, + {&__pyx_kp_u_Format_string_allocated_too_shor_2, __pyx_k_Format_string_allocated_too_shor_2, sizeof(__pyx_k_Format_string_allocated_too_shor_2), 0, 1, 0, 0}, + {&__pyx_n_s_ImportError, __pyx_k_ImportError, sizeof(__pyx_k_ImportError), 0, 0, 1, 1}, + {&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0}, + {&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 0, 1, 1}, + {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, + {&__pyx_kp_s_astropy_convolution_boundary_non, __pyx_k_astropy_convolution_boundary_non, sizeof(__pyx_k_astropy_convolution_boundary_non), 0, 0, 1, 0}, + {&__pyx_n_s_astropy_convolution_boundary_non_2, __pyx_k_astropy_convolution_boundary_non_2, sizeof(__pyx_k_astropy_convolution_boundary_non_2), 0, 0, 1, 1}, + {&__pyx_n_s_bot, __pyx_k_bot, sizeof(__pyx_k_bot), 0, 0, 1, 1}, + {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, + {&__pyx_n_s_conv, __pyx_k_conv, sizeof(__pyx_k_conv), 0, 0, 1, 1}, + {&__pyx_n_s_convolve1d_boundary_none, __pyx_k_convolve1d_boundary_none, sizeof(__pyx_k_convolve1d_boundary_none), 0, 0, 1, 1}, + {&__pyx_n_s_convolve2d_boundary_none, __pyx_k_convolve2d_boundary_none, sizeof(__pyx_k_convolve2d_boundary_none), 0, 0, 1, 1}, + {&__pyx_n_s_convolve3d_boundary_none, __pyx_k_convolve3d_boundary_none, sizeof(__pyx_k_convolve3d_boundary_none), 0, 0, 1, 1}, + {&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1}, + {&__pyx_n_s_f, __pyx_k_f, sizeof(__pyx_k_f), 0, 0, 1, 1}, + {&__pyx_n_s_float, __pyx_k_float, sizeof(__pyx_k_float), 0, 0, 1, 1}, + {&__pyx_n_s_g, __pyx_k_g, sizeof(__pyx_k_g), 0, 0, 1, 1}, + {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1}, + {&__pyx_n_s_ii, __pyx_k_ii, sizeof(__pyx_k_ii), 0, 0, 1, 1}, + {&__pyx_n_s_iimax, __pyx_k_iimax, sizeof(__pyx_k_iimax), 0, 0, 1, 1}, + {&__pyx_n_s_iimin, __pyx_k_iimin, sizeof(__pyx_k_iimin), 0, 0, 1, 1}, + {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, + {&__pyx_n_s_j, __pyx_k_j, sizeof(__pyx_k_j), 0, 0, 1, 1}, + {&__pyx_n_s_jj, __pyx_k_jj, sizeof(__pyx_k_jj), 0, 0, 1, 1}, + {&__pyx_n_s_jjmax, __pyx_k_jjmax, sizeof(__pyx_k_jjmax), 0, 0, 1, 1}, + {&__pyx_n_s_jjmin, __pyx_k_jjmin, sizeof(__pyx_k_jjmin), 0, 0, 1, 1}, + {&__pyx_n_s_k, __pyx_k_k, sizeof(__pyx_k_k), 0, 0, 1, 1}, + {&__pyx_n_s_ker, __pyx_k_ker, sizeof(__pyx_k_ker), 0, 0, 1, 1}, + {&__pyx_n_s_kk, __pyx_k_kk, sizeof(__pyx_k_kk), 0, 0, 1, 1}, + {&__pyx_n_s_kkmax, __pyx_k_kkmax, sizeof(__pyx_k_kkmax), 0, 0, 1, 1}, + {&__pyx_n_s_kkmin, __pyx_k_kkmin, sizeof(__pyx_k_kkmin), 0, 0, 1, 1}, + 
{&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, + {&__pyx_kp_u_ndarray_is_not_C_contiguous, __pyx_k_ndarray_is_not_C_contiguous, sizeof(__pyx_k_ndarray_is_not_C_contiguous), 0, 1, 0, 0}, + {&__pyx_kp_u_ndarray_is_not_Fortran_contiguou, __pyx_k_ndarray_is_not_Fortran_contiguou, sizeof(__pyx_k_ndarray_is_not_Fortran_contiguou), 0, 1, 0, 0}, + {&__pyx_n_s_nkx, __pyx_k_nkx, sizeof(__pyx_k_nkx), 0, 0, 1, 1}, + {&__pyx_n_s_nky, __pyx_k_nky, sizeof(__pyx_k_nky), 0, 0, 1, 1}, + {&__pyx_n_s_nkz, __pyx_k_nkz, sizeof(__pyx_k_nkz), 0, 0, 1, 1}, + {&__pyx_n_s_normalize_by_kernel, __pyx_k_normalize_by_kernel, sizeof(__pyx_k_normalize_by_kernel), 0, 0, 1, 1}, + {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1}, + {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, + {&__pyx_kp_s_numpy_core_multiarray_failed_to, __pyx_k_numpy_core_multiarray_failed_to, sizeof(__pyx_k_numpy_core_multiarray_failed_to), 0, 0, 1, 0}, + {&__pyx_kp_s_numpy_core_umath_failed_to_impor, __pyx_k_numpy_core_umath_failed_to_impor, sizeof(__pyx_k_numpy_core_umath_failed_to_impor), 0, 0, 1, 0}, + {&__pyx_n_s_nx, __pyx_k_nx, sizeof(__pyx_k_nx), 0, 0, 1, 1}, + {&__pyx_n_s_ny, __pyx_k_ny, sizeof(__pyx_k_ny), 0, 0, 1, 1}, + {&__pyx_n_s_nz, __pyx_k_nz, sizeof(__pyx_k_nz), 0, 0, 1, 1}, + {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, + {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, + {&__pyx_n_s_top, __pyx_k_top, sizeof(__pyx_k_top), 0, 0, 1, 1}, + {&__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_k_unknown_dtype_code_in_numpy_pxd, sizeof(__pyx_k_unknown_dtype_code_in_numpy_pxd), 0, 1, 0, 0}, + {&__pyx_n_s_val, __pyx_k_val, sizeof(__pyx_k_val), 0, 0, 1, 1}, + {&__pyx_n_s_wkx, __pyx_k_wkx, sizeof(__pyx_k_wkx), 0, 0, 1, 1}, + {&__pyx_n_s_wky, __pyx_k_wky, sizeof(__pyx_k_wky), 0, 0, 1, 1}, + {&__pyx_n_s_wkz, __pyx_k_wkz, sizeof(__pyx_k_wkz), 0, 0, 1, 1}, + {&__pyx_n_s_zeros, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1}, + {0, 0, 0, 0, 0, 0, 0} +}; +static int __Pyx_InitCachedBuiltins(void) { + __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(0, 22, __pyx_L1_error) + __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 44, __pyx_L1_error) + __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) __PYX_ERR(1, 823, __pyx_L1_error) + __pyx_builtin_ImportError = __Pyx_GetBuiltinName(__pyx_n_s_ImportError); if (!__pyx_builtin_ImportError) __PYX_ERR(1, 1013, __pyx_L1_error) + return 0; + __pyx_L1_error:; + return -1; +} + +static int __Pyx_InitCachedConstants(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); + + /* "astropy/convolution/boundary_none.pyx":22 + * + * if g.shape[0] % 2 != 1: + * raise ValueError("Convolution kernel must have odd dimensions") # <<<<<<<<<<<<<< + * + * assert f.dtype == DTYPE and g.dtype == DTYPE + */ + __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_s_Convolution_kernel_must_have_odd); if (unlikely(!__pyx_tuple_)) __PYX_ERR(0, 22, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple_); + __Pyx_GIVEREF(__pyx_tuple_); + + /* "astropy/convolution/boundary_none.pyx":70 + * + * if g.shape[0] % 2 != 1 or g.shape[1] % 2 != 1: + * raise ValueError("Convolution kernel must have odd dimensions") # <<<<<<<<<<<<<< + * + * assert f.dtype == DTYPE and g.dtype == DTYPE + */ + __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_Convolution_kernel_must_have_odd); 
if (unlikely(!__pyx_tuple__2)) __PYX_ERR(0, 70, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__2); + __Pyx_GIVEREF(__pyx_tuple__2); + + /* "astropy/convolution/boundary_none.pyx":124 + * + * if g.shape[0] % 2 != 1 or g.shape[1] % 2 != 1 or g.shape[2] % 2 != 1: + * raise ValueError("Convolution kernel must have odd dimensions") # <<<<<<<<<<<<<< + * + * assert f.dtype == DTYPE and g.dtype == DTYPE + */ + __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_s_Convolution_kernel_must_have_odd); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(0, 124, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__3); + __Pyx_GIVEREF(__pyx_tuple__3); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":235 + * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) + * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): + * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< + * + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) + */ + __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 235, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__4); + __Pyx_GIVEREF(__pyx_tuple__4); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":239 + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) + * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): + * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< + * + * info.buf = PyArray_DATA(self) + */ + __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 239, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__5); + __Pyx_GIVEREF(__pyx_tuple__5); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":276 + * if ((descr.byteorder == c'>' and little_endian) or + * (descr.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< + * if t == NPY_BYTE: f = "b" + * elif t == NPY_UBYTE: f = "B" + */ + __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 276, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__6); + __Pyx_GIVEREF(__pyx_tuple__6); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":823 + * + * if (end - f) - (new_offset - offset[0]) < 15: + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< + * + * if ((child.byteorder == c'>' and little_endian) or + */ + __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 823, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__7); + __Pyx_GIVEREF(__pyx_tuple__7); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":827 + * if ((child.byteorder == c'>' and little_endian) or + * (child.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< + * # One could encode it in the format string and have Cython + * # complain instead, BUT: < and > in format strings also imply + */ + __pyx_tuple__8 = PyTuple_Pack(1, 
__pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 827, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__8); + __Pyx_GIVEREF(__pyx_tuple__8); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":847 + * t = child.type_num + * if end - f < 5: + * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< + * + * # Until ticket #99 is fixed, use integers to avoid warnings + */ + __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 847, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__9); + __Pyx_GIVEREF(__pyx_tuple__9); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1013 + * _import_array() + * except Exception: + * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<< + * + * cdef inline int import_umath() except -1: + */ + __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_multiarray_failed_to); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 1013, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__10); + __Pyx_GIVEREF(__pyx_tuple__10); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1019 + * _import_umath() + * except Exception: + * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< + * + * cdef inline int import_ufunc() except -1: + */ + __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 1019, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__11); + __Pyx_GIVEREF(__pyx_tuple__11); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1025 + * _import_umath() + * except Exception: + * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< + */ + __pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 1025, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__12); + __Pyx_GIVEREF(__pyx_tuple__12); + + /* "astropy/convolution/boundary_none.pyx":17 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve1d_boundary_none(np.ndarray[DTYPE_t, ndim=1] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=1] g, + * bint normalize_by_kernel): + */ + __pyx_tuple__13 = PyTuple_Pack(15, __pyx_n_s_f, __pyx_n_s_g, __pyx_n_s_normalize_by_kernel, __pyx_n_s_nx, __pyx_n_s_nkx, __pyx_n_s_wkx, __pyx_n_s_conv, __pyx_n_s_i, __pyx_n_s_ii, __pyx_n_s_iimin, __pyx_n_s_iimax, __pyx_n_s_top, __pyx_n_s_bot, __pyx_n_s_ker, __pyx_n_s_val); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(0, 17, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__13); + __Pyx_GIVEREF(__pyx_tuple__13); + __pyx_codeobj__14 = (PyObject*)__Pyx_PyCode_New(3, 0, 15, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__13, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_astropy_convolution_boundary_non, __pyx_n_s_convolve1d_boundary_none, 17, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__14)) __PYX_ERR(0, 17, __pyx_L1_error) + + /* "astropy/convolution/boundary_none.pyx":65 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def 
convolve2d_boundary_none(np.ndarray[DTYPE_t, ndim=2] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=2] g, + * bint normalize_by_kernel): + */ + __pyx_tuple__15 = PyTuple_Pack(22, __pyx_n_s_f, __pyx_n_s_g, __pyx_n_s_normalize_by_kernel, __pyx_n_s_nx, __pyx_n_s_ny, __pyx_n_s_nkx, __pyx_n_s_nky, __pyx_n_s_wkx, __pyx_n_s_wky, __pyx_n_s_conv, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_ii, __pyx_n_s_jj, __pyx_n_s_iimin, __pyx_n_s_iimax, __pyx_n_s_jjmin, __pyx_n_s_jjmax, __pyx_n_s_top, __pyx_n_s_bot, __pyx_n_s_ker, __pyx_n_s_val); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(0, 65, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__15); + __Pyx_GIVEREF(__pyx_tuple__15); + __pyx_codeobj__16 = (PyObject*)__Pyx_PyCode_New(3, 0, 22, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__15, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_astropy_convolution_boundary_non, __pyx_n_s_convolve2d_boundary_none, 65, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__16)) __PYX_ERR(0, 65, __pyx_L1_error) + + /* "astropy/convolution/boundary_none.pyx":119 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve3d_boundary_none(np.ndarray[DTYPE_t, ndim=3] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=3] g, + * bint normalize_by_kernel): + */ + __pyx_tuple__17 = PyTuple_Pack(29, __pyx_n_s_f, __pyx_n_s_g, __pyx_n_s_normalize_by_kernel, __pyx_n_s_nx, __pyx_n_s_ny, __pyx_n_s_nz, __pyx_n_s_nkx, __pyx_n_s_nky, __pyx_n_s_nkz, __pyx_n_s_wkx, __pyx_n_s_wky, __pyx_n_s_wkz, __pyx_n_s_conv, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_k, __pyx_n_s_ii, __pyx_n_s_jj, __pyx_n_s_kk, __pyx_n_s_iimin, __pyx_n_s_iimax, __pyx_n_s_jjmin, __pyx_n_s_jjmax, __pyx_n_s_kkmin, __pyx_n_s_kkmax, __pyx_n_s_top, __pyx_n_s_bot, __pyx_n_s_ker, __pyx_n_s_val); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(0, 119, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__17); + __Pyx_GIVEREF(__pyx_tuple__17); + __pyx_codeobj__18 = (PyObject*)__Pyx_PyCode_New(3, 0, 29, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__17, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_astropy_convolution_boundary_non, __pyx_n_s_convolve3d_boundary_none, 119, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__18)) __PYX_ERR(0, 119, __pyx_L1_error) + __Pyx_RefNannyFinishContext(); + return 0; + __pyx_L1_error:; + __Pyx_RefNannyFinishContext(); + return -1; +} + +static int __Pyx_InitGlobals(void) { + if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); + return 0; + __pyx_L1_error:; + return -1; +} + +#if PY_MAJOR_VERSION < 3 +PyMODINIT_FUNC initboundary_none(void); /*proto*/ +PyMODINIT_FUNC initboundary_none(void) +#else +PyMODINIT_FUNC PyInit_boundary_none(void); /*proto*/ +PyMODINIT_FUNC PyInit_boundary_none(void) +#if CYTHON_PEP489_MULTI_PHASE_INIT +{ + return PyModuleDef_Init(&__pyx_moduledef); +} +static int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name) { + PyObject *value = PyObject_GetAttrString(spec, from_name); + int result = 0; + if (likely(value)) { + result = PyDict_SetItemString(moddict, to_name, value); + Py_DECREF(value); + } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { + PyErr_Clear(); + } else { + result = -1; + } + return result; +} +static PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { + PyObject *module = NULL, *moddict, *modname; + if (__pyx_m) + return __Pyx_NewRef(__pyx_m); + modname = PyObject_GetAttrString(spec, 
"name"); + if (unlikely(!modname)) goto bad; + module = PyModule_NewObject(modname); + Py_DECREF(modname); + if (unlikely(!module)) goto bad; + moddict = PyModule_GetDict(module); + if (unlikely(!moddict)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__") < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__") < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__") < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__") < 0)) goto bad; + return module; +bad: + Py_XDECREF(module); + return NULL; +} + + +static int __pyx_pymod_exec_boundary_none(PyObject *__pyx_pyinit_module) +#endif +#endif +{ + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + __Pyx_RefNannyDeclarations + #if CYTHON_PEP489_MULTI_PHASE_INIT + if (__pyx_m && __pyx_m == __pyx_pyinit_module) return 0; + #endif + #if CYTHON_REFNANNY + __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); + if (!__Pyx_RefNanny) { + PyErr_Clear(); + __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); + if (!__Pyx_RefNanny) + Py_FatalError("failed to import 'refnanny' module"); + } + #endif + __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_boundary_none(void)", 0); + if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) + #ifdef __Pyx_CyFunction_USED + if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_FusedFunction_USED + if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_Coroutine_USED + if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_Generator_USED + if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_AsyncGen_USED + if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_StopAsyncIteration_USED + if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + /*--- Library function declarations ---*/ + /*--- Threads initialization code ---*/ + #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS + #ifdef WITH_THREAD /* Python build with threading support? 
*/ + PyEval_InitThreads(); + #endif + #endif + /*--- Module creation code ---*/ + #if CYTHON_PEP489_MULTI_PHASE_INIT + __pyx_m = __pyx_pyinit_module; + Py_INCREF(__pyx_m); + #else + #if PY_MAJOR_VERSION < 3 + __pyx_m = Py_InitModule4("boundary_none", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); + #else + __pyx_m = PyModule_Create(&__pyx_moduledef); + #endif + if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) + Py_INCREF(__pyx_d); + __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) + #if CYTHON_COMPILING_IN_PYPY + Py_INCREF(__pyx_b); + #endif + if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); + /*--- Initialize various global constants etc. ---*/ + if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) + if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + if (__pyx_module_is_main_astropy__convolution__boundary_none) { + if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + } + #if PY_MAJOR_VERSION >= 3 + { + PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) + if (!PyDict_GetItemString(modules, "astropy.convolution.boundary_none")) { + if (unlikely(PyDict_SetItemString(modules, "astropy.convolution.boundary_none", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) + } + } + #endif + /*--- Builtin init code ---*/ + if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + /*--- Constants init code ---*/ + if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + /*--- Global init code ---*/ + /*--- Variable export code ---*/ + /*--- Function export code ---*/ + /*--- Type init code ---*/ + /*--- Type import code ---*/ + __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", + #if CYTHON_COMPILING_IN_PYPY + sizeof(PyTypeObject), + #else + sizeof(PyHeapTypeObject), + #endif + 0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) __PYX_ERR(2, 9, __pyx_L1_error) + __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) __PYX_ERR(1, 163, __pyx_L1_error) + __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) __PYX_ERR(1, 185, __pyx_L1_error) + __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) __PYX_ERR(1, 189, __pyx_L1_error) + __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) __PYX_ERR(1, 198, __pyx_L1_error) + __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) __PYX_ERR(1, 885, __pyx_L1_error) + /*--- Variable import code ---*/ + /*--- Function import code ---*/ + /*--- Execution code ---*/ + #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) + if (__Pyx_patch_abc() < 0) 
__PYX_ERR(0, 1, __pyx_L1_error) + #endif + + /* "astropy/convolution/boundary_none.pyx":3 + * # Licensed under a 3-clause BSD style license - see LICENSE.rst + * from __future__ import division + * import numpy as np # <<<<<<<<<<<<<< + * cimport numpy as np + * + */ + __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 3, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "astropy/convolution/boundary_none.pyx":7 + * + * + * DTYPE = np.float # <<<<<<<<<<<<<< + * ctypedef np.float_t DTYPE_t + * + */ + __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_float); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + if (PyDict_SetItem(__pyx_d, __pyx_n_s_DTYPE, __pyx_t_2) < 0) __PYX_ERR(0, 7, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "astropy/convolution/boundary_none.pyx":17 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve1d_boundary_none(np.ndarray[DTYPE_t, ndim=1] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=1] g, + * bint normalize_by_kernel): + */ + __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_7astropy_11convolution_13boundary_none_1convolve1d_boundary_none, NULL, __pyx_n_s_astropy_convolution_boundary_non_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_convolve1d_boundary_none, __pyx_t_2) < 0) __PYX_ERR(0, 17, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "astropy/convolution/boundary_none.pyx":65 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve2d_boundary_none(np.ndarray[DTYPE_t, ndim=2] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=2] g, + * bint normalize_by_kernel): + */ + __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_7astropy_11convolution_13boundary_none_3convolve2d_boundary_none, NULL, __pyx_n_s_astropy_convolution_boundary_non_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 65, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_convolve2d_boundary_none, __pyx_t_2) < 0) __PYX_ERR(0, 65, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "astropy/convolution/boundary_none.pyx":119 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve3d_boundary_none(np.ndarray[DTYPE_t, ndim=3] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=3] g, + * bint normalize_by_kernel): + */ + __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_7astropy_11convolution_13boundary_none_5convolve3d_boundary_none, NULL, __pyx_n_s_astropy_convolution_boundary_non_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 119, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_convolve3d_boundary_none, __pyx_t_2) < 0) __PYX_ERR(0, 119, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "astropy/convolution/boundary_none.pyx":1 + * # Licensed under a 3-clause BSD style license - see LICENSE.rst # <<<<<<<<<<<<<< + * from __future__ import division + * import numpy as np + */ + __pyx_t_2 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if 
(PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_2) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1021 + * raise ImportError("numpy.core.umath failed to import") + * + * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + + /*--- Wrapped vars code ---*/ + + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + if (__pyx_m) { + if (__pyx_d) { + __Pyx_AddTraceback("init astropy.convolution.boundary_none", 0, __pyx_lineno, __pyx_filename); + } + Py_DECREF(__pyx_m); __pyx_m = 0; + } else if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_ImportError, "init astropy.convolution.boundary_none"); + } + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + #if CYTHON_PEP489_MULTI_PHASE_INIT + return (__pyx_m != NULL) ? 0 : -1; + #elif PY_MAJOR_VERSION >= 3 + return __pyx_m; + #else + return; + #endif +} + +/* --- Runtime support code --- */ +/* Refnanny */ +#if CYTHON_REFNANNY +static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { + PyObject *m = NULL, *p = NULL; + void *r = NULL; + m = PyImport_ImportModule((char *)modname); + if (!m) goto end; + p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); + if (!p) goto end; + r = PyLong_AsVoidPtr(p); +end: + Py_XDECREF(p); + Py_XDECREF(m); + return (__Pyx_RefNannyAPIStruct *)r; +} +#endif + +/* GetBuiltinName */ +static PyObject *__Pyx_GetBuiltinName(PyObject *name) { + PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); + if (unlikely(!result)) { + PyErr_Format(PyExc_NameError, +#if PY_MAJOR_VERSION >= 3 + "name '%U' is not defined", name); +#else + "name '%.200s' is not defined", PyString_AS_STRING(name)); +#endif + } + return result; +} + +/* RaiseArgTupleInvalid */ +static void __Pyx_RaiseArgtupleInvalid( + const char* func_name, + int exact, + Py_ssize_t num_min, + Py_ssize_t num_max, + Py_ssize_t num_found) +{ + Py_ssize_t num_expected; + const char *more_or_less; + if (num_found < num_min) { + num_expected = num_min; + more_or_less = "at least"; + } else { + num_expected = num_max; + more_or_less = "at most"; + } + if (exact) { + more_or_less = "exactly"; + } + PyErr_Format(PyExc_TypeError, + "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", + func_name, more_or_less, num_expected, + (num_expected == 1) ? 
"" : "s", num_found); +} + +/* RaiseDoubleKeywords */ +static void __Pyx_RaiseDoubleKeywordsError( + const char* func_name, + PyObject* kw_name) +{ + PyErr_Format(PyExc_TypeError, + #if PY_MAJOR_VERSION >= 3 + "%s() got multiple values for keyword argument '%U'", func_name, kw_name); + #else + "%s() got multiple values for keyword argument '%s'", func_name, + PyString_AsString(kw_name)); + #endif +} + +/* ParseKeywords */ +static int __Pyx_ParseOptionalKeywords( + PyObject *kwds, + PyObject **argnames[], + PyObject *kwds2, + PyObject *values[], + Py_ssize_t num_pos_args, + const char* function_name) +{ + PyObject *key = 0, *value = 0; + Py_ssize_t pos = 0; + PyObject*** name; + PyObject*** first_kw_arg = argnames + num_pos_args; + while (PyDict_Next(kwds, &pos, &key, &value)) { + name = first_kw_arg; + while (*name && (**name != key)) name++; + if (*name) { + values[name-argnames] = value; + continue; + } + name = first_kw_arg; + #if PY_MAJOR_VERSION < 3 + if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { + while (*name) { + if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) + && _PyString_Eq(**name, key)) { + values[name-argnames] = value; + break; + } + name++; + } + if (*name) continue; + else { + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + if ((**argname == key) || ( + (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) + && _PyString_Eq(**argname, key))) { + goto arg_passed_twice; + } + argname++; + } + } + } else + #endif + if (likely(PyUnicode_Check(key))) { + while (*name) { + int cmp = (**name == key) ? 0 : + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : + #endif + PyUnicode_Compare(**name, key); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) { + values[name-argnames] = value; + break; + } + name++; + } + if (*name) continue; + else { + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + int cmp = (**argname == key) ? 0 : + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 
1 : + #endif + PyUnicode_Compare(**argname, key); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) goto arg_passed_twice; + argname++; + } + } + } else + goto invalid_keyword_type; + if (kwds2) { + if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; + } else { + goto invalid_keyword; + } + } + return 0; +arg_passed_twice: + __Pyx_RaiseDoubleKeywordsError(function_name, key); + goto bad; +invalid_keyword_type: + PyErr_Format(PyExc_TypeError, + "%.200s() keywords must be strings", function_name); + goto bad; +invalid_keyword: + PyErr_Format(PyExc_TypeError, + #if PY_MAJOR_VERSION < 3 + "%.200s() got an unexpected keyword argument '%.200s'", + function_name, PyString_AsString(key)); + #else + "%s() got an unexpected keyword argument '%U'", + function_name, key); + #endif +bad: + return -1; +} + +/* ArgTypeTest */ +static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) +{ + if (unlikely(!type)) { + PyErr_SetString(PyExc_SystemError, "Missing type object"); + return 0; + } + else if (exact) { + #if PY_MAJOR_VERSION == 2 + if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; + #endif + } + else { + if (likely(__Pyx_TypeCheck(obj, type))) return 1; + } + PyErr_Format(PyExc_TypeError, + "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", + name, type->tp_name, Py_TYPE(obj)->tp_name); + return 0; +} + +/* IsLittleEndian */ +static CYTHON_INLINE int __Pyx_Is_Little_Endian(void) +{ + union { + uint32_t u32; + uint8_t u8[4]; + } S; + S.u32 = 0x01020304; + return S.u8[0] == 4; +} + +/* BufferFormatCheck */ +static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, + __Pyx_BufFmt_StackElem* stack, + __Pyx_TypeInfo* type) { + stack[0].field = &ctx->root; + stack[0].parent_offset = 0; + ctx->root.type = type; + ctx->root.name = "buffer dtype"; + ctx->root.offset = 0; + ctx->head = stack; + ctx->head->field = &ctx->root; + ctx->fmt_offset = 0; + ctx->head->parent_offset = 0; + ctx->new_packmode = '@'; + ctx->enc_packmode = '@'; + ctx->new_count = 1; + ctx->enc_count = 0; + ctx->enc_type = 0; + ctx->is_complex = 0; + ctx->is_valid_array = 0; + ctx->struct_alignment = 0; + while (type->typegroup == 'S') { + ++ctx->head; + ctx->head->field = type->fields; + ctx->head->parent_offset = 0; + type = type->fields->type; + } +} +static int __Pyx_BufFmt_ParseNumber(const char** ts) { + int count; + const char* t = *ts; + if (*t < '0' || *t > '9') { + return -1; + } else { + count = *t++ - '0'; + while (*t >= '0' && *t < '9') { + count *= 10; + count += *t++ - '0'; + } + } + *ts = t; + return count; +} +static int __Pyx_BufFmt_ExpectNumber(const char **ts) { + int number = __Pyx_BufFmt_ParseNumber(ts); + if (number == -1) + PyErr_Format(PyExc_ValueError,\ + "Does not understand character buffer dtype format string ('%c')", **ts); + return number; +} +static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { + PyErr_Format(PyExc_ValueError, + "Unexpected format string character: '%c'", ch); +} +static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { + switch (ch) { + case 'c': return "'char'"; + case 'b': return "'signed char'"; + case 'B': return "'unsigned char'"; + case 'h': return "'short'"; + case 'H': return "'unsigned short'"; + case 'i': return "'int'"; + case 'I': return "'unsigned int'"; + case 'l': return "'long'"; + case 'L': return "'unsigned long'"; + case 'q': return "'long long'"; + case 'Q': return "'unsigned long long'"; + case 'f': return (is_complex ? 
"'complex float'" : "'float'"); + case 'd': return (is_complex ? "'complex double'" : "'double'"); + case 'g': return (is_complex ? "'complex long double'" : "'long double'"); + case 'T': return "a struct"; + case 'O': return "Python object"; + case 'P': return "a pointer"; + case 's': case 'p': return "a string"; + case 0: return "end"; + default: return "unparseable format string"; + } +} +static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { + switch (ch) { + case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return 2; + case 'i': case 'I': case 'l': case 'L': return 4; + case 'q': case 'Q': return 8; + case 'f': return (is_complex ? 8 : 4); + case 'd': return (is_complex ? 16 : 8); + case 'g': { + PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); + return 0; + } + case 'O': case 'P': return sizeof(void*); + default: + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } +} +static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { + switch (ch) { + case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return sizeof(short); + case 'i': case 'I': return sizeof(int); + case 'l': case 'L': return sizeof(long); + #ifdef HAVE_LONG_LONG + case 'q': case 'Q': return sizeof(PY_LONG_LONG); + #endif + case 'f': return sizeof(float) * (is_complex ? 2 : 1); + case 'd': return sizeof(double) * (is_complex ? 2 : 1); + case 'g': return sizeof(long double) * (is_complex ? 2 : 1); + case 'O': case 'P': return sizeof(void*); + default: { + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } + } +} +typedef struct { char c; short x; } __Pyx_st_short; +typedef struct { char c; int x; } __Pyx_st_int; +typedef struct { char c; long x; } __Pyx_st_long; +typedef struct { char c; float x; } __Pyx_st_float; +typedef struct { char c; double x; } __Pyx_st_double; +typedef struct { char c; long double x; } __Pyx_st_longdouble; +typedef struct { char c; void *x; } __Pyx_st_void_p; +#ifdef HAVE_LONG_LONG +typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; +#endif +static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { + switch (ch) { + case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); + case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); + case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); +#ifdef HAVE_LONG_LONG + case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); +#endif + case 'f': return sizeof(__Pyx_st_float) - sizeof(float); + case 'd': return sizeof(__Pyx_st_double) - sizeof(double); + case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); + case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); + default: + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } +} +/* These are for computing the padding at the end of the struct to align + on the first member of the struct. This will probably the same as above, + but we don't have any guarantees. 
+ */ +typedef struct { short x; char c; } __Pyx_pad_short; +typedef struct { int x; char c; } __Pyx_pad_int; +typedef struct { long x; char c; } __Pyx_pad_long; +typedef struct { float x; char c; } __Pyx_pad_float; +typedef struct { double x; char c; } __Pyx_pad_double; +typedef struct { long double x; char c; } __Pyx_pad_longdouble; +typedef struct { void *x; char c; } __Pyx_pad_void_p; +#ifdef HAVE_LONG_LONG +typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; +#endif +static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { + switch (ch) { + case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); + case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); + case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); +#ifdef HAVE_LONG_LONG + case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); +#endif + case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); + case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); + case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); + case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); + default: + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } +} +static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { + switch (ch) { + case 'c': + return 'H'; + case 'b': case 'h': case 'i': + case 'l': case 'q': case 's': case 'p': + return 'I'; + case 'B': case 'H': case 'I': case 'L': case 'Q': + return 'U'; + case 'f': case 'd': case 'g': + return (is_complex ? 'C' : 'R'); + case 'O': + return 'O'; + case 'P': + return 'P'; + default: { + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } + } +} +static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { + if (ctx->head == NULL || ctx->head->field == &ctx->root) { + const char* expected; + const char* quote; + if (ctx->head == NULL) { + expected = "end"; + quote = ""; + } else { + expected = ctx->head->field->type->name; + quote = "'"; + } + PyErr_Format(PyExc_ValueError, + "Buffer dtype mismatch, expected %s%s%s but got %s", + quote, expected, quote, + __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); + } else { + __Pyx_StructField* field = ctx->head->field; + __Pyx_StructField* parent = (ctx->head - 1)->field; + PyErr_Format(PyExc_ValueError, + "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", + field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), + parent->type->name, field->name); + } +} +static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { + char group; + size_t size, offset, arraysize = 1; + if (ctx->enc_type == 0) return 0; + if (ctx->head->field->type->arraysize[0]) { + int i, ndim = 0; + if (ctx->enc_type == 's' || ctx->enc_type == 'p') { + ctx->is_valid_array = ctx->head->field->type->ndim == 1; + ndim = 1; + if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { + PyErr_Format(PyExc_ValueError, + "Expected a dimension of size %zu, got %zu", + ctx->head->field->type->arraysize[0], ctx->enc_count); + return -1; + } + } + if (!ctx->is_valid_array) { + PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", + ctx->head->field->type->ndim, ndim); + return -1; + } + for (i = 0; i < ctx->head->field->type->ndim; i++) { + arraysize *= ctx->head->field->type->arraysize[i]; + } + ctx->is_valid_array = 0; + ctx->enc_count = 1; + } + group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, 
ctx->is_complex); + do { + __Pyx_StructField* field = ctx->head->field; + __Pyx_TypeInfo* type = field->type; + if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { + size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); + } else { + size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); + } + if (ctx->enc_packmode == '@') { + size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); + size_t align_mod_offset; + if (align_at == 0) return -1; + align_mod_offset = ctx->fmt_offset % align_at; + if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; + if (ctx->struct_alignment == 0) + ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, + ctx->is_complex); + } + if (type->size != size || type->typegroup != group) { + if (type->typegroup == 'C' && type->fields != NULL) { + size_t parent_offset = ctx->head->parent_offset + field->offset; + ++ctx->head; + ctx->head->field = type->fields; + ctx->head->parent_offset = parent_offset; + continue; + } + if ((type->typegroup == 'H' || group == 'H') && type->size == size) { + } else { + __Pyx_BufFmt_RaiseExpected(ctx); + return -1; + } + } + offset = ctx->head->parent_offset + field->offset; + if (ctx->fmt_offset != offset) { + PyErr_Format(PyExc_ValueError, + "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", + (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); + return -1; + } + ctx->fmt_offset += size; + if (arraysize) + ctx->fmt_offset += (arraysize - 1) * size; + --ctx->enc_count; + while (1) { + if (field == &ctx->root) { + ctx->head = NULL; + if (ctx->enc_count != 0) { + __Pyx_BufFmt_RaiseExpected(ctx); + return -1; + } + break; + } + ctx->head->field = ++field; + if (field->type == NULL) { + --ctx->head; + field = ctx->head->field; + continue; + } else if (field->type->typegroup == 'S') { + size_t parent_offset = ctx->head->parent_offset + field->offset; + if (field->type->fields->type == NULL) continue; + field = field->type->fields; + ++ctx->head; + ctx->head->field = field; + ctx->head->parent_offset = parent_offset; + break; + } else { + break; + } + } + } while (ctx->enc_count); + ctx->enc_type = 0; + ctx->is_complex = 0; + return 0; +} +static PyObject * +__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) +{ + const char *ts = *tsp; + int i = 0, number; + int ndim = ctx->head->field->type->ndim; +; + ++ts; + if (ctx->new_count != 1) { + PyErr_SetString(PyExc_ValueError, + "Cannot handle repeated arrays in format string"); + return NULL; + } + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + while (*ts && *ts != ')') { + switch (*ts) { + case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; + default: break; + } + number = __Pyx_BufFmt_ExpectNumber(&ts); + if (number == -1) return NULL; + if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) + return PyErr_Format(PyExc_ValueError, + "Expected a dimension of size %zu, got %d", + ctx->head->field->type->arraysize[i], number); + if (*ts != ',' && *ts != ')') + return PyErr_Format(PyExc_ValueError, + "Expected a comma in format string, got '%c'", *ts); + if (*ts == ',') ts++; + i++; + } + if (i != ndim) + return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", + ctx->head->field->type->ndim, i); + if (!*ts) { + PyErr_SetString(PyExc_ValueError, + "Unexpected end of format string, expected ')'"); + return NULL; + } + 
ctx->is_valid_array = 1; + ctx->new_count = 1; + *tsp = ++ts; + return Py_None; +} +static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { + int got_Z = 0; + while (1) { + switch(*ts) { + case 0: + if (ctx->enc_type != 0 && ctx->head == NULL) { + __Pyx_BufFmt_RaiseExpected(ctx); + return NULL; + } + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + if (ctx->head != NULL) { + __Pyx_BufFmt_RaiseExpected(ctx); + return NULL; + } + return ts; + case ' ': + case '\r': + case '\n': + ++ts; + break; + case '<': + if (!__Pyx_Is_Little_Endian()) { + PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); + return NULL; + } + ctx->new_packmode = '='; + ++ts; + break; + case '>': + case '!': + if (__Pyx_Is_Little_Endian()) { + PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); + return NULL; + } + ctx->new_packmode = '='; + ++ts; + break; + case '=': + case '@': + case '^': + ctx->new_packmode = *ts++; + break; + case 'T': + { + const char* ts_after_sub; + size_t i, struct_count = ctx->new_count; + size_t struct_alignment = ctx->struct_alignment; + ctx->new_count = 1; + ++ts; + if (*ts != '{') { + PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); + return NULL; + } + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->enc_type = 0; + ctx->enc_count = 0; + ctx->struct_alignment = 0; + ++ts; + ts_after_sub = ts; + for (i = 0; i != struct_count; ++i) { + ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); + if (!ts_after_sub) return NULL; + } + ts = ts_after_sub; + if (struct_alignment) ctx->struct_alignment = struct_alignment; + } + break; + case '}': + { + size_t alignment = ctx->struct_alignment; + ++ts; + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->enc_type = 0; + if (alignment && ctx->fmt_offset % alignment) { + ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); + } + } + return ts; + case 'x': + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->fmt_offset += ctx->new_count; + ctx->new_count = 1; + ctx->enc_count = 0; + ctx->enc_type = 0; + ctx->enc_packmode = ctx->new_packmode; + ++ts; + break; + case 'Z': + got_Z = 1; + ++ts; + if (*ts != 'f' && *ts != 'd' && *ts != 'g') { + __Pyx_BufFmt_RaiseUnexpectedChar('Z'); + return NULL; + } + case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': + case 'l': case 'L': case 'q': case 'Q': + case 'f': case 'd': case 'g': + case 'O': case 'p': + if (ctx->enc_type == *ts && got_Z == ctx->is_complex && + ctx->enc_packmode == ctx->new_packmode) { + ctx->enc_count += ctx->new_count; + ctx->new_count = 1; + got_Z = 0; + ++ts; + break; + } + case 's': + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->enc_count = ctx->new_count; + ctx->enc_packmode = ctx->new_packmode; + ctx->enc_type = *ts; + ctx->is_complex = got_Z; + ++ts; + ctx->new_count = 1; + got_Z = 0; + break; + case ':': + ++ts; + while(*ts != ':') ++ts; + ++ts; + break; + case '(': + if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; + break; + default: + { + int number = __Pyx_BufFmt_ExpectNumber(&ts); + if (number == -1) return NULL; + ctx->new_count = (size_t)number; + } + } + } +} + +/* BufferGetAndValidate */ + static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { + if (unlikely(info->buf == NULL)) return; + if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL; + __Pyx_ReleaseBuffer(info); +} +static void 
__Pyx_ZeroBuffer(Py_buffer* buf) { + buf->buf = NULL; + buf->obj = NULL; + buf->strides = __Pyx_zeros; + buf->shape = __Pyx_zeros; + buf->suboffsets = __Pyx_minusones; +} +static int __Pyx__GetBufferAndValidate( + Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, + int nd, int cast, __Pyx_BufFmt_StackElem* stack) +{ + buf->buf = NULL; + if (unlikely(__Pyx_GetBuffer(obj, buf, flags) == -1)) { + __Pyx_ZeroBuffer(buf); + return -1; + } + if (unlikely(buf->ndim != nd)) { + PyErr_Format(PyExc_ValueError, + "Buffer has wrong number of dimensions (expected %d, got %d)", + nd, buf->ndim); + goto fail; + } + if (!cast) { + __Pyx_BufFmt_Context ctx; + __Pyx_BufFmt_Init(&ctx, stack, dtype); + if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; + } + if (unlikely((unsigned)buf->itemsize != dtype->size)) { + PyErr_Format(PyExc_ValueError, + "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)", + buf->itemsize, (buf->itemsize > 1) ? "s" : "", + dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : ""); + goto fail; + } + if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; + return 0; +fail:; + __Pyx_SafeReleaseBuffer(buf); + return -1; +} + +/* None */ + static CYTHON_INLINE long __Pyx_mod_long(long a, long b) { + long r = a % b; + r += ((r != 0) & ((r ^ b) < 0)) * b; + return r; +} + +/* PyObjectCall */ + #if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { + PyObject *result; + ternaryfunc call = func->ob_type->tp_call; + if (unlikely(!call)) + return PyObject_Call(func, arg, kw); + if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) + return NULL; + result = (*call)(func, arg, kw); + Py_LeaveRecursiveCall(); + if (unlikely(!result) && unlikely(!PyErr_Occurred())) { + PyErr_SetString( + PyExc_SystemError, + "NULL result without error in PyObject_Call"); + } + return result; +} +#endif + +/* PyErrFetchRestore */ + #if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + tmp_type = tstate->curexc_type; + tmp_value = tstate->curexc_value; + tmp_tb = tstate->curexc_traceback; + tstate->curexc_type = type; + tstate->curexc_value = value; + tstate->curexc_traceback = tb; + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +} +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { + *type = tstate->curexc_type; + *value = tstate->curexc_value; + *tb = tstate->curexc_traceback; + tstate->curexc_type = 0; + tstate->curexc_value = 0; + tstate->curexc_traceback = 0; +} +#endif + +/* RaiseException */ + #if PY_MAJOR_VERSION < 3 +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, + CYTHON_UNUSED PyObject *cause) { + __Pyx_PyThreadState_declare + Py_XINCREF(type); + if (!value || value == Py_None) + value = NULL; + else + Py_INCREF(value); + if (!tb || tb == Py_None) + tb = NULL; + else { + Py_INCREF(tb); + if (!PyTraceBack_Check(tb)) { + PyErr_SetString(PyExc_TypeError, + "raise: arg 3 must be a traceback or None"); + goto raise_error; + } + } + if (PyType_Check(type)) { +#if CYTHON_COMPILING_IN_PYPY + if (!value) { + Py_INCREF(Py_None); + value = Py_None; + } +#endif + PyErr_NormalizeException(&type, &value, &tb); + } else { + if (value) { + 
PyErr_SetString(PyExc_TypeError, + "instance exception may not have a separate value"); + goto raise_error; + } + value = type; + type = (PyObject*) Py_TYPE(type); + Py_INCREF(type); + if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { + PyErr_SetString(PyExc_TypeError, + "raise: exception class must be a subclass of BaseException"); + goto raise_error; + } + } + __Pyx_PyThreadState_assign + __Pyx_ErrRestore(type, value, tb); + return; +raise_error: + Py_XDECREF(value); + Py_XDECREF(type); + Py_XDECREF(tb); + return; +} +#else +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { + PyObject* owned_instance = NULL; + if (tb == Py_None) { + tb = 0; + } else if (tb && !PyTraceBack_Check(tb)) { + PyErr_SetString(PyExc_TypeError, + "raise: arg 3 must be a traceback or None"); + goto bad; + } + if (value == Py_None) + value = 0; + if (PyExceptionInstance_Check(type)) { + if (value) { + PyErr_SetString(PyExc_TypeError, + "instance exception may not have a separate value"); + goto bad; + } + value = type; + type = (PyObject*) Py_TYPE(value); + } else if (PyExceptionClass_Check(type)) { + PyObject *instance_class = NULL; + if (value && PyExceptionInstance_Check(value)) { + instance_class = (PyObject*) Py_TYPE(value); + if (instance_class != type) { + int is_subclass = PyObject_IsSubclass(instance_class, type); + if (!is_subclass) { + instance_class = NULL; + } else if (unlikely(is_subclass == -1)) { + goto bad; + } else { + type = instance_class; + } + } + } + if (!instance_class) { + PyObject *args; + if (!value) + args = PyTuple_New(0); + else if (PyTuple_Check(value)) { + Py_INCREF(value); + args = value; + } else + args = PyTuple_Pack(1, value); + if (!args) + goto bad; + owned_instance = PyObject_Call(type, args, NULL); + Py_DECREF(args); + if (!owned_instance) + goto bad; + value = owned_instance; + if (!PyExceptionInstance_Check(value)) { + PyErr_Format(PyExc_TypeError, + "calling %R should have returned an instance of " + "BaseException, not %R", + type, Py_TYPE(value)); + goto bad; + } + } + } else { + PyErr_SetString(PyExc_TypeError, + "raise: exception class must be a subclass of BaseException"); + goto bad; + } + if (cause) { + PyObject *fixed_cause; + if (cause == Py_None) { + fixed_cause = NULL; + } else if (PyExceptionClass_Check(cause)) { + fixed_cause = PyObject_CallObject(cause, NULL); + if (fixed_cause == NULL) + goto bad; + } else if (PyExceptionInstance_Check(cause)) { + fixed_cause = cause; + Py_INCREF(fixed_cause); + } else { + PyErr_SetString(PyExc_TypeError, + "exception causes must derive from " + "BaseException"); + goto bad; + } + PyException_SetCause(value, fixed_cause); + } + PyErr_SetObject(type, value); + if (tb) { +#if CYTHON_COMPILING_IN_PYPY + PyObject *tmp_type, *tmp_value, *tmp_tb; + PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); + Py_INCREF(tb); + PyErr_Restore(tmp_type, tmp_value, tb); + Py_XDECREF(tmp_tb); +#else + PyThreadState *tstate = __Pyx_PyThreadState_Current; + PyObject* tmp_tb = tstate->curexc_traceback; + if (tb != tmp_tb) { + Py_INCREF(tb); + tstate->curexc_traceback = tb; + Py_XDECREF(tmp_tb); + } +#endif + } +bad: + Py_XDECREF(owned_instance); + return; +} +#endif + +/* GetModuleGlobalName */ + static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) { + PyObject *result; +#if !CYTHON_AVOID_BORROWED_REFS + result = PyDict_GetItem(__pyx_d, name); + if (likely(result)) { + Py_INCREF(result); + } else { +#else + result = PyObject_GetItem(__pyx_d, name); + if 
(!result) { + PyErr_Clear(); +#endif + result = __Pyx_GetBuiltinName(name); + } + return result; +} + +/* None */ + static CYTHON_INLINE long __Pyx_div_long(long a, long b) { + long q = a / b; + long r = a - q*b; + q -= ((r != 0) & ((r ^ b) < 0)); + return q; +} + +/* ExtTypeTest */ + static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { + if (unlikely(!type)) { + PyErr_SetString(PyExc_SystemError, "Missing type object"); + return 0; + } + if (likely(__Pyx_TypeCheck(obj, type))) + return 1; + PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", + Py_TYPE(obj)->tp_name, type->tp_name); + return 0; +} + +/* RaiseTooManyValuesToUnpack */ + static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { + PyErr_Format(PyExc_ValueError, + "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); +} + +/* RaiseNeedMoreValuesToUnpack */ + static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { + PyErr_Format(PyExc_ValueError, + "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", + index, (index == 1) ? "" : "s"); +} + +/* RaiseNoneIterError */ + static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { + PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); +} + +/* SaveResetException */ + #if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { + #if PY_VERSION_HEX >= 0x030700A2 + *type = tstate->exc_state.exc_type; + *value = tstate->exc_state.exc_value; + *tb = tstate->exc_state.exc_traceback; + #else + *type = tstate->exc_type; + *value = tstate->exc_value; + *tb = tstate->exc_traceback; + #endif + Py_XINCREF(*type); + Py_XINCREF(*value); + Py_XINCREF(*tb); +} +static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + #if PY_VERSION_HEX >= 0x030700A2 + tmp_type = tstate->exc_state.exc_type; + tmp_value = tstate->exc_state.exc_value; + tmp_tb = tstate->exc_state.exc_traceback; + tstate->exc_state.exc_type = type; + tstate->exc_state.exc_value = value; + tstate->exc_state.exc_traceback = tb; + #else + tmp_type = tstate->exc_type; + tmp_value = tstate->exc_value; + tmp_tb = tstate->exc_traceback; + tstate->exc_type = type; + tstate->exc_value = value; + tstate->exc_traceback = tb; + #endif + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +} +#endif + +/* PyErrExceptionMatches */ + #if CYTHON_FAST_THREAD_STATE +static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { + Py_ssize_t i, n; + n = PyTuple_GET_SIZE(tuple); +#if PY_MAJOR_VERSION >= 3 + for (i=0; i<n; i++) { + if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; + } +#endif + for (i=0; i<n; i++) { + if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1; + } + return 0; +} +static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { + PyObject *exc_type = tstate->curexc_type; + if (exc_type == err) return 1; + if (unlikely(!exc_type)) return 0; + if (unlikely(PyTuple_Check(err))) + return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); + return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); +} +#endif + +/* GetException */ + #if CYTHON_FAST_THREAD_STATE +static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { +#else +static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) { +#endif + PyObject *local_type, *local_value, *local_tb; +#if CYTHON_FAST_THREAD_STATE + PyObject *tmp_type, *tmp_value, *tmp_tb; + local_type = tstate->curexc_type; + local_value = tstate->curexc_value; + local_tb = tstate->curexc_traceback; + tstate->curexc_type = 0; +
tstate->curexc_value = 0; + tstate->curexc_traceback = 0; +#else + PyErr_Fetch(&local_type, &local_value, &local_tb); +#endif + PyErr_NormalizeException(&local_type, &local_value, &local_tb); +#if CYTHON_FAST_THREAD_STATE + if (unlikely(tstate->curexc_type)) +#else + if (unlikely(PyErr_Occurred())) +#endif + goto bad; + #if PY_MAJOR_VERSION >= 3 + if (local_tb) { + if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) + goto bad; + } + #endif + Py_XINCREF(local_tb); + Py_XINCREF(local_type); + Py_XINCREF(local_value); + *type = local_type; + *value = local_value; + *tb = local_tb; +#if CYTHON_FAST_THREAD_STATE + #if PY_VERSION_HEX >= 0x030700A2 + tmp_type = tstate->exc_state.exc_type; + tmp_value = tstate->exc_state.exc_value; + tmp_tb = tstate->exc_state.exc_traceback; + tstate->exc_state.exc_type = local_type; + tstate->exc_state.exc_value = local_value; + tstate->exc_state.exc_traceback = local_tb; + #else + tmp_type = tstate->exc_type; + tmp_value = tstate->exc_value; + tmp_tb = tstate->exc_traceback; + tstate->exc_type = local_type; + tstate->exc_value = local_value; + tstate->exc_traceback = local_tb; + #endif + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +#else + PyErr_SetExcInfo(local_type, local_value, local_tb); +#endif + return 0; +bad: + *type = 0; + *value = 0; + *tb = 0; + Py_XDECREF(local_type); + Py_XDECREF(local_value); + Py_XDECREF(local_tb); + return -1; +} + +/* Import */ + static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { + PyObject *empty_list = 0; + PyObject *module = 0; + PyObject *global_dict = 0; + PyObject *empty_dict = 0; + PyObject *list; + #if PY_MAJOR_VERSION < 3 + PyObject *py_import; + py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); + if (!py_import) + goto bad; + #endif + if (from_list) + list = from_list; + else { + empty_list = PyList_New(0); + if (!empty_list) + goto bad; + list = empty_list; + } + global_dict = PyModule_GetDict(__pyx_m); + if (!global_dict) + goto bad; + empty_dict = PyDict_New(); + if (!empty_dict) + goto bad; + { + #if PY_MAJOR_VERSION >= 3 + if (level == -1) { + if (strchr(__Pyx_MODULE_NAME, '.')) { + module = PyImport_ImportModuleLevelObject( + name, global_dict, empty_dict, list, 1); + if (!module) { + if (!PyErr_ExceptionMatches(PyExc_ImportError)) + goto bad; + PyErr_Clear(); + } + } + level = 0; + } + #endif + if (!module) { + #if PY_MAJOR_VERSION < 3 + PyObject *py_level = PyInt_FromLong(level); + if (!py_level) + goto bad; + module = PyObject_CallFunctionObjArgs(py_import, + name, global_dict, empty_dict, list, py_level, NULL); + Py_DECREF(py_level); + #else + module = PyImport_ImportModuleLevelObject( + name, global_dict, empty_dict, list, level); + #endif + } + } +bad: + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(py_import); + #endif + Py_XDECREF(empty_list); + Py_XDECREF(empty_dict); + return module; +} + +/* CLineInTraceback */ + #ifndef CYTHON_CLINE_IN_TRACEBACK +static int __Pyx_CLineForTraceback(CYTHON_UNUSED PyThreadState *tstate, int c_line) { + PyObject *use_cline; + PyObject *ptype, *pvalue, *ptraceback; +#if CYTHON_COMPILING_IN_CPYTHON + PyObject **cython_runtime_dict; +#endif + __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); +#if CYTHON_COMPILING_IN_CPYTHON + cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); + if (likely(cython_runtime_dict)) { + use_cline = PyDict_GetItem(*cython_runtime_dict, __pyx_n_s_cline_in_traceback); + } else +#endif + { + PyObject *use_cline_obj = 
__Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); + if (use_cline_obj) { + use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True; + Py_DECREF(use_cline_obj); + } else { + PyErr_Clear(); + use_cline = NULL; + } + } + if (!use_cline) { + c_line = 0; + PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); + } + else if (PyObject_Not(use_cline) != 0) { + c_line = 0; + } + __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); + return c_line; +} +#endif + +/* CodeObjectCache */ + static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { + int start = 0, mid = 0, end = count - 1; + if (end >= 0 && code_line > entries[end].code_line) { + return count; + } + while (start < end) { + mid = start + (end - start) / 2; + if (code_line < entries[mid].code_line) { + end = mid; + } else if (code_line > entries[mid].code_line) { + start = mid + 1; + } else { + return mid; + } + } + if (code_line <= entries[mid].code_line) { + return mid; + } else { + return mid + 1; + } +} +static PyCodeObject *__pyx_find_code_object(int code_line) { + PyCodeObject* code_object; + int pos; + if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { + return NULL; + } + pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); + if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { + return NULL; + } + code_object = __pyx_code_cache.entries[pos].code_object; + Py_INCREF(code_object); + return code_object; +} +static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { + int pos, i; + __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; + if (unlikely(!code_line)) { + return; + } + if (unlikely(!entries)) { + entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); + if (likely(entries)) { + __pyx_code_cache.entries = entries; + __pyx_code_cache.max_count = 64; + __pyx_code_cache.count = 1; + entries[0].code_line = code_line; + entries[0].code_object = code_object; + Py_INCREF(code_object); + } + return; + } + pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); + if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { + PyCodeObject* tmp = entries[pos].code_object; + entries[pos].code_object = code_object; + Py_DECREF(tmp); + return; + } + if (__pyx_code_cache.count == __pyx_code_cache.max_count) { + int new_max = __pyx_code_cache.max_count + 64; + entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( + __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry)); + if (unlikely(!entries)) { + return; + } + __pyx_code_cache.entries = entries; + __pyx_code_cache.max_count = new_max; + } + for (i=__pyx_code_cache.count; i>pos; i--) { + entries[i] = entries[i-1]; + } + entries[pos].code_line = code_line; + entries[pos].code_object = code_object; + __pyx_code_cache.count++; + Py_INCREF(code_object); +} + +/* AddTraceback */ + #include "compile.h" +#include "frameobject.h" +#include "traceback.h" +static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( + const char *funcname, int c_line, + int py_line, const char *filename) { + PyCodeObject *py_code = 0; + PyObject *py_srcfile = 0; + PyObject *py_funcname = 0; + #if PY_MAJOR_VERSION < 3 + py_srcfile = PyString_FromString(filename); + #else + py_srcfile = PyUnicode_FromString(filename); + #endif + 
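+ /* Synthesize a minimal PyCodeObject so that C-level failures are reported against the original source file, function name and line. */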
if (!py_srcfile) goto bad; + if (c_line) { + #if PY_MAJOR_VERSION < 3 + py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); + #else + py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); + #endif + } + else { + #if PY_MAJOR_VERSION < 3 + py_funcname = PyString_FromString(funcname); + #else + py_funcname = PyUnicode_FromString(funcname); + #endif + } + if (!py_funcname) goto bad; + py_code = __Pyx_PyCode_New( + 0, + 0, + 0, + 0, + 0, + __pyx_empty_bytes, /*PyObject *code,*/ + __pyx_empty_tuple, /*PyObject *consts,*/ + __pyx_empty_tuple, /*PyObject *names,*/ + __pyx_empty_tuple, /*PyObject *varnames,*/ + __pyx_empty_tuple, /*PyObject *freevars,*/ + __pyx_empty_tuple, /*PyObject *cellvars,*/ + py_srcfile, /*PyObject *filename,*/ + py_funcname, /*PyObject *name,*/ + py_line, + __pyx_empty_bytes /*PyObject *lnotab*/ + ); + Py_DECREF(py_srcfile); + Py_DECREF(py_funcname); + return py_code; +bad: + Py_XDECREF(py_srcfile); + Py_XDECREF(py_funcname); + return NULL; +} +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename) { + PyCodeObject *py_code = 0; + PyFrameObject *py_frame = 0; + PyThreadState *tstate = __Pyx_PyThreadState_Current; + if (c_line) { + c_line = __Pyx_CLineForTraceback(tstate, c_line); + } + py_code = __pyx_find_code_object(c_line ? -c_line : py_line); + if (!py_code) { + py_code = __Pyx_CreateCodeObjectForTraceback( + funcname, c_line, py_line, filename); + if (!py_code) goto bad; + __pyx_insert_code_object(c_line ? -c_line : py_line, py_code); + } + py_frame = PyFrame_New( + tstate, /*PyThreadState *tstate,*/ + py_code, /*PyCodeObject *code,*/ + __pyx_d, /*PyObject *globals,*/ + 0 /*PyObject *locals*/ + ); + if (!py_frame) goto bad; + __Pyx_PyFrame_SetLineNumber(py_frame, py_line); + PyTraceBack_Here(py_frame); +bad: + Py_XDECREF(py_code); + Py_XDECREF(py_frame); +} + +#if PY_MAJOR_VERSION < 3 +static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { + if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); + if (__Pyx_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags); + PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); + return -1; +} +static void __Pyx_ReleaseBuffer(Py_buffer *view) { + PyObject *obj = view->obj; + if (!obj) return; + if (PyObject_CheckBuffer(obj)) { + PyBuffer_Release(view); + return; + } + if ((0)) {} + else if (__Pyx_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) __pyx_pw_5numpy_7ndarray_3__releasebuffer__(obj, view); + view->obj = NULL; + Py_DECREF(obj); +} +#endif + + + /* CIntToPy */ + static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { + const int neg_one = (int) -1, const_zero = (int) 0; + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(int) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(int) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(int) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + 
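+ /* Byte-order probe: the first byte of the int 1 is non-zero only on a little-endian target. */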
unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(int), + little, !is_unsigned); + } +} + +/* CIntFromPyVerify */ + #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) +#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) +#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ + {\ + func_type value = func_value;\ + if (sizeof(target_type) < sizeof(func_type)) {\ + if (unlikely(value != (func_type) (target_type) value)) {\ + func_type zero = 0;\ + if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ + return (target_type) -1;\ + if (is_unsigned && unlikely(value < zero))\ + goto raise_neg_overflow;\ + else\ + goto raise_overflow;\ + }\ + }\ + return (target_type) value;\ + } + +/* CIntToPy */ + static CYTHON_INLINE PyObject* __Pyx_PyInt_From_unsigned_int(unsigned int value) { + const unsigned int neg_one = (unsigned int) -1, const_zero = (unsigned int) 0; + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(unsigned int) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(unsigned int) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(unsigned int) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(unsigned int) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(unsigned int) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(unsigned int), + little, !is_unsigned); + } +} + +/* CIntToPy */ + static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { + const long neg_one = (long) -1, const_zero = (long) 0; + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(long) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(long) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(long) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(long), + little, !is_unsigned); + } +} + +/* Declarations */ + #if CYTHON_CCOMPLEX + #ifdef __cplusplus + static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { + return ::std::complex< float >(x, y); + } + #else + static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { + return x + y*(__pyx_t_float_complex)_Complex_I; + } + #endif +#else + static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { + __pyx_t_float_complex z; + z.real = x; + z.imag = y; + return z; + } 
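+ /* Fallback for compilers without native C complex support: emulate 'float complex' with a two-member struct. */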
+#endif + +/* Arithmetic */ + #if CYTHON_CCOMPLEX +#else + static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + return (a.real == b.real) && (a.imag == b.imag); + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + __pyx_t_float_complex z; + z.real = a.real + b.real; + z.imag = a.imag + b.imag; + return z; + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + __pyx_t_float_complex z; + z.real = a.real - b.real; + z.imag = a.imag - b.imag; + return z; + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + __pyx_t_float_complex z; + z.real = a.real * b.real - a.imag * b.imag; + z.imag = a.real * b.imag + a.imag * b.real; + return z; + } + #if 1 + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + if (b.imag == 0) { + return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real); + } else if (fabsf(b.real) >= fabsf(b.imag)) { + if (b.real == 0 && b.imag == 0) { + return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.imag); + } else { + float r = b.imag / b.real; + float s = 1.0 / (b.real + b.imag * r); + return __pyx_t_float_complex_from_parts( + (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); + } + } else { + float r = b.real / b.imag; + float s = 1.0 / (b.imag + b.real * r); + return __pyx_t_float_complex_from_parts( + (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); + } + } + #else + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + if (b.imag == 0) { + return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real); + } else { + float denom = b.real * b.real + b.imag * b.imag; + return __pyx_t_float_complex_from_parts( + (a.real * b.real + a.imag * b.imag) / denom, + (a.imag * b.real - a.real * b.imag) / denom); + } + } + #endif + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex a) { + __pyx_t_float_complex z; + z.real = -a.real; + z.imag = -a.imag; + return z; + } + static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex a) { + return (a.real == 0) && (a.imag == 0); + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex a) { + __pyx_t_float_complex z; + z.real = a.real; + z.imag = -a.imag; + return z; + } + #if 1 + static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex z) { + #if !defined(HAVE_HYPOT) || defined(_MSC_VER) + return sqrtf(z.real*z.real + z.imag*z.imag); + #else + return hypotf(z.real, z.imag); + #endif + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + __pyx_t_float_complex z; + float r, lnr, theta, z_r, z_theta; + if (b.imag == 0 && b.real == (int)b.real) { + if (b.real < 0) { + float denom = a.real * a.real + a.imag * a.imag; + a.real = a.real / denom; + a.imag = -a.imag / denom; + b.real = -b.real; + } + switch ((int)b.real) { + case 0: + z.real = 1; + z.imag = 0; + return z; + case 1: + return a; + case 2: + z = __Pyx_c_prod_float(a, a); + return __Pyx_c_prod_float(a, a); + case 3: + z = __Pyx_c_prod_float(a, a); + return __Pyx_c_prod_float(z, a); + case 4: + z = __Pyx_c_prod_float(a, a); + return __Pyx_c_prod_float(z, z); + } + } + if (a.imag == 0) { + if (a.real == 0) { + return a; + } else 
if (b.imag == 0) { + z.real = powf(a.real, b.real); + z.imag = 0; + return z; + } else if (a.real > 0) { + r = a.real; + theta = 0; + } else { + r = -a.real; + theta = atan2f(0, -1); + } + } else { + r = __Pyx_c_abs_float(a); + theta = atan2f(a.imag, a.real); + } + lnr = logf(r); + z_r = expf(lnr * b.real - theta * b.imag); + z_theta = theta * b.real + lnr * b.imag; + z.real = z_r * cosf(z_theta); + z.imag = z_r * sinf(z_theta); + return z; + } + #endif +#endif + +/* Declarations */ + #if CYTHON_CCOMPLEX + #ifdef __cplusplus + static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { + return ::std::complex< double >(x, y); + } + #else + static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { + return x + y*(__pyx_t_double_complex)_Complex_I; + } + #endif +#else + static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { + __pyx_t_double_complex z; + z.real = x; + z.imag = y; + return z; + } +#endif + +/* Arithmetic */ + #if CYTHON_CCOMPLEX +#else + static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + return (a.real == b.real) && (a.imag == b.imag); + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + __pyx_t_double_complex z; + z.real = a.real + b.real; + z.imag = a.imag + b.imag; + return z; + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + __pyx_t_double_complex z; + z.real = a.real - b.real; + z.imag = a.imag - b.imag; + return z; + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + __pyx_t_double_complex z; + z.real = a.real * b.real - a.imag * b.imag; + z.imag = a.real * b.imag + a.imag * b.real; + return z; + } + #if 1 + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + if (b.imag == 0) { + return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real); + } else if (fabs(b.real) >= fabs(b.imag)) { + if (b.real == 0 && b.imag == 0) { + return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.imag); + } else { + double r = b.imag / b.real; + double s = 1.0 / (b.real + b.imag * r); + return __pyx_t_double_complex_from_parts( + (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); + } + } else { + double r = b.real / b.imag; + double s = 1.0 / (b.imag + b.real * r); + return __pyx_t_double_complex_from_parts( + (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); + } + } + #else + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + if (b.imag == 0) { + return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real); + } else { + double denom = b.real * b.real + b.imag * b.imag; + return __pyx_t_double_complex_from_parts( + (a.real * b.real + a.imag * b.imag) / denom, + (a.imag * b.real - a.real * b.imag) / denom); + } + } + #endif + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex a) { + __pyx_t_double_complex z; + z.real = -a.real; + z.imag = -a.imag; + return z; + } + static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex a) { + return (a.real == 0) && (a.imag == 0); + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex a) { + 
__pyx_t_double_complex z; + z.real = a.real; + z.imag = -a.imag; + return z; + } + #if 1 + static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex z) { + #if !defined(HAVE_HYPOT) || defined(_MSC_VER) + return sqrt(z.real*z.real + z.imag*z.imag); + #else + return hypot(z.real, z.imag); + #endif + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + __pyx_t_double_complex z; + double r, lnr, theta, z_r, z_theta; + if (b.imag == 0 && b.real == (int)b.real) { + if (b.real < 0) { + double denom = a.real * a.real + a.imag * a.imag; + a.real = a.real / denom; + a.imag = -a.imag / denom; + b.real = -b.real; + } + switch ((int)b.real) { + case 0: + z.real = 1; + z.imag = 0; + return z; + case 1: + return a; + case 2: + z = __Pyx_c_prod_double(a, a); + return __Pyx_c_prod_double(a, a); + case 3: + z = __Pyx_c_prod_double(a, a); + return __Pyx_c_prod_double(z, a); + case 4: + z = __Pyx_c_prod_double(a, a); + return __Pyx_c_prod_double(z, z); + } + } + if (a.imag == 0) { + if (a.real == 0) { + return a; + } else if (b.imag == 0) { + z.real = pow(a.real, b.real); + z.imag = 0; + return z; + } else if (a.real > 0) { + r = a.real; + theta = 0; + } else { + r = -a.real; + theta = atan2(0, -1); + } + } else { + r = __Pyx_c_abs_double(a); + theta = atan2(a.imag, a.real); + } + lnr = log(r); + z_r = exp(lnr * b.real - theta * b.imag); + z_theta = theta * b.real + lnr * b.imag; + z.real = z_r * cos(z_theta); + z.imag = z_r * sin(z_theta); + return z; + } + #endif +#endif + +/* CIntToPy */ + static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value) { + const enum NPY_TYPES neg_one = (enum NPY_TYPES) -1, const_zero = (enum NPY_TYPES) 0; + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(enum NPY_TYPES) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(enum NPY_TYPES) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(enum NPY_TYPES) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(enum NPY_TYPES), + little, !is_unsigned); + } +} + +/* CIntFromPy */ + static CYTHON_INLINE unsigned int __Pyx_PyInt_As_unsigned_int(PyObject *x) { + const unsigned int neg_one = (unsigned int) -1, const_zero = (unsigned int) 0; + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(unsigned int) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(unsigned int, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (unsigned int) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (unsigned int) 0; + case 1: __PYX_VERIFY_RETURN_INT(unsigned int, digit, digits[0]) + case 2: + if (8 * sizeof(unsigned int) > 1 * PyLong_SHIFT) { + 
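+ /* Fast path: reassemble small CPython longs directly from their 15- or 30-bit internal digits instead of going through the generic PyLong_As* converters. */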
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) >= 2 * PyLong_SHIFT) { + return (unsigned int) (((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(unsigned int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) >= 3 * PyLong_SHIFT) { + return (unsigned int) (((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(unsigned int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) >= 4 * PyLong_SHIFT) { + return (unsigned int) (((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (unsigned int) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(unsigned int) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(unsigned int, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(unsigned int) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(unsigned int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (unsigned int) 0; + case -1: __PYX_VERIFY_RETURN_INT(unsigned int, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(unsigned int, digit, +digits[0]) + case -2: + if (8 * sizeof(unsigned int) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT) { + return (unsigned int) (((unsigned int)-1)*(((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(unsigned int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT) { + return (unsigned int) ((((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((((unsigned long)digits[2]) << 
PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT) { + return (unsigned int) (((unsigned int)-1)*(((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(unsigned int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT) { + return (unsigned int) ((((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 4 * PyLong_SHIFT) { + return (unsigned int) (((unsigned int)-1)*(((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(unsigned int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 4 * PyLong_SHIFT) { + return (unsigned int) ((((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); + } + } + break; + } +#endif + if (sizeof(unsigned int) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(unsigned int, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(unsigned int) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(unsigned int, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + unsigned int val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (unsigned int) -1; + } + } else { + unsigned int val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (unsigned int) -1; + val = __Pyx_PyInt_As_unsigned_int(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to unsigned int"); + return (unsigned int) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert 
negative value to unsigned int"); + return (unsigned int) -1; +} + +/* CIntFromPy */ + static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { + const int neg_one = (int) -1, const_zero = (int) 0; + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(int) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (int) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (int) 0; + case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) + case 2: + if (8 * sizeof(int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { + return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { + return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { + return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (int) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(int) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (int) 0; + case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) + case -2: + if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) 
{ + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { + return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + } +#endif + if (sizeof(int) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + int val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (int) -1; + } + } else { + int val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (int) -1; + val = __Pyx_PyInt_As_int(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to int"); + 
return (int) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to int"); + return (int) -1; +} + +/* CIntFromPy */ + static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { + const long neg_one = (long) -1, const_zero = (long) 0; + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(long) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (long) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (long) 0; + case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) + case 2: + if (8 * sizeof(long) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { + return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(long) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { + return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(long) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { + return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (long) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(long) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (long) 0; + case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) + case -2: + if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + 
} + } + break; + case 2: + if (8 * sizeof(long) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(long) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(long) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + } +#endif + if (sizeof(long) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + long val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (long) -1; + } + } else { + long val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (long) -1; + 
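+ /* Not an int subtype: coerce through the type's __int__/__long__ slot first, then convert the result. */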
val = __Pyx_PyInt_As_long(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to long"); + return (long) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to long"); + return (long) -1; +} + +/* FastTypeChecks */ + #if CYTHON_COMPILING_IN_CPYTHON +static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { + while (a) { + a = a->tp_base; + if (a == b) + return 1; + } + return b == &PyBaseObject_Type; +} +static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { + PyObject *mro; + if (a == b) return 1; + mro = a->tp_mro; + if (likely(mro)) { + Py_ssize_t i, n; + n = PyTuple_GET_SIZE(mro); + for (i = 0; i < n; i++) { + if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) + return 1; + } + return 0; + } + return __Pyx_InBases(a, b); +} +#if PY_MAJOR_VERSION == 2 +static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { + PyObject *exception, *value, *tb; + int res; + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ErrFetch(&exception, &value, &tb); + res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; + if (unlikely(res == -1)) { + PyErr_WriteUnraisable(err); + res = 0; + } + if (!res) { + res = PyObject_IsSubclass(err, exc_type2); + if (unlikely(res == -1)) { + PyErr_WriteUnraisable(err); + res = 0; + } + } + __Pyx_ErrRestore(exception, value, tb); + return res; +} +#else +static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { + int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; + if (!res) { + res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); + } + return res; +} +#endif +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { + if (likely(err == exc_type)) return 1; + if (likely(PyExceptionClass_Check(err))) { + return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); + } + return PyErr_GivenExceptionMatches(err, exc_type); +} +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { + if (likely(err == exc_type1 || err == exc_type2)) return 1; + if (likely(PyExceptionClass_Check(err))) { + return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); + } + return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); +} +#endif + +/* CheckBinaryVersion */ + static int __Pyx_check_binary_version(void) { + char ctversion[4], rtversion[4]; + PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); + PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); + if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { + char message[200]; + PyOS_snprintf(message, sizeof(message), + "compiletime version %s of module '%.100s' " + "does not match runtime version %s", + ctversion, __Pyx_MODULE_NAME, rtversion); + return PyErr_WarnEx(NULL, message, 1); + } + return 0; +} + +/* ModuleImport */ + #ifndef __PYX_HAVE_RT_ImportModule +#define __PYX_HAVE_RT_ImportModule +static PyObject *__Pyx_ImportModule(const char *name) { + PyObject *py_name = 0; + PyObject *py_module = 0; + py_name = __Pyx_PyIdentifier_FromString(name); + if (!py_name) + goto bad; + py_module = PyImport_Import(py_name); + Py_DECREF(py_name); + return py_module; +bad: + Py_XDECREF(py_name); + return 0; +} +#endif + 
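+/* The TypeImport helper below fetches an extension type (here numpy.ndarray) from another module and checks its tp_basicsize, so that binary-incompatible rebuilds fail at import time instead of crashing later. */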
+/* TypeImport */ + #ifndef __PYX_HAVE_RT_ImportType +#define __PYX_HAVE_RT_ImportType +static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, + size_t size, int strict) +{ + PyObject *py_module = 0; + PyObject *result = 0; + PyObject *py_name = 0; + char warning[200]; + Py_ssize_t basicsize; +#ifdef Py_LIMITED_API + PyObject *py_basicsize; +#endif + py_module = __Pyx_ImportModule(module_name); + if (!py_module) + goto bad; + py_name = __Pyx_PyIdentifier_FromString(class_name); + if (!py_name) + goto bad; + result = PyObject_GetAttr(py_module, py_name); + Py_DECREF(py_name); + py_name = 0; + Py_DECREF(py_module); + py_module = 0; + if (!result) + goto bad; + if (!PyType_Check(result)) { + PyErr_Format(PyExc_TypeError, + "%.200s.%.200s is not a type object", + module_name, class_name); + goto bad; + } +#ifndef Py_LIMITED_API + basicsize = ((PyTypeObject *)result)->tp_basicsize; +#else + py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); + if (!py_basicsize) + goto bad; + basicsize = PyLong_AsSsize_t(py_basicsize); + Py_DECREF(py_basicsize); + py_basicsize = 0; + if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) + goto bad; +#endif + if (!strict && (size_t)basicsize > size) { + PyOS_snprintf(warning, sizeof(warning), + "%s.%s size changed, may indicate binary incompatibility. Expected %zd, got %zd", + module_name, class_name, basicsize, size); + if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; + } + else if ((size_t)basicsize != size) { + PyErr_Format(PyExc_ValueError, + "%.200s.%.200s has the wrong size, try recompiling. Expected %zd, got %zd", + module_name, class_name, basicsize, size); + goto bad; + } + return (PyTypeObject *)result; +bad: + Py_XDECREF(py_module); + Py_XDECREF(result); + return NULL; +} +#endif + +/* InitStrings */ + static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { + while (t->p) { + #if PY_MAJOR_VERSION < 3 + if (t->is_unicode) { + *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); + } else if (t->intern) { + *t->p = PyString_InternFromString(t->s); + } else { + *t->p = PyString_FromStringAndSize(t->s, t->n - 1); + } + #else + if (t->is_unicode | t->is_str) { + if (t->intern) { + *t->p = PyUnicode_InternFromString(t->s); + } else if (t->encoding) { + *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); + } else { + *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); + } + } else { + *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); + } + #endif + if (!*t->p) + return -1; + if (PyObject_Hash(*t->p) == -1) + PyErr_Clear(); + ++t; + } + return 0; +} + +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { + return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); +} +static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { + Py_ssize_t ignore; + return __Pyx_PyObject_AsStringAndSize(o, &ignore); +} +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT +#if !CYTHON_PEP393_ENABLED +static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { + char* defenc_c; + PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); + if (!defenc) return NULL; + defenc_c = PyBytes_AS_STRING(defenc); +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + { + char* end = defenc_c + PyBytes_GET_SIZE(defenc); + char* c; + for (c = defenc_c; c < end; c++) { + if ((unsigned char) (*c) >= 128) { + PyUnicode_AsASCIIString(o); + return NULL; + } + } + } +#endif + *length = PyBytes_GET_SIZE(defenc); + return defenc_c; +} 
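+/* PEP 393 (Python >= 3.3) builds take the branch below: ASCII-only unicode can be handed out directly via PyUnicode_AsUTF8. */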
+#else +static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { + if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + if (likely(PyUnicode_IS_ASCII(o))) { + *length = PyUnicode_GET_LENGTH(o); + return PyUnicode_AsUTF8(o); + } else { + PyUnicode_AsASCIIString(o); + return NULL; + } +#else + return PyUnicode_AsUTF8AndSize(o, length); +#endif +} +#endif +#endif +static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT + if ( +#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + __Pyx_sys_getdefaultencoding_not_ascii && +#endif + PyUnicode_Check(o)) { + return __Pyx_PyUnicode_AsStringAndSize(o, length); + } else +#endif +#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) + if (PyByteArray_Check(o)) { + *length = PyByteArray_GET_SIZE(o); + return PyByteArray_AS_STRING(o); + } else +#endif + { + char* result; + int r = PyBytes_AsStringAndSize(o, &result, length); + if (unlikely(r < 0)) { + return NULL; + } else { + return result; + } + } +} +static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { + int is_true = x == Py_True; + if (is_true | (x == Py_False) | (x == Py_None)) return is_true; + else return PyObject_IsTrue(x); +} +static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { +#if PY_MAJOR_VERSION >= 3 + if (PyLong_Check(result)) { + if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, + "__int__ returned non-int (type %.200s). " + "The ability to return an instance of a strict subclass of int " + "is deprecated, and may be removed in a future version of Python.", + Py_TYPE(result)->tp_name)) { + Py_DECREF(result); + return NULL; + } + return result; + } +#endif + PyErr_Format(PyExc_TypeError, + "__%.4s__ returned non-%.4s (type %.200s)", + type_name, type_name, Py_TYPE(result)->tp_name); + Py_DECREF(result); + return NULL; +} +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { +#if CYTHON_USE_TYPE_SLOTS + PyNumberMethods *m; +#endif + const char *name = NULL; + PyObject *res = NULL; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x) || PyLong_Check(x))) +#else + if (likely(PyLong_Check(x))) +#endif + return __Pyx_NewRef(x); +#if CYTHON_USE_TYPE_SLOTS + m = Py_TYPE(x)->tp_as_number; + #if PY_MAJOR_VERSION < 3 + if (m && m->nb_int) { + name = "int"; + res = m->nb_int(x); + } + else if (m && m->nb_long) { + name = "long"; + res = m->nb_long(x); + } + #else + if (likely(m && m->nb_int)) { + name = "int"; + res = m->nb_int(x); + } + #endif +#else + if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { + res = PyNumber_Int(x); + } +#endif + if (likely(res)) { +#if PY_MAJOR_VERSION < 3 + if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { +#else + if (unlikely(!PyLong_CheckExact(res))) { +#endif + return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); + } + } + else if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_TypeError, + "an integer is required"); + } + return res; +} +static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { + Py_ssize_t ival; + PyObject *x; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_CheckExact(b))) { + if (sizeof(Py_ssize_t) >= sizeof(long)) + return PyInt_AS_LONG(b); + else + return PyInt_AsSsize_t(x); + } +#endif + if (likely(PyLong_CheckExact(b))) { + #if CYTHON_USE_PYLONG_INTERNALS 
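+ /* Descriptive note (added): CPython-only fast path — peek at the PyLong's ob_digit words directly and assemble values spanning at most four PyLong_SHIFT-bit digits with shifts and ors, falling back to PyLong_AsSsize_t() below for anything longer. */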
+ const digit* digits = ((PyLongObject*)b)->ob_digit; + const Py_ssize_t size = Py_SIZE(b); + if (likely(__Pyx_sst_abs(size) <= 1)) { + ival = likely(size) ? digits[0] : 0; + if (size == -1) ival = -ival; + return ival; + } else { + switch (size) { + case 2: + if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { + return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -2: + if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case 3: + if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { + return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -3: + if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case 4: + if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { + return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -4: + if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + } + } + #endif + return PyLong_AsSsize_t(b); + } + x = PyNumber_Index(b); + if (!x) return -1; + ival = PyInt_AsSsize_t(x); + Py_DECREF(x); + return ival; +} +static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { + return PyInt_FromSize_t(ival); +} + + +#endif /* Py_PYTHON_H */ diff --git a/astropy/convolution/boundary_none.pyx b/astropy/convolution/boundary_none.pyx new file mode 100644 index 0000000..33fc39e --- /dev/null +++ b/astropy/convolution/boundary_none.pyx @@ -0,0 +1,175 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import division +import numpy as np +cimport numpy as np + + +DTYPE = np.float +ctypedef np.float_t DTYPE_t + +cdef extern from "numpy/npy_math.h" nogil: + bint npy_isnan(double x) + +cimport cython + + +@cython.boundscheck(False) # turn off bounds-checking for entire function +def convolve1d_boundary_none(np.ndarray[DTYPE_t, ndim=1] f, + np.ndarray[DTYPE_t, ndim=1] g, + bint normalize_by_kernel): + + if g.shape[0] % 2 != 1: + raise ValueError("Convolution kernel must have odd dimensions") + + assert f.dtype == DTYPE and g.dtype == DTYPE + + cdef int nx = f.shape[0] + cdef int nkx = g.shape[0] + cdef int wkx = nkx // 2 + + # The following need to be set to zeros rather than empty because the + # boundary does not get reset. + cdef np.ndarray[DTYPE_t, ndim=1] conv = np.zeros([nx], dtype=DTYPE) + + cdef unsigned int i, ii + + cdef int iimin, iimax + + cdef DTYPE_t top, bot, ker, val + + # release the GIL + with nogil: + + # Now run the proper convolution + for i in range(wkx, nx - wkx): + top = 0. + bot = 0. 
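+ # Accumulate the kernel-weighted sum ("top") and the sum of the kernel weights actually applied ("bot"); NaN input values are excluded from both sums, so with normalize_by_kernel=True they are effectively interpolated over rather than propagated.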
+ for ii in range(i - wkx, i + wkx + 1): + val = f[ii] + ker = g[(nkx - 1 - (wkx + ii - i))] + if not npy_isnan(val): + top += val * ker + bot += ker + if normalize_by_kernel: + if bot == 0: + conv[i] = f[i] + else: + conv[i] = top / bot + else: + conv[i] = top + # GIL acquired again here + return conv + + +@cython.boundscheck(False) # turn off bounds-checking for entire function +def convolve2d_boundary_none(np.ndarray[DTYPE_t, ndim=2] f, + np.ndarray[DTYPE_t, ndim=2] g, + bint normalize_by_kernel): + + if g.shape[0] % 2 != 1 or g.shape[1] % 2 != 1: + raise ValueError("Convolution kernel must have odd dimensions") + + assert f.dtype == DTYPE and g.dtype == DTYPE + + cdef int nx = f.shape[0] + cdef int ny = f.shape[1] + cdef int nkx = g.shape[0] + cdef int nky = g.shape[1] + cdef int wkx = nkx // 2 + cdef int wky = nky // 2 + + # The following need to be set to zeros rather than empty because the + # boundary does not get reset. + cdef np.ndarray[DTYPE_t, ndim=2] conv = np.zeros([nx, ny], dtype=DTYPE) + + cdef unsigned int i, j, ii, jj + + cdef int iimin, iimax, jjmin, jjmax + + cdef DTYPE_t top, bot, ker, val + + # release the GIL + with nogil: + + # Now run the proper convolution + for i in range(wkx, nx - wkx): + for j in range(wky, ny - wky): + top = 0. + bot = 0. + for ii in range(i - wkx, i + wkx + 1): + for jj in range(j - wky, j + wky + 1): + val = f[ii, jj] + ker = g[(nkx - 1 - (wkx + ii - i)), + (nky - 1 - (wky + jj - j))] + if not npy_isnan(val): + top += val * ker + bot += ker + if normalize_by_kernel: + if bot == 0: + conv[i, j] = f[i, j] + else: + conv[i, j] = top / bot + else: + conv[i, j] = top + # GIL acquired again here + return conv + + +@cython.boundscheck(False) # turn off bounds-checking for entire function +def convolve3d_boundary_none(np.ndarray[DTYPE_t, ndim=3] f, + np.ndarray[DTYPE_t, ndim=3] g, + bint normalize_by_kernel): + + if g.shape[0] % 2 != 1 or g.shape[1] % 2 != 1 or g.shape[2] % 2 != 1: + raise ValueError("Convolution kernel must have odd dimensions") + + assert f.dtype == DTYPE and g.dtype == DTYPE + + cdef int nx = f.shape[0] + cdef int ny = f.shape[1] + cdef int nz = f.shape[2] + cdef int nkx = g.shape[0] + cdef int nky = g.shape[1] + cdef int nkz = g.shape[2] + cdef int wkx = nkx // 2 + cdef int wky = nky // 2 + cdef int wkz = nkz // 2 + + # The following need to be set to zeros rather than empty because the + # boundary does not get reset. + cdef np.ndarray[DTYPE_t, ndim=3] conv = np.zeros([nx, ny, nz], dtype=DTYPE) + + cdef unsigned int i, j, k, ii, jj, kk + + cdef int iimin, iimax, jjmin, jjmax, kkmin, kkmax + + cdef DTYPE_t top, bot, ker, val + + # release the GIL + with nogil: + + # Now run the proper convolution + for i in range(wkx, nx - wkx): + for j in range(wky, ny - wky): + for k in range(wkz, nz - wkz): + top = 0. + bot = 0. 
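+ # Same accumulation scheme as the 1D and 2D cases: "top" collects kernel-weighted values, "bot" the weights used, and NaN voxels are left out of both sums.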
+ for ii in range(i - wkx, i + wkx + 1): + for jj in range(j - wky, j + wky + 1): + for kk in range(k - wkz, k + wkz + 1): + val = f[ii, jj, kk] + ker = g[(nkx - 1 - (wkx + ii - i)), + (nky - 1 - (wky + jj - j)), + (nkz - 1 - (wkz + kk - k))] + if not npy_isnan(val): + top += val * ker + bot += ker + if normalize_by_kernel: + if bot == 0: + conv[i, j, k] = f[i, j, k] + else: + conv[i, j, k] = top / bot + else: + conv[i, j, k] = top + # GIL acquired again here + return conv diff --git a/astropy/convolution/boundary_wrap.c b/astropy/convolution/boundary_wrap.c new file mode 100644 index 0000000..cd010c0 --- /dev/null +++ b/astropy/convolution/boundary_wrap.c @@ -0,0 +1,9903 @@ +/* Generated by Cython 0.27.3 */ + +#define PY_SSIZE_T_CLEAN +#include "Python.h" +#ifndef Py_PYTHON_H + #error Python headers needed to compile C extensions, please install development version of Python. +#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) + #error Cython requires Python 2.6+ or Python 3.3+. +#else +#define CYTHON_ABI "0_27_3" +#define CYTHON_FUTURE_DIVISION 1 +#include <stddef.h> +#ifndef offsetof + #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) +#endif +#if !defined(WIN32) && !defined(MS_WINDOWS) + #ifndef __stdcall + #define __stdcall + #endif + #ifndef __cdecl + #define __cdecl + #endif + #ifndef __fastcall + #define __fastcall + #endif +#endif +#ifndef DL_IMPORT + #define DL_IMPORT(t) t +#endif +#ifndef DL_EXPORT + #define DL_EXPORT(t) t +#endif +#define __PYX_COMMA , +#ifndef HAVE_LONG_LONG + #if PY_VERSION_HEX >= 0x02070000 + #define HAVE_LONG_LONG + #endif +#endif +#ifndef PY_LONG_LONG + #define PY_LONG_LONG LONG_LONG +#endif +#ifndef Py_HUGE_VAL + #define Py_HUGE_VAL HUGE_VAL +#endif +#ifdef PYPY_VERSION + #define CYTHON_COMPILING_IN_PYPY 1 + #define CYTHON_COMPILING_IN_PYSTON 0 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #undef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 0 + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #if PY_VERSION_HEX < 0x03050000 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #undef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 0 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #undef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 1 + #undef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 0 + #undef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 0 + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 +#elif defined(PYSTON_VERSION) + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_PYSTON 1 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #ifndef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif + 
#undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 +#else + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_PYSTON 0 + #define CYTHON_COMPILING_IN_CPYTHON 1 + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif + #if PY_VERSION_HEX < 0x02070000 + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) + #define CYTHON_USE_PYTYPE_LOOKUP 1 + #endif + #if PY_MAJOR_VERSION < 3 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #if PY_VERSION_HEX < 0x02070000 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #elif !defined(CYTHON_USE_PYLONG_INTERNALS) + #define CYTHON_USE_PYLONG_INTERNALS 1 + #endif + #ifndef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 1 + #endif + #ifndef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif + #if PY_VERSION_HEX < 0x030300F0 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #elif !defined(CYTHON_USE_UNICODE_WRITER) + #define CYTHON_USE_UNICODE_WRITER 1 + #endif + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #ifndef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 1 + #endif + #ifndef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 1 + #endif + #ifndef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT (0 && PY_VERSION_HEX >= 0x03050000) + #endif + #ifndef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) + #endif +#endif +#if !defined(CYTHON_FAST_PYCCALL) +#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) +#endif +#if CYTHON_USE_PYLONG_INTERNALS + #include "longintrepr.h" + #undef SHIFT + #undef BASE + #undef MASK +#endif +#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) + #define Py_OptimizeFlag 0 +#endif +#define __PYX_BUILD_PY_SSIZE_T "n" +#define CYTHON_FORMAT_SSIZE_T "z" +#if PY_MAJOR_VERSION < 3 + #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) + #define __Pyx_DefaultClassType PyClass_Type +#else + #define __Pyx_BUILTIN_MODULE_NAME "builtins" + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) + #define __Pyx_DefaultClassType PyType_Type +#endif +#ifndef Py_TPFLAGS_CHECKTYPES + #define Py_TPFLAGS_CHECKTYPES 0 +#endif +#ifndef Py_TPFLAGS_HAVE_INDEX 
+ #define Py_TPFLAGS_HAVE_INDEX 0 +#endif +#ifndef Py_TPFLAGS_HAVE_NEWBUFFER + #define Py_TPFLAGS_HAVE_NEWBUFFER 0 +#endif +#ifndef Py_TPFLAGS_HAVE_FINALIZE + #define Py_TPFLAGS_HAVE_FINALIZE 0 +#endif +#if PY_VERSION_HEX < 0x030700A0 || !defined(METH_FASTCALL) + #ifndef METH_FASTCALL + #define METH_FASTCALL 0x80 + #endif + typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject **args, Py_ssize_t nargs); + typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject **args, + Py_ssize_t nargs, PyObject *kwnames); +#else + #define __Pyx_PyCFunctionFast _PyCFunctionFast + #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords +#endif +#if CYTHON_FAST_PYCCALL +#define __Pyx_PyFastCFunction_Check(func)\ + ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS))))) +#else +#define __Pyx_PyFastCFunction_Check(func) 0 +#endif +#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 + #define __Pyx_PyThreadState_Current PyThreadState_GET() +#elif PY_VERSION_HEX >= 0x03060000 + #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() +#elif PY_VERSION_HEX >= 0x03000000 + #define __Pyx_PyThreadState_Current PyThreadState_GET() +#else + #define __Pyx_PyThreadState_Current _PyThreadState_Current +#endif +#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) +#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n)) +#else +#define __Pyx_PyDict_NewPresized(n) PyDict_New() +#endif +#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION + #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) +#else + #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) +#endif +#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) + #define CYTHON_PEP393_ENABLED 1 + #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ + 0 : _PyUnicode_Ready((PyObject *)(op))) + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) + #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) + #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) + #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) +#else + #define CYTHON_PEP393_ENABLED 0 + #define PyUnicode_1BYTE_KIND 1 + #define PyUnicode_2BYTE_KIND 2 + #define PyUnicode_4BYTE_KIND 4 + #define __Pyx_PyUnicode_READY(op) (0) + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 
65535 : 1114111) + #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) + #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) + #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) +#endif +#if CYTHON_COMPILING_IN_PYPY + #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) + #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) +#else + #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) + #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ + PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) + #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) + #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) + #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) + #define PyObject_Malloc(s) PyMem_Malloc(s) + #define PyObject_Free(p) PyMem_Free(p) + #define PyObject_Realloc(p) PyMem_Realloc(p) +#endif +#if CYTHON_COMPILING_IN_PYSTON + #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) + #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) +#else + #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) + #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) +#endif +#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) +#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) +#else + #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) +#endif +#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) + #define PyObject_ASCII(o) PyObject_Repr(o) +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyBaseString_Type PyUnicode_Type + #define PyStringObject PyUnicodeObject + #define PyString_Type PyUnicode_Type + #define PyString_Check PyUnicode_Check + #define PyString_CheckExact PyUnicode_CheckExact +#endif +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) + #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) +#else + #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) + #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) +#endif +#ifndef PySet_CheckExact + #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) +#endif +#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) +#if PY_MAJOR_VERSION >= 3 + #define PyIntObject PyLongObject + #define PyInt_Type PyLong_Type + #define PyInt_Check(op) PyLong_Check(op) + #define PyInt_CheckExact(op) PyLong_CheckExact(op) + #define PyInt_FromString PyLong_FromString + #define PyInt_FromUnicode PyLong_FromUnicode + #define PyInt_FromLong PyLong_FromLong + #define PyInt_FromSize_t PyLong_FromSize_t + #define PyInt_FromSsize_t PyLong_FromSsize_t + #define PyInt_AsLong PyLong_AsLong + #define PyInt_AS_LONG PyLong_AS_LONG + #define PyInt_AsSsize_t PyLong_AsSsize_t + #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask + #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask + #define PyNumber_Int PyNumber_Long +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyBoolObject PyLongObject +#endif +#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY + #ifndef PyUnicode_InternFromString + #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) + #endif +#endif +#if PY_VERSION_HEX < 0x030200A4 + typedef long Py_hash_t; + #define __Pyx_PyInt_FromHash_t PyInt_FromLong + #define __Pyx_PyInt_AsHash_t PyInt_AsLong +#else + #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t + #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t +#endif +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyMethod_New(func, self, klass) ((self) ? 
PyMethod_New(func, self) : PyInstanceMethod_New(func)) +#else + #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) +#endif +#ifndef __has_attribute + #define __has_attribute(x) 0 +#endif +#ifndef __has_cpp_attribute + #define __has_cpp_attribute(x) 0 +#endif +#if CYTHON_USE_ASYNC_SLOTS + #if PY_VERSION_HEX >= 0x030500B1 + #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods + #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) + #else + #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) + #endif +#else + #define __Pyx_PyType_AsAsync(obj) NULL +#endif +#ifndef __Pyx_PyAsyncMethodsStruct + typedef struct { + unaryfunc am_await; + unaryfunc am_aiter; + unaryfunc am_anext; + } __Pyx_PyAsyncMethodsStruct; +#endif +#ifndef CYTHON_RESTRICT + #if defined(__GNUC__) + #define CYTHON_RESTRICT __restrict__ + #elif defined(_MSC_VER) && _MSC_VER >= 1400 + #define CYTHON_RESTRICT __restrict + #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define CYTHON_RESTRICT restrict + #else + #define CYTHON_RESTRICT + #endif +#endif +#ifndef CYTHON_UNUSED +# if defined(__GNUC__) +# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) +# define CYTHON_UNUSED __attribute__ ((__unused__)) +# else +# define CYTHON_UNUSED +# endif +# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) +# define CYTHON_UNUSED __attribute__ ((__unused__)) +# else +# define CYTHON_UNUSED +# endif +#endif +#ifndef CYTHON_MAYBE_UNUSED_VAR +# if defined(__cplusplus) + template<typename T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } +# else +# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) +# endif +#endif +#ifndef CYTHON_NCP_UNUSED +# if CYTHON_COMPILING_IN_CPYTHON +# define CYTHON_NCP_UNUSED +# else +# define CYTHON_NCP_UNUSED CYTHON_UNUSED +# endif +#endif +#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) +#ifdef _MSC_VER + #ifndef _MSC_STDINT_H_ + #if _MSC_VER < 1300 + typedef unsigned char uint8_t; + typedef unsigned int uint32_t; + #else + typedef unsigned __int8 uint8_t; + typedef unsigned __int32 uint32_t; + #endif + #endif +#else + #include <stdint.h> +#endif +#ifndef CYTHON_FALLTHROUGH + #if defined(__cplusplus) && __cplusplus >= 201103L + #if __has_cpp_attribute(fallthrough) + #define CYTHON_FALLTHROUGH [[fallthrough]] + #elif __has_cpp_attribute(clang::fallthrough) + #define CYTHON_FALLTHROUGH [[clang::fallthrough]] + #elif __has_cpp_attribute(gnu::fallthrough) + #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] + #endif + #endif + #ifndef CYTHON_FALLTHROUGH + #if __has_attribute(fallthrough) + #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) + #else + #define CYTHON_FALLTHROUGH + #endif + #endif + #if defined(__clang__ ) && defined(__apple_build_version__) + #if __apple_build_version__ < 7000000 + #undef CYTHON_FALLTHROUGH + #define CYTHON_FALLTHROUGH + #endif + #endif +#endif + +#ifndef CYTHON_INLINE + #if defined(__clang__) + #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) + #elif defined(__GNUC__) + #define CYTHON_INLINE __inline__ + #elif defined(_MSC_VER) + #define CYTHON_INLINE __inline + #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define CYTHON_INLINE inline + #else + #define CYTHON_INLINE + #endif +#endif + +#if defined(WIN32) || defined(MS_WINDOWS) + #define _USE_MATH_DEFINES +#endif +#include <math.h> +#ifdef NAN +#define __PYX_NAN() ((float) NAN) +#else +static CYTHON_INLINE float __PYX_NAN() { + float value; + 
memset(&value, 0xFF, sizeof(value)); + return value; +} +#endif +#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) +#define __Pyx_truncl trunc +#else +#define __Pyx_truncl truncl +#endif + + +#define __PYX_ERR(f_index, lineno, Ln_error) \ +{ \ + __pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \ +} + +#ifndef __PYX_EXTERN_C + #ifdef __cplusplus + #define __PYX_EXTERN_C extern "C" + #else + #define __PYX_EXTERN_C extern + #endif +#endif + +#define __PYX_HAVE__astropy__convolution__boundary_wrap +#define __PYX_HAVE_API__astropy__convolution__boundary_wrap +#include <string.h> +#include <stdio.h> +#include "numpy/arrayobject.h" +#include "numpy/ufuncobject.h" +#include "numpy/npy_math.h" +#ifdef _OPENMP +#include <omp.h> +#endif /* _OPENMP */ + +#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) +#define CYTHON_WITHOUT_ASSERTIONS +#endif + +typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; + const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; + +#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 +#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0 +#define __PYX_DEFAULT_STRING_ENCODING "" +#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString +#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize +#define __Pyx_uchar_cast(c) ((unsigned char)c) +#define __Pyx_long_cast(x) ((long)x) +#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ + (sizeof(type) < sizeof(Py_ssize_t)) ||\ + (sizeof(type) > sizeof(Py_ssize_t) &&\ + likely(v < (type)PY_SSIZE_T_MAX ||\ + v == (type)PY_SSIZE_T_MAX) &&\ + (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ + v == (type)PY_SSIZE_T_MIN))) ||\ + (sizeof(type) == sizeof(Py_ssize_t) &&\ + (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ + v == (type)PY_SSIZE_T_MAX))) ) +#if defined (__cplusplus) && __cplusplus >= 201103L + #include <cstdlib> + #define __Pyx_sst_abs(value) std::abs(value) +#elif SIZEOF_INT >= SIZEOF_SIZE_T + #define __Pyx_sst_abs(value) abs(value) +#elif SIZEOF_LONG >= SIZEOF_SIZE_T + #define __Pyx_sst_abs(value) labs(value) +#elif defined (_MSC_VER) + #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) +#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define __Pyx_sst_abs(value) llabs(value) +#elif defined (__GNUC__) + #define __Pyx_sst_abs(value) __builtin_llabs(value) +#else + #define __Pyx_sst_abs(value) ((value<0) ? 
-value : value) +#endif +static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); +static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); +#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) +#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) +#define __Pyx_PyBytes_FromString PyBytes_FromString +#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); +#if PY_MAJOR_VERSION < 3 + #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString + #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize +#else + #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString + #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize +#endif +#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) +#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) +#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) +#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) +#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) +static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { + const Py_UNICODE *u_end = u; + while (*u_end++) ; + return (size_t)(u_end - u - 1); +} +#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) +#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode +#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode +#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) +#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) +#define __Pyx_PyBool_FromLong(b) ((b) ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False)) +static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); +#define __Pyx_PySequence_Tuple(obj)\ + (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) +static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); +static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); +#if CYTHON_ASSUME_SAFE_MACROS +#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) +#else +#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) +#endif +#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) +#if PY_MAJOR_VERSION >= 3 +#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? 
__Pyx_NewRef(x) : PyNumber_Long(x)) +#else +#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) +#endif +#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) +#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII +static int __Pyx_sys_getdefaultencoding_not_ascii; +static int __Pyx_init_sys_getdefaultencoding_params(void) { + PyObject* sys; + PyObject* default_encoding = NULL; + PyObject* ascii_chars_u = NULL; + PyObject* ascii_chars_b = NULL; + const char* default_encoding_c; + sys = PyImport_ImportModule("sys"); + if (!sys) goto bad; + default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); + Py_DECREF(sys); + if (!default_encoding) goto bad; + default_encoding_c = PyBytes_AsString(default_encoding); + if (!default_encoding_c) goto bad; + if (strcmp(default_encoding_c, "ascii") == 0) { + __Pyx_sys_getdefaultencoding_not_ascii = 0; + } else { + char ascii_chars[128]; + int c; + for (c = 0; c < 128; c++) { + ascii_chars[c] = c; + } + __Pyx_sys_getdefaultencoding_not_ascii = 1; + ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); + if (!ascii_chars_u) goto bad; + ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); + if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { + PyErr_Format( + PyExc_ValueError, + "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", + default_encoding_c); + goto bad; + } + Py_DECREF(ascii_chars_u); + Py_DECREF(ascii_chars_b); + } + Py_DECREF(default_encoding); + return 0; +bad: + Py_XDECREF(default_encoding); + Py_XDECREF(ascii_chars_u); + Py_XDECREF(ascii_chars_b); + return -1; +} +#endif +#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 +#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) +#else +#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) +#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT +static char* __PYX_DEFAULT_STRING_ENCODING; +static int __Pyx_init_sys_getdefaultencoding_params(void) { + PyObject* sys; + PyObject* default_encoding = NULL; + char* default_encoding_c; + sys = PyImport_ImportModule("sys"); + if (!sys) goto bad; + default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); + Py_DECREF(sys); + if (!default_encoding) goto bad; + default_encoding_c = PyBytes_AsString(default_encoding); + if (!default_encoding_c) goto bad; + __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c)); + if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; + strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); + Py_DECREF(default_encoding); + return 0; +bad: + Py_XDECREF(default_encoding); + return -1; +} +#endif +#endif + + +/* Test for GCC > 2.95 */ +#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) + #define likely(x) __builtin_expect(!!(x), 1) + #define unlikely(x) __builtin_expect(!!(x), 0) +#else /* !__GNUC__ or GCC < 2.95 */ + #define likely(x) (x) + #define unlikely(x) (x) +#endif /* __GNUC__ */ +static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } + +static PyObject *__pyx_m = NULL; +static PyObject *__pyx_d; +static PyObject *__pyx_b; +static PyObject *__pyx_cython_runtime; +static PyObject *__pyx_empty_tuple; +static PyObject 
*__pyx_empty_bytes; +static PyObject *__pyx_empty_unicode; +static int __pyx_lineno; +static int __pyx_clineno = 0; +static const char * __pyx_cfilenm= __FILE__; +static const char *__pyx_filename; + +/* Header.proto */ +#if !defined(CYTHON_CCOMPLEX) + #if defined(__cplusplus) + #define CYTHON_CCOMPLEX 1 + #elif defined(_Complex_I) + #define CYTHON_CCOMPLEX 1 + #else + #define CYTHON_CCOMPLEX 0 + #endif +#endif +#if CYTHON_CCOMPLEX + #ifdef __cplusplus + #include <complex> + #else + #include <complex.h> + #endif +#endif +#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) + #undef _Complex_I + #define _Complex_I 1.0fj +#endif + + +static const char *__pyx_f[] = { + "astropy/convolution/boundary_wrap.pyx", + "__init__.pxd", + "type.pxd", +}; +/* BufferFormatStructs.proto */ +#define IS_UNSIGNED(type) (((type) -1) > 0) +struct __Pyx_StructField_; +#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) +typedef struct { + const char* name; + struct __Pyx_StructField_* fields; + size_t size; + size_t arraysize[8]; + int ndim; + char typegroup; + char is_unsigned; + int flags; +} __Pyx_TypeInfo; +typedef struct __Pyx_StructField_ { + __Pyx_TypeInfo* type; + const char* name; + size_t offset; +} __Pyx_StructField; +typedef struct { + __Pyx_StructField* field; + size_t parent_offset; +} __Pyx_BufFmt_StackElem; +typedef struct { + __Pyx_StructField root; + __Pyx_BufFmt_StackElem* head; + size_t fmt_offset; + size_t new_count, enc_count; + size_t struct_alignment; + int is_complex; + char enc_type; + char new_packmode; + char enc_packmode; + char is_valid_array; +} __Pyx_BufFmt_Context; + +/* NoFastGil.proto */ +#define __Pyx_PyGILState_Ensure PyGILState_Ensure +#define __Pyx_PyGILState_Release PyGILState_Release +#define __Pyx_FastGIL_Remember() +#define __Pyx_FastGIL_Forget() +#define __Pyx_FastGilFuncInit() + +/* ForceInitThreads.proto */ +#ifndef __PYX_FORCE_INIT_THREADS + #define __PYX_FORCE_INIT_THREADS 0 +#endif + + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":743 + * # in Cython to enable them only on the right systems. 
+ * + * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< + * ctypedef npy_int16 int16_t + * ctypedef npy_int32 int32_t + */ +typedef npy_int8 __pyx_t_5numpy_int8_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":744 + * + * ctypedef npy_int8 int8_t + * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< + * ctypedef npy_int32 int32_t + * ctypedef npy_int64 int64_t + */ +typedef npy_int16 __pyx_t_5numpy_int16_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":745 + * ctypedef npy_int8 int8_t + * ctypedef npy_int16 int16_t + * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< + * ctypedef npy_int64 int64_t + * #ctypedef npy_int96 int96_t + */ +typedef npy_int32 __pyx_t_5numpy_int32_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":746 + * ctypedef npy_int16 int16_t + * ctypedef npy_int32 int32_t + * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< + * #ctypedef npy_int96 int96_t + * #ctypedef npy_int128 int128_t + */ +typedef npy_int64 __pyx_t_5numpy_int64_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":750 + * #ctypedef npy_int128 int128_t + * + * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< + * ctypedef npy_uint16 uint16_t + * ctypedef npy_uint32 uint32_t + */ +typedef npy_uint8 __pyx_t_5numpy_uint8_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":751 + * + * ctypedef npy_uint8 uint8_t + * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< + * ctypedef npy_uint32 uint32_t + * ctypedef npy_uint64 uint64_t + */ +typedef npy_uint16 __pyx_t_5numpy_uint16_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":752 + * ctypedef npy_uint8 uint8_t + * ctypedef npy_uint16 uint16_t + * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< + * ctypedef npy_uint64 uint64_t + * #ctypedef npy_uint96 uint96_t + */ +typedef npy_uint32 __pyx_t_5numpy_uint32_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":753 + * ctypedef npy_uint16 uint16_t + * ctypedef npy_uint32 uint32_t + * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< + * #ctypedef npy_uint96 uint96_t + * #ctypedef npy_uint128 uint128_t + */ +typedef npy_uint64 __pyx_t_5numpy_uint64_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":757 + * #ctypedef npy_uint128 uint128_t + * + * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< + * ctypedef npy_float64 float64_t + * #ctypedef npy_float80 float80_t + */ +typedef npy_float32 __pyx_t_5numpy_float32_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":758 + * + * ctypedef npy_float32 float32_t + * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< + * #ctypedef npy_float80 float80_t + * #ctypedef npy_float128 float128_t + */ +typedef npy_float64 __pyx_t_5numpy_float64_t; + +/* 
"../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":767 + * # The int types are mapped a bit surprising -- + * # numpy.int corresponds to 'l' and numpy.long to 'q' + * ctypedef npy_long int_t # <<<<<<<<<<<<<< + * ctypedef npy_longlong long_t + * ctypedef npy_longlong longlong_t + */ +typedef npy_long __pyx_t_5numpy_int_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":768 + * # numpy.int corresponds to 'l' and numpy.long to 'q' + * ctypedef npy_long int_t + * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< + * ctypedef npy_longlong longlong_t + * + */ +typedef npy_longlong __pyx_t_5numpy_long_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":769 + * ctypedef npy_long int_t + * ctypedef npy_longlong long_t + * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< + * + * ctypedef npy_ulong uint_t + */ +typedef npy_longlong __pyx_t_5numpy_longlong_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":771 + * ctypedef npy_longlong longlong_t + * + * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< + * ctypedef npy_ulonglong ulong_t + * ctypedef npy_ulonglong ulonglong_t + */ +typedef npy_ulong __pyx_t_5numpy_uint_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":772 + * + * ctypedef npy_ulong uint_t + * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< + * ctypedef npy_ulonglong ulonglong_t + * + */ +typedef npy_ulonglong __pyx_t_5numpy_ulong_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":773 + * ctypedef npy_ulong uint_t + * ctypedef npy_ulonglong ulong_t + * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< + * + * ctypedef npy_intp intp_t + */ +typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":775 + * ctypedef npy_ulonglong ulonglong_t + * + * ctypedef npy_intp intp_t # <<<<<<<<<<<<<< + * ctypedef npy_uintp uintp_t + * + */ +typedef npy_intp __pyx_t_5numpy_intp_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":776 + * + * ctypedef npy_intp intp_t + * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< + * + * ctypedef npy_double float_t + */ +typedef npy_uintp __pyx_t_5numpy_uintp_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":778 + * ctypedef npy_uintp uintp_t + * + * ctypedef npy_double float_t # <<<<<<<<<<<<<< + * ctypedef npy_double double_t + * ctypedef npy_longdouble longdouble_t + */ +typedef npy_double __pyx_t_5numpy_float_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":779 + * + * ctypedef npy_double float_t + * ctypedef npy_double double_t # <<<<<<<<<<<<<< + * ctypedef npy_longdouble longdouble_t + * + */ +typedef npy_double __pyx_t_5numpy_double_t; + +/* 
"../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":780 + * ctypedef npy_double float_t + * ctypedef npy_double double_t + * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< + * + * ctypedef npy_cfloat cfloat_t + */ +typedef npy_longdouble __pyx_t_5numpy_longdouble_t; + +/* "astropy/convolution/boundary_wrap.pyx":7 + * + * DTYPE = np.float + * ctypedef np.float_t DTYPE_t # <<<<<<<<<<<<<< + * + * cdef extern from "numpy/npy_math.h" nogil: + */ +typedef __pyx_t_5numpy_float_t __pyx_t_7astropy_11convolution_13boundary_wrap_DTYPE_t; +/* Declarations.proto */ +#if CYTHON_CCOMPLEX + #ifdef __cplusplus + typedef ::std::complex< float > __pyx_t_float_complex; + #else + typedef float _Complex __pyx_t_float_complex; + #endif +#else + typedef struct { float real, imag; } __pyx_t_float_complex; +#endif +static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); + +/* Declarations.proto */ +#if CYTHON_CCOMPLEX + #ifdef __cplusplus + typedef ::std::complex< double > __pyx_t_double_complex; + #else + typedef double _Complex __pyx_t_double_complex; + #endif +#else + typedef struct { double real, imag; } __pyx_t_double_complex; +#endif +static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); + + +/*--- Type declarations ---*/ + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":782 + * ctypedef npy_longdouble longdouble_t + * + * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< + * ctypedef npy_cdouble cdouble_t + * ctypedef npy_clongdouble clongdouble_t + */ +typedef npy_cfloat __pyx_t_5numpy_cfloat_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":783 + * + * ctypedef npy_cfloat cfloat_t + * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< + * ctypedef npy_clongdouble clongdouble_t + * + */ +typedef npy_cdouble __pyx_t_5numpy_cdouble_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":784 + * ctypedef npy_cfloat cfloat_t + * ctypedef npy_cdouble cdouble_t + * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< + * + * ctypedef npy_cdouble complex_t + */ +typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":786 + * ctypedef npy_clongdouble clongdouble_t + * + * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew1(a): + */ +typedef npy_cdouble __pyx_t_5numpy_complex_t; + +/* --- Runtime support code (head) --- */ +/* Refnanny.proto */ +#ifndef CYTHON_REFNANNY + #define CYTHON_REFNANNY 0 +#endif +#if CYTHON_REFNANNY + typedef struct { + void (*INCREF)(void*, PyObject*, int); + void (*DECREF)(void*, PyObject*, int); + void (*GOTREF)(void*, PyObject*, int); + void (*GIVEREF)(void*, PyObject*, int); + void* (*SetupContext)(const char*, int, const char*); + void (*FinishContext)(void**); + } __Pyx_RefNannyAPIStruct; + static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; + static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); + #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; +#ifdef WITH_THREAD + #define __Pyx_RefNannySetupContext(name, 
acquire_gil)\ + if (acquire_gil) {\ + PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ + PyGILState_Release(__pyx_gilstate_save);\ + } else {\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ + } +#else + #define __Pyx_RefNannySetupContext(name, acquire_gil)\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) +#endif + #define __Pyx_RefNannyFinishContext()\ + __Pyx_RefNanny->FinishContext(&__pyx_refnanny) + #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) + #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) + #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) + #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) +#else + #define __Pyx_RefNannyDeclarations + #define __Pyx_RefNannySetupContext(name, acquire_gil) + #define __Pyx_RefNannyFinishContext() + #define __Pyx_INCREF(r) Py_INCREF(r) + #define __Pyx_DECREF(r) Py_DECREF(r) + #define __Pyx_GOTREF(r) + #define __Pyx_GIVEREF(r) + #define __Pyx_XINCREF(r) Py_XINCREF(r) + #define __Pyx_XDECREF(r) Py_XDECREF(r) + #define __Pyx_XGOTREF(r) + #define __Pyx_XGIVEREF(r) +#endif +#define __Pyx_XDECREF_SET(r, v) do {\ + PyObject *tmp = (PyObject *) r;\ + r = v; __Pyx_XDECREF(tmp);\ + } while (0) +#define __Pyx_DECREF_SET(r, v) do {\ + PyObject *tmp = (PyObject *) r;\ + r = v; __Pyx_DECREF(tmp);\ + } while (0) +#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) +#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) + +/* PyObjectGetAttrStr.proto */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { + PyTypeObject* tp = Py_TYPE(obj); + if (likely(tp->tp_getattro)) + return tp->tp_getattro(obj, attr_name); +#if PY_MAJOR_VERSION < 3 + if (likely(tp->tp_getattr)) + return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); +#endif + return PyObject_GetAttr(obj, attr_name); +} +#else +#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) +#endif + +/* GetBuiltinName.proto */ +static PyObject *__Pyx_GetBuiltinName(PyObject *name); + +/* RaiseArgTupleInvalid.proto */ +static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, + Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); + +/* RaiseDoubleKeywords.proto */ +static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); + +/* ParseKeywords.proto */ +static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ + PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ + const char* function_name); + +/* ArgTypeTest.proto */ +#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\ + ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 
1 :\ + __Pyx__ArgTypeTest(obj, type, name, exact)) +static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); + +/* IsLittleEndian.proto */ +static CYTHON_INLINE int __Pyx_Is_Little_Endian(void); + +/* BufferFormatCheck.proto */ +static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); +static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, + __Pyx_BufFmt_StackElem* stack, + __Pyx_TypeInfo* type); + +/* BufferGetAndValidate.proto */ +#define __Pyx_GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack)\ + ((obj == Py_None || obj == NULL) ?\ + (__Pyx_ZeroBuffer(buf), 0) :\ + __Pyx__GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack)) +static int __Pyx__GetBufferAndValidate(Py_buffer* buf, PyObject* obj, + __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); +static void __Pyx_ZeroBuffer(Py_buffer* buf); +static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); +static Py_ssize_t __Pyx_minusones[] = { -1, -1, -1, -1, -1, -1, -1, -1 }; +static Py_ssize_t __Pyx_zeros[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; + +/* None.proto */ +static CYTHON_INLINE long __Pyx_mod_long(long, long); + +/* PyObjectCall.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); +#else +#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) +#endif + +/* PyThreadStateGet.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; +#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; +#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type +#else +#define __Pyx_PyThreadState_declare +#define __Pyx_PyThreadState_assign +#define __Pyx_PyErr_Occurred() PyErr_Occurred() +#endif + +/* PyErrFetchRestore.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) +#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) +#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) +#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) +#else +#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) +#endif +#else +#define __Pyx_PyErr_Clear() PyErr_Clear() +#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) +#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) +#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) +#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) +#endif + +/* RaiseException.proto */ +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); + 
+/* GetModuleGlobalName.proto */ +static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name); + +/* None.proto */ +static CYTHON_INLINE long __Pyx_div_long(long, long); + +/* ExtTypeTest.proto */ +static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); + +/* None.proto */ +static CYTHON_INLINE int __Pyx_mod_int(int, int); + +#define __Pyx_BufPtrStrided1d(type, buf, i0, s0) (type)((char*)buf + i0 * s0) +#define __Pyx_BufPtrStrided2d(type, buf, i0, s0, i1, s1) (type)((char*)buf + i0 * s0 + i1 * s1) +#define __Pyx_BufPtrStrided3d(type, buf, i0, s0, i1, s1, i2, s2) (type)((char*)buf + i0 * s0 + i1 * s1 + i2 * s2) +/* DictGetItem.proto */ +#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY +static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) { + PyObject *value; + value = PyDict_GetItemWithError(d, key); + if (unlikely(!value)) { + if (!PyErr_Occurred()) { + PyObject* args = PyTuple_Pack(1, key); + if (likely(args)) + PyErr_SetObject(PyExc_KeyError, args); + Py_XDECREF(args); + } + return NULL; + } + Py_INCREF(value); + return value; +} +#else + #define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key) +#endif + +/* RaiseTooManyValuesToUnpack.proto */ +static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); + +/* RaiseNeedMoreValuesToUnpack.proto */ +static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); + +/* RaiseNoneIterError.proto */ +static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); + +/* SaveResetException.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); +#else +#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) +#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) +#endif + +/* PyErrExceptionMatches.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) +static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); +#else +#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) +#endif + +/* GetException.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) +static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#else +static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); +#endif + +/* Import.proto */ +static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); + +/* CLineInTraceback.proto */ +#ifdef CYTHON_CLINE_IN_TRACEBACK +#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? 
c_line : 0) +#else +static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); +#endif + +/* CodeObjectCache.proto */ +typedef struct { + PyCodeObject* code_object; + int code_line; +} __Pyx_CodeObjectCacheEntry; +struct __Pyx_CodeObjectCache { + int count; + int max_count; + __Pyx_CodeObjectCacheEntry* entries; +}; +static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; +static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); +static PyCodeObject *__pyx_find_code_object(int code_line); +static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); + +/* AddTraceback.proto */ +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename); + +/* BufferStructDeclare.proto */ +typedef struct { + Py_ssize_t shape, strides, suboffsets; +} __Pyx_Buf_DimInfo; +typedef struct { + size_t refcount; + Py_buffer pybuffer; +} __Pyx_Buffer; +typedef struct { + __Pyx_Buffer *rcbuffer; + char *data; + __Pyx_Buf_DimInfo diminfo[8]; +} __Pyx_LocalBuf_ND; + +#if PY_MAJOR_VERSION < 3 + static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); + static void __Pyx_ReleaseBuffer(Py_buffer *view); +#else + #define __Pyx_GetBuffer PyObject_GetBuffer + #define __Pyx_ReleaseBuffer PyBuffer_Release +#endif + + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); + +/* RealImag.proto */ +#if CYTHON_CCOMPLEX + #ifdef __cplusplus + #define __Pyx_CREAL(z) ((z).real()) + #define __Pyx_CIMAG(z) ((z).imag()) + #else + #define __Pyx_CREAL(z) (__real__(z)) + #define __Pyx_CIMAG(z) (__imag__(z)) + #endif +#else + #define __Pyx_CREAL(z) ((z).real) + #define __Pyx_CIMAG(z) ((z).imag) +#endif +#if defined(__cplusplus) && CYTHON_CCOMPLEX\ + && (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103) + #define __Pyx_SET_CREAL(z,x) ((z).real(x)) + #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) +#else + #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) + #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) +#endif + +/* Arithmetic.proto */ +#if CYTHON_CCOMPLEX + #define __Pyx_c_eq_float(a, b) ((a)==(b)) + #define __Pyx_c_sum_float(a, b) ((a)+(b)) + #define __Pyx_c_diff_float(a, b) ((a)-(b)) + #define __Pyx_c_prod_float(a, b) ((a)*(b)) + #define __Pyx_c_quot_float(a, b) ((a)/(b)) + #define __Pyx_c_neg_float(a) (-(a)) + #ifdef __cplusplus + #define __Pyx_c_is_zero_float(z) ((z)==(float)0) + #define __Pyx_c_conj_float(z) (::std::conj(z)) + #if 1 + #define __Pyx_c_abs_float(z) (::std::abs(z)) + #define __Pyx_c_pow_float(a, b) (::std::pow(a, b)) + #endif + #else + #define __Pyx_c_is_zero_float(z) ((z)==0) + #define __Pyx_c_conj_float(z) (conjf(z)) + #if 1 + #define __Pyx_c_abs_float(z) (cabsf(z)) + #define __Pyx_c_pow_float(a, b) (cpowf(a, b)) + #endif + #endif +#else + static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex); + static 
CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex); + #if 1 + static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex, __pyx_t_float_complex); + #endif +#endif + +/* Arithmetic.proto */ +#if CYTHON_CCOMPLEX + #define __Pyx_c_eq_double(a, b) ((a)==(b)) + #define __Pyx_c_sum_double(a, b) ((a)+(b)) + #define __Pyx_c_diff_double(a, b) ((a)-(b)) + #define __Pyx_c_prod_double(a, b) ((a)*(b)) + #define __Pyx_c_quot_double(a, b) ((a)/(b)) + #define __Pyx_c_neg_double(a) (-(a)) + #ifdef __cplusplus + #define __Pyx_c_is_zero_double(z) ((z)==(double)0) + #define __Pyx_c_conj_double(z) (::std::conj(z)) + #if 1 + #define __Pyx_c_abs_double(z) (::std::abs(z)) + #define __Pyx_c_pow_double(a, b) (::std::pow(a, b)) + #endif + #else + #define __Pyx_c_is_zero_double(z) ((z)==0) + #define __Pyx_c_conj_double(z) (conj(z)) + #if 1 + #define __Pyx_c_abs_double(z) (cabs(z)) + #define __Pyx_c_pow_double(a, b) (cpow(a, b)) + #endif + #endif +#else + static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex); + static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex); + #if 1 + static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex, __pyx_t_double_complex); + #endif +#endif + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value); + +/* CIntFromPy.proto */ +static CYTHON_INLINE unsigned int __Pyx_PyInt_As_unsigned_int(PyObject *); + +/* CIntFromPy.proto */ +static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); + +/* CIntFromPy.proto */ +static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); + +/* FastTypeChecks.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) +static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); +#else +#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) +#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) +#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) +#endif + +/* CheckBinaryVersion.proto */ +static int __Pyx_check_binary_version(void); + +/* PyIdentifierFromString.proto */ +#if 
!defined(__Pyx_PyIdentifier_FromString) +#if PY_MAJOR_VERSION < 3 + #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s) +#else + #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s) +#endif +#endif + +/* ModuleImport.proto */ +static PyObject *__Pyx_ImportModule(const char *name); + +/* TypeImport.proto */ +static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); + +/* InitStrings.proto */ +static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); + + +/* Module declarations from 'cpython.buffer' */ + +/* Module declarations from 'libc.string' */ + +/* Module declarations from 'libc.stdio' */ + +/* Module declarations from '__builtin__' */ + +/* Module declarations from 'cpython.type' */ +static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; + +/* Module declarations from 'cpython' */ + +/* Module declarations from 'cpython.object' */ + +/* Module declarations from 'cpython.ref' */ + +/* Module declarations from 'cpython.mem' */ + +/* Module declarations from 'numpy' */ + +/* Module declarations from 'numpy' */ +static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; +static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; +static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; +static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; +static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; +static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ + +/* Module declarations from 'cython' */ + +/* Module declarations from 'astropy.convolution.boundary_wrap' */ +static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_7astropy_11convolution_13boundary_wrap_DTYPE_t = { "DTYPE_t", NULL, sizeof(__pyx_t_7astropy_11convolution_13boundary_wrap_DTYPE_t), { 0 }, 0, 'R', 0, 0 }; +#define __Pyx_MODULE_NAME "astropy.convolution.boundary_wrap" +extern int __pyx_module_is_main_astropy__convolution__boundary_wrap; +int __pyx_module_is_main_astropy__convolution__boundary_wrap = 0; + +/* Implementation of 'astropy.convolution.boundary_wrap' */ +static PyObject *__pyx_builtin_ValueError; +static PyObject *__pyx_builtin_range; +static PyObject *__pyx_builtin_RuntimeError; +static PyObject *__pyx_builtin_ImportError; +static const char __pyx_k_f[] = "f"; +static const char __pyx_k_g[] = "g"; +static const char __pyx_k_i[] = "i"; +static const char __pyx_k_j[] = "j"; +static const char __pyx_k_k[] = "k"; +static const char __pyx_k_ii[] = "ii"; +static const char __pyx_k_jj[] = "jj"; +static const char __pyx_k_kk[] = "kk"; +static const char __pyx_k_np[] = "np"; +static const char __pyx_k_nx[] = "nx"; +static const char __pyx_k_ny[] = "ny"; +static const char __pyx_k_nz[] = "nz"; +static const char __pyx_k_bot[] = "bot"; +static const char __pyx_k_iii[] = "iii"; +static const char __pyx_k_jjj[] = "jjj"; +static const char __pyx_k_ker[] = "ker"; +static const char __pyx_k_kkk[] = "kkk"; +static const char __pyx_k_nkx[] = "nkx"; +static const char __pyx_k_nky[] = "nky"; +static const char __pyx_k_nkz[] = "nkz"; +static const char __pyx_k_top[] = "top"; +static const char __pyx_k_val[] = "val"; +static const char __pyx_k_wkx[] = "wkx"; +static const char __pyx_k_wky[] = "wky"; +static const char __pyx_k_wkz[] = "wkz"; +static const char __pyx_k_conv[] = "conv"; +static const char __pyx_k_main[] = "__main__"; +static const char __pyx_k_test[] = "__test__"; +static const char __pyx_k_DTYPE[] = "DTYPE"; +static const char __pyx_k_dtype[] = "dtype"; +static const char __pyx_k_empty[] = "empty"; +static const 
char __pyx_k_float[] = "float"; +static const char __pyx_k_iimax[] = "iimax"; +static const char __pyx_k_iimin[] = "iimin"; +static const char __pyx_k_jjmax[] = "jjmax"; +static const char __pyx_k_jjmin[] = "jjmin"; +static const char __pyx_k_kkmax[] = "kkmax"; +static const char __pyx_k_kkmin[] = "kkmin"; +static const char __pyx_k_numpy[] = "numpy"; +static const char __pyx_k_range[] = "range"; +static const char __pyx_k_import[] = "__import__"; +static const char __pyx_k_ValueError[] = "ValueError"; +static const char __pyx_k_ImportError[] = "ImportError"; +static const char __pyx_k_RuntimeError[] = "RuntimeError"; +static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; +static const char __pyx_k_normalize_by_kernel[] = "normalize_by_kernel"; +static const char __pyx_k_convolve1d_boundary_wrap[] = "convolve1d_boundary_wrap"; +static const char __pyx_k_convolve2d_boundary_wrap[] = "convolve2d_boundary_wrap"; +static const char __pyx_k_convolve3d_boundary_wrap[] = "convolve3d_boundary_wrap"; +static const char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous"; +static const char __pyx_k_numpy_core_multiarray_failed_to[] = "numpy.core.multiarray failed to import"; +static const char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)"; +static const char __pyx_k_Convolution_kernel_must_have_odd[] = "Convolution kernel must have odd dimensions"; +static const char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd"; +static const char __pyx_k_Non_native_byte_order_not_suppor[] = "Non-native byte order not supported"; +static const char __pyx_k_astropy_convolution_boundary_wra[] = "astropy/convolution/boundary_wrap.pyx"; +static const char __pyx_k_ndarray_is_not_Fortran_contiguou[] = "ndarray is not Fortran contiguous"; +static const char __pyx_k_numpy_core_umath_failed_to_impor[] = "numpy.core.umath failed to import"; +static const char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short."; +static const char __pyx_k_astropy_convolution_boundary_wra_2[] = "astropy.convolution.boundary_wrap"; +static PyObject *__pyx_kp_s_Convolution_kernel_must_have_odd; +static PyObject *__pyx_n_s_DTYPE; +static PyObject *__pyx_kp_u_Format_string_allocated_too_shor; +static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2; +static PyObject *__pyx_n_s_ImportError; +static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor; +static PyObject *__pyx_n_s_RuntimeError; +static PyObject *__pyx_n_s_ValueError; +static PyObject *__pyx_kp_s_astropy_convolution_boundary_wra; +static PyObject *__pyx_n_s_astropy_convolution_boundary_wra_2; +static PyObject *__pyx_n_s_bot; +static PyObject *__pyx_n_s_cline_in_traceback; +static PyObject *__pyx_n_s_conv; +static PyObject *__pyx_n_s_convolve1d_boundary_wrap; +static PyObject *__pyx_n_s_convolve2d_boundary_wrap; +static PyObject *__pyx_n_s_convolve3d_boundary_wrap; +static PyObject *__pyx_n_s_dtype; +static PyObject *__pyx_n_s_empty; +static PyObject *__pyx_n_s_f; +static PyObject *__pyx_n_s_float; +static PyObject *__pyx_n_s_g; +static PyObject *__pyx_n_s_i; +static PyObject *__pyx_n_s_ii; +static PyObject *__pyx_n_s_iii; +static PyObject *__pyx_n_s_iimax; +static PyObject *__pyx_n_s_iimin; +static PyObject *__pyx_n_s_import; +static PyObject *__pyx_n_s_j; +static PyObject *__pyx_n_s_jj; +static PyObject *__pyx_n_s_jjj; +static PyObject *__pyx_n_s_jjmax; +static PyObject *__pyx_n_s_jjmin; +static PyObject 
*__pyx_n_s_k; +static PyObject *__pyx_n_s_ker; +static PyObject *__pyx_n_s_kk; +static PyObject *__pyx_n_s_kkk; +static PyObject *__pyx_n_s_kkmax; +static PyObject *__pyx_n_s_kkmin; +static PyObject *__pyx_n_s_main; +static PyObject *__pyx_kp_u_ndarray_is_not_C_contiguous; +static PyObject *__pyx_kp_u_ndarray_is_not_Fortran_contiguou; +static PyObject *__pyx_n_s_nkx; +static PyObject *__pyx_n_s_nky; +static PyObject *__pyx_n_s_nkz; +static PyObject *__pyx_n_s_normalize_by_kernel; +static PyObject *__pyx_n_s_np; +static PyObject *__pyx_n_s_numpy; +static PyObject *__pyx_kp_s_numpy_core_multiarray_failed_to; +static PyObject *__pyx_kp_s_numpy_core_umath_failed_to_impor; +static PyObject *__pyx_n_s_nx; +static PyObject *__pyx_n_s_ny; +static PyObject *__pyx_n_s_nz; +static PyObject *__pyx_n_s_range; +static PyObject *__pyx_n_s_test; +static PyObject *__pyx_n_s_top; +static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd; +static PyObject *__pyx_n_s_val; +static PyObject *__pyx_n_s_wkx; +static PyObject *__pyx_n_s_wky; +static PyObject *__pyx_n_s_wkz; +static PyObject *__pyx_pf_7astropy_11convolution_13boundary_wrap_convolve1d_boundary_wrap(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_f, PyArrayObject *__pyx_v_g, int __pyx_v_normalize_by_kernel); /* proto */ +static PyObject *__pyx_pf_7astropy_11convolution_13boundary_wrap_2convolve2d_boundary_wrap(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_f, PyArrayObject *__pyx_v_g, int __pyx_v_normalize_by_kernel); /* proto */ +static PyObject *__pyx_pf_7astropy_11convolution_13boundary_wrap_4convolve3d_boundary_wrap(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_f, PyArrayObject *__pyx_v_g, int __pyx_v_normalize_by_kernel); /* proto */ +static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ +static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */ +static PyObject *__pyx_tuple_; +static PyObject *__pyx_tuple__2; +static PyObject *__pyx_tuple__3; +static PyObject *__pyx_tuple__4; +static PyObject *__pyx_tuple__5; +static PyObject *__pyx_tuple__6; +static PyObject *__pyx_tuple__7; +static PyObject *__pyx_tuple__8; +static PyObject *__pyx_tuple__9; +static PyObject *__pyx_tuple__10; +static PyObject *__pyx_tuple__11; +static PyObject *__pyx_tuple__12; +static PyObject *__pyx_tuple__13; +static PyObject *__pyx_tuple__15; +static PyObject *__pyx_tuple__17; +static PyObject *__pyx_codeobj__14; +static PyObject *__pyx_codeobj__16; +static PyObject *__pyx_codeobj__18; + +/* "astropy/convolution/boundary_wrap.pyx":16 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve1d_boundary_wrap(np.ndarray[DTYPE_t, ndim=1] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=1] g, + * bint normalize_by_kernel): + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7astropy_11convolution_13boundary_wrap_1convolve1d_boundary_wrap(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyMethodDef __pyx_mdef_7astropy_11convolution_13boundary_wrap_1convolve1d_boundary_wrap = {"convolve1d_boundary_wrap", (PyCFunction)__pyx_pw_7astropy_11convolution_13boundary_wrap_1convolve1d_boundary_wrap, METH_VARARGS|METH_KEYWORDS, 0}; +static PyObject *__pyx_pw_7astropy_11convolution_13boundary_wrap_1convolve1d_boundary_wrap(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyArrayObject *__pyx_v_f = 0; + 
PyArrayObject *__pyx_v_g = 0; + int __pyx_v_normalize_by_kernel; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("convolve1d_boundary_wrap (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_f,&__pyx_n_s_g,&__pyx_n_s_normalize_by_kernel,0}; + PyObject* values[3] = {0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_f)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_g)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("convolve1d_boundary_wrap", 1, 3, 3, 1); __PYX_ERR(0, 16, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_normalize_by_kernel)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("convolve1d_boundary_wrap", 1, 3, 3, 2); __PYX_ERR(0, 16, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "convolve1d_boundary_wrap") < 0)) __PYX_ERR(0, 16, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + } + __pyx_v_f = ((PyArrayObject *)values[0]); + __pyx_v_g = ((PyArrayObject *)values[1]); + __pyx_v_normalize_by_kernel = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_normalize_by_kernel == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 18, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("convolve1d_boundary_wrap", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 16, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("astropy.convolution.boundary_wrap.convolve1d_boundary_wrap", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_f), __pyx_ptype_5numpy_ndarray, 1, "f", 0))) __PYX_ERR(0, 16, __pyx_L1_error) + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_g), __pyx_ptype_5numpy_ndarray, 1, "g", 0))) __PYX_ERR(0, 17, __pyx_L1_error) + __pyx_r = __pyx_pf_7astropy_11convolution_13boundary_wrap_convolve1d_boundary_wrap(__pyx_self, __pyx_v_f, __pyx_v_g, __pyx_v_normalize_by_kernel); + + /* function exit code */ + goto __pyx_L0; + __pyx_L1_error:; + __pyx_r = NULL; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7astropy_11convolution_13boundary_wrap_convolve1d_boundary_wrap(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_f, PyArrayObject *__pyx_v_g, int __pyx_v_normalize_by_kernel) { + int __pyx_v_nx; + int __pyx_v_nkx; + int __pyx_v_wkx; + PyArrayObject *__pyx_v_conv = 0; + unsigned int __pyx_v_i; + unsigned int __pyx_v_iii; + int __pyx_v_ii; + int __pyx_v_iimin; + int __pyx_v_iimax; 
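+  /* top and bot accumulate, per output element, the NaN-masked weighted sum
+   * and the matching sum of kernel weights; val and ker hold the current
+   * data and (flipped) kernel samples, as in the .pyx source quoted below */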
+ __pyx_t_7astropy_11convolution_13boundary_wrap_DTYPE_t __pyx_v_top; + __pyx_t_7astropy_11convolution_13boundary_wrap_DTYPE_t __pyx_v_bot; + __pyx_t_7astropy_11convolution_13boundary_wrap_DTYPE_t __pyx_v_ker; + __pyx_t_7astropy_11convolution_13boundary_wrap_DTYPE_t __pyx_v_val; + __Pyx_LocalBuf_ND __pyx_pybuffernd_conv; + __Pyx_Buffer __pyx_pybuffer_conv; + __Pyx_LocalBuf_ND __pyx_pybuffernd_f; + __Pyx_Buffer __pyx_pybuffer_f; + __Pyx_LocalBuf_ND __pyx_pybuffernd_g; + __Pyx_Buffer __pyx_pybuffer_g; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + int __pyx_t_5; + PyObject *__pyx_t_6 = NULL; + PyArrayObject *__pyx_t_7 = NULL; + int __pyx_t_8; + unsigned int __pyx_t_9; + int __pyx_t_10; + int __pyx_t_11; + size_t __pyx_t_12; + size_t __pyx_t_13; + size_t __pyx_t_14; + size_t __pyx_t_15; + size_t __pyx_t_16; + size_t __pyx_t_17; + __Pyx_RefNannySetupContext("convolve1d_boundary_wrap", 0); + __pyx_pybuffer_conv.pybuffer.buf = NULL; + __pyx_pybuffer_conv.refcount = 0; + __pyx_pybuffernd_conv.data = NULL; + __pyx_pybuffernd_conv.rcbuffer = &__pyx_pybuffer_conv; + __pyx_pybuffer_f.pybuffer.buf = NULL; + __pyx_pybuffer_f.refcount = 0; + __pyx_pybuffernd_f.data = NULL; + __pyx_pybuffernd_f.rcbuffer = &__pyx_pybuffer_f; + __pyx_pybuffer_g.pybuffer.buf = NULL; + __pyx_pybuffer_g.refcount = 0; + __pyx_pybuffernd_g.data = NULL; + __pyx_pybuffernd_g.rcbuffer = &__pyx_pybuffer_g; + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_f.rcbuffer->pybuffer, (PyObject*)__pyx_v_f, &__Pyx_TypeInfo_nn___pyx_t_7astropy_11convolution_13boundary_wrap_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) __PYX_ERR(0, 16, __pyx_L1_error) + } + __pyx_pybuffernd_f.diminfo[0].strides = __pyx_pybuffernd_f.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_f.diminfo[0].shape = __pyx_pybuffernd_f.rcbuffer->pybuffer.shape[0]; + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_g.rcbuffer->pybuffer, (PyObject*)__pyx_v_g, &__Pyx_TypeInfo_nn___pyx_t_7astropy_11convolution_13boundary_wrap_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) __PYX_ERR(0, 16, __pyx_L1_error) + } + __pyx_pybuffernd_g.diminfo[0].strides = __pyx_pybuffernd_g.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_g.diminfo[0].shape = __pyx_pybuffernd_g.rcbuffer->pybuffer.shape[0]; + + /* "astropy/convolution/boundary_wrap.pyx":20 + * bint normalize_by_kernel): + * + * if g.shape[0] % 2 != 1: # <<<<<<<<<<<<<< + * raise ValueError("Convolution kernel must have odd dimensions") + * + */ + __pyx_t_1 = ((__Pyx_mod_long((__pyx_v_g->dimensions[0]), 2) != 1) != 0); + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_wrap.pyx":21 + * + * if g.shape[0] % 2 != 1: + * raise ValueError("Convolution kernel must have odd dimensions") # <<<<<<<<<<<<<< + * + * assert f.dtype == DTYPE and g.dtype == DTYPE + */ + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_Raise(__pyx_t_2, 0, 0, 0); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __PYX_ERR(0, 21, __pyx_L1_error) + + /* "astropy/convolution/boundary_wrap.pyx":20 + * bint normalize_by_kernel): + * + * if g.shape[0] % 2 != 1: # <<<<<<<<<<<<<< + * raise ValueError("Convolution kernel must have odd dimensions") + * + */ + } + + /* 
"astropy/convolution/boundary_wrap.pyx":23 + * raise ValueError("Convolution kernel must have odd dimensions") + * + * assert f.dtype == DTYPE and g.dtype == DTYPE # <<<<<<<<<<<<<< + * + * cdef int nx = f.shape[0] + */ + #ifndef CYTHON_WITHOUT_ASSERTIONS + if (unlikely(!Py_OptimizeFlag)) { + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_f), __pyx_n_s_dtype); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 23, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_t_2, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 23, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_5) { + } else { + __pyx_t_1 = __pyx_t_5; + goto __pyx_L4_bool_binop_done; + } + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_g), __pyx_n_s_dtype); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_2 = PyObject_RichCompare(__pyx_t_4, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 23, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 23, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_1 = __pyx_t_5; + __pyx_L4_bool_binop_done:; + if (unlikely(!__pyx_t_1)) { + PyErr_SetNone(PyExc_AssertionError); + __PYX_ERR(0, 23, __pyx_L1_error) + } + } + #endif + + /* "astropy/convolution/boundary_wrap.pyx":25 + * assert f.dtype == DTYPE and g.dtype == DTYPE + * + * cdef int nx = f.shape[0] # <<<<<<<<<<<<<< + * cdef int nkx = g.shape[0] + * cdef int wkx = nkx // 2 + */ + __pyx_v_nx = (__pyx_v_f->dimensions[0]); + + /* "astropy/convolution/boundary_wrap.pyx":26 + * + * cdef int nx = f.shape[0] + * cdef int nkx = g.shape[0] # <<<<<<<<<<<<<< + * cdef int wkx = nkx // 2 + * cdef np.ndarray[DTYPE_t, ndim=1] conv = np.empty([nx], dtype=DTYPE) + */ + __pyx_v_nkx = (__pyx_v_g->dimensions[0]); + + /* "astropy/convolution/boundary_wrap.pyx":27 + * cdef int nx = f.shape[0] + * cdef int nkx = g.shape[0] + * cdef int wkx = nkx // 2 # <<<<<<<<<<<<<< + * cdef np.ndarray[DTYPE_t, ndim=1] conv = np.empty([nx], dtype=DTYPE) + * cdef unsigned int i, iii + */ + __pyx_v_wkx = __Pyx_div_long(__pyx_v_nkx, 2); + + /* "astropy/convolution/boundary_wrap.pyx":28 + * cdef int nkx = g.shape[0] + * cdef int wkx = nkx // 2 + * cdef np.ndarray[DTYPE_t, ndim=1] conv = np.empty([nx], dtype=DTYPE) # <<<<<<<<<<<<<< + * cdef unsigned int i, iii + * cdef int ii + */ + __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 28, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_empty); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 28, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_nx); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 28, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_4 = PyList_New(1); if 
(unlikely(!__pyx_t_4)) __PYX_ERR(0, 28, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_GIVEREF(__pyx_t_2); + PyList_SET_ITEM(__pyx_t_4, 0, __pyx_t_2); + __pyx_t_2 = 0; + __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 28, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_GIVEREF(__pyx_t_4); + PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4); + __pyx_t_4 = 0; + __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 28, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 28, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_6) < 0) __PYX_ERR(0, 28, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_2, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 28, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (!(likely(((__pyx_t_6) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_6, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 28, __pyx_L1_error) + __pyx_t_7 = ((PyArrayObject *)__pyx_t_6); + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_conv.rcbuffer->pybuffer, (PyObject*)__pyx_t_7, &__Pyx_TypeInfo_nn___pyx_t_7astropy_11convolution_13boundary_wrap_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) { + __pyx_v_conv = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf = NULL; + __PYX_ERR(0, 28, __pyx_L1_error) + } else {__pyx_pybuffernd_conv.diminfo[0].strides = __pyx_pybuffernd_conv.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_conv.diminfo[0].shape = __pyx_pybuffernd_conv.rcbuffer->pybuffer.shape[0]; + } + } + __pyx_t_7 = 0; + __pyx_v_conv = ((PyArrayObject *)__pyx_t_6); + __pyx_t_6 = 0; + + /* "astropy/convolution/boundary_wrap.pyx":37 + * + * # release the GIL + * with nogil: # <<<<<<<<<<<<<< + * + * # Now run the proper convolution + */ + { + #ifdef WITH_THREAD + PyThreadState *_save; + Py_UNBLOCK_THREADS + __Pyx_FastGIL_Remember(); + #endif + /*try:*/ { + + /* "astropy/convolution/boundary_wrap.pyx":40 + * + * # Now run the proper convolution + * for i in range(nx): # <<<<<<<<<<<<<< + * top = 0. + * bot = 0. + */ + __pyx_t_8 = __pyx_v_nx; + for (__pyx_t_9 = 0; __pyx_t_9 < __pyx_t_8; __pyx_t_9+=1) { + __pyx_v_i = __pyx_t_9; + + /* "astropy/convolution/boundary_wrap.pyx":41 + * # Now run the proper convolution + * for i in range(nx): + * top = 0. # <<<<<<<<<<<<<< + * bot = 0. + * iimin = i - wkx + */ + __pyx_v_top = 0.; + + /* "astropy/convolution/boundary_wrap.pyx":42 + * for i in range(nx): + * top = 0. + * bot = 0. # <<<<<<<<<<<<<< + * iimin = i - wkx + * iimax = i + wkx + 1 + */ + __pyx_v_bot = 0.; + + /* "astropy/convolution/boundary_wrap.pyx":43 + * top = 0. + * bot = 0. + * iimin = i - wkx # <<<<<<<<<<<<<< + * iimax = i + wkx + 1 + * for ii in range(iimin, iimax): + */ + __pyx_v_iimin = (__pyx_v_i - __pyx_v_wkx); + + /* "astropy/convolution/boundary_wrap.pyx":44 + * bot = 0. 
+ * iimin = i - wkx + * iimax = i + wkx + 1 # <<<<<<<<<<<<<< + * for ii in range(iimin, iimax): + * iii = ii % nx + */ + __pyx_v_iimax = ((__pyx_v_i + __pyx_v_wkx) + 1); + + /* "astropy/convolution/boundary_wrap.pyx":45 + * iimin = i - wkx + * iimax = i + wkx + 1 + * for ii in range(iimin, iimax): # <<<<<<<<<<<<<< + * iii = ii % nx + * val = f[iii] + */ + __pyx_t_10 = __pyx_v_iimax; + for (__pyx_t_11 = __pyx_v_iimin; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) { + __pyx_v_ii = __pyx_t_11; + + /* "astropy/convolution/boundary_wrap.pyx":46 + * iimax = i + wkx + 1 + * for ii in range(iimin, iimax): + * iii = ii % nx # <<<<<<<<<<<<<< + * val = f[iii] + * ker = g[(nkx - 1 - (wkx + ii - i))] + */ + if (unlikely(__pyx_v_nx == 0)) { + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + __PYX_ERR(0, 46, __pyx_L7_error) + } + __pyx_v_iii = __Pyx_mod_int(__pyx_v_ii, __pyx_v_nx); + + /* "astropy/convolution/boundary_wrap.pyx":47 + * for ii in range(iimin, iimax): + * iii = ii % nx + * val = f[iii] # <<<<<<<<<<<<<< + * ker = g[(nkx - 1 - (wkx + ii - i))] + * if not npy_isnan(val): + */ + __pyx_t_12 = __pyx_v_iii; + __pyx_v_val = (*__Pyx_BufPtrStrided1d(__pyx_t_7astropy_11convolution_13boundary_wrap_DTYPE_t *, __pyx_pybuffernd_f.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_f.diminfo[0].strides)); + + /* "astropy/convolution/boundary_wrap.pyx":48 + * iii = ii % nx + * val = f[iii] + * ker = g[(nkx - 1 - (wkx + ii - i))] # <<<<<<<<<<<<<< + * if not npy_isnan(val): + * top += val * ker + */ + __pyx_t_13 = ((unsigned int)((__pyx_v_nkx - 1) - ((__pyx_v_wkx + __pyx_v_ii) - __pyx_v_i))); + __pyx_v_ker = (*__Pyx_BufPtrStrided1d(__pyx_t_7astropy_11convolution_13boundary_wrap_DTYPE_t *, __pyx_pybuffernd_g.rcbuffer->pybuffer.buf, __pyx_t_13, __pyx_pybuffernd_g.diminfo[0].strides)); + + /* "astropy/convolution/boundary_wrap.pyx":49 + * val = f[iii] + * ker = g[(nkx - 1 - (wkx + ii - i))] + * if not npy_isnan(val): # <<<<<<<<<<<<<< + * top += val * ker + * bot += ker + */ + __pyx_t_1 = ((!(npy_isnan(__pyx_v_val) != 0)) != 0); + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_wrap.pyx":50 + * ker = g[(nkx - 1 - (wkx + ii - i))] + * if not npy_isnan(val): + * top += val * ker # <<<<<<<<<<<<<< + * bot += ker + * if normalize_by_kernel: + */ + __pyx_v_top = (__pyx_v_top + (__pyx_v_val * __pyx_v_ker)); + + /* "astropy/convolution/boundary_wrap.pyx":51 + * if not npy_isnan(val): + * top += val * ker + * bot += ker # <<<<<<<<<<<<<< + * if normalize_by_kernel: + * if bot == 0: + */ + __pyx_v_bot = (__pyx_v_bot + __pyx_v_ker); + + /* "astropy/convolution/boundary_wrap.pyx":49 + * val = f[iii] + * ker = g[(nkx - 1 - (wkx + ii - i))] + * if not npy_isnan(val): # <<<<<<<<<<<<<< + * top += val * ker + * bot += ker + */ + } + } + + /* "astropy/convolution/boundary_wrap.pyx":52 + * top += val * ker + * bot += ker + * if normalize_by_kernel: # <<<<<<<<<<<<<< + * if bot == 0: + * conv[i] = f[i] + */ + __pyx_t_1 = (__pyx_v_normalize_by_kernel != 0); + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_wrap.pyx":53 + * bot += ker + * if normalize_by_kernel: + * if bot == 0: # <<<<<<<<<<<<<< + * conv[i] = f[i] + * else: + */ + __pyx_t_1 = ((__pyx_v_bot == 0.0) != 0); + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_wrap.pyx":54 + * if normalize_by_kernel: + * if bot == 0: + * conv[i] = f[i] # <<<<<<<<<<<<<< + * 
else: + * conv[i] = top / bot + */ + __pyx_t_14 = __pyx_v_i; + __pyx_t_15 = __pyx_v_i; + *__Pyx_BufPtrStrided1d(__pyx_t_7astropy_11convolution_13boundary_wrap_DTYPE_t *, __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_conv.diminfo[0].strides) = (*__Pyx_BufPtrStrided1d(__pyx_t_7astropy_11convolution_13boundary_wrap_DTYPE_t *, __pyx_pybuffernd_f.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_f.diminfo[0].strides)); + + /* "astropy/convolution/boundary_wrap.pyx":53 + * bot += ker + * if normalize_by_kernel: + * if bot == 0: # <<<<<<<<<<<<<< + * conv[i] = f[i] + * else: + */ + goto __pyx_L15; + } + + /* "astropy/convolution/boundary_wrap.pyx":56 + * conv[i] = f[i] + * else: + * conv[i] = top / bot # <<<<<<<<<<<<<< + * else: + * conv[i] = top + */ + /*else*/ { + if (unlikely(__pyx_v_bot == 0)) { + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + PyErr_SetString(PyExc_ZeroDivisionError, "float division"); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + __PYX_ERR(0, 56, __pyx_L7_error) + } + __pyx_t_16 = __pyx_v_i; + *__Pyx_BufPtrStrided1d(__pyx_t_7astropy_11convolution_13boundary_wrap_DTYPE_t *, __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_conv.diminfo[0].strides) = (__pyx_v_top / __pyx_v_bot); + } + __pyx_L15:; + + /* "astropy/convolution/boundary_wrap.pyx":52 + * top += val * ker + * bot += ker + * if normalize_by_kernel: # <<<<<<<<<<<<<< + * if bot == 0: + * conv[i] = f[i] + */ + goto __pyx_L14; + } + + /* "astropy/convolution/boundary_wrap.pyx":58 + * conv[i] = top / bot + * else: + * conv[i] = top # <<<<<<<<<<<<<< + * # GIL acquired again here + * return conv + */ + /*else*/ { + __pyx_t_17 = __pyx_v_i; + *__Pyx_BufPtrStrided1d(__pyx_t_7astropy_11convolution_13boundary_wrap_DTYPE_t *, __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_conv.diminfo[0].strides) = __pyx_v_top; + } + __pyx_L14:; + } + } + + /* "astropy/convolution/boundary_wrap.pyx":37 + * + * # release the GIL + * with nogil: # <<<<<<<<<<<<<< + * + * # Now run the proper convolution + */ + /*finally:*/ { + /*normal exit:*/{ + #ifdef WITH_THREAD + __Pyx_FastGIL_Forget(); + Py_BLOCK_THREADS + #endif + goto __pyx_L8; + } + __pyx_L7_error: { + #ifdef WITH_THREAD + __Pyx_FastGIL_Forget(); + Py_BLOCK_THREADS + #endif + goto __pyx_L1_error; + } + __pyx_L8:; + } + } + + /* "astropy/convolution/boundary_wrap.pyx":60 + * conv[i] = top + * # GIL acquired again here + * return conv # <<<<<<<<<<<<<< + * + * + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject *)__pyx_v_conv)); + __pyx_r = ((PyObject *)__pyx_v_conv); + goto __pyx_L0; + + /* "astropy/convolution/boundary_wrap.pyx":16 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve1d_boundary_wrap(np.ndarray[DTYPE_t, ndim=1] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=1] g, + * bint normalize_by_kernel): + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_6); + { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_conv.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_f.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_g.rcbuffer->pybuffer); + __Pyx_ErrRestore(__pyx_type, 
__pyx_value, __pyx_tb);} + __Pyx_AddTraceback("astropy.convolution.boundary_wrap.convolve1d_boundary_wrap", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + goto __pyx_L2; + __pyx_L0:; + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_conv.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_f.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_g.rcbuffer->pybuffer); + __pyx_L2:; + __Pyx_XDECREF((PyObject *)__pyx_v_conv); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "astropy/convolution/boundary_wrap.pyx":64 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve2d_boundary_wrap(np.ndarray[DTYPE_t, ndim=2] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=2] g, + * bint normalize_by_kernel): + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7astropy_11convolution_13boundary_wrap_3convolve2d_boundary_wrap(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyMethodDef __pyx_mdef_7astropy_11convolution_13boundary_wrap_3convolve2d_boundary_wrap = {"convolve2d_boundary_wrap", (PyCFunction)__pyx_pw_7astropy_11convolution_13boundary_wrap_3convolve2d_boundary_wrap, METH_VARARGS|METH_KEYWORDS, 0}; +static PyObject *__pyx_pw_7astropy_11convolution_13boundary_wrap_3convolve2d_boundary_wrap(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyArrayObject *__pyx_v_f = 0; + PyArrayObject *__pyx_v_g = 0; + int __pyx_v_normalize_by_kernel; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("convolve2d_boundary_wrap (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_f,&__pyx_n_s_g,&__pyx_n_s_normalize_by_kernel,0}; + PyObject* values[3] = {0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_f)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_g)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("convolve2d_boundary_wrap", 1, 3, 3, 1); __PYX_ERR(0, 64, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_normalize_by_kernel)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("convolve2d_boundary_wrap", 1, 3, 3, 2); __PYX_ERR(0, 64, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "convolve2d_boundary_wrap") < 0)) __PYX_ERR(0, 64, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + } + __pyx_v_f = ((PyArrayObject *)values[0]); + __pyx_v_g = ((PyArrayObject *)values[1]); + __pyx_v_normalize_by_kernel = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_normalize_by_kernel == (int)-1) && 
PyErr_Occurred())) __PYX_ERR(0, 66, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("convolve2d_boundary_wrap", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 64, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("astropy.convolution.boundary_wrap.convolve2d_boundary_wrap", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_f), __pyx_ptype_5numpy_ndarray, 1, "f", 0))) __PYX_ERR(0, 64, __pyx_L1_error) + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_g), __pyx_ptype_5numpy_ndarray, 1, "g", 0))) __PYX_ERR(0, 65, __pyx_L1_error) + __pyx_r = __pyx_pf_7astropy_11convolution_13boundary_wrap_2convolve2d_boundary_wrap(__pyx_self, __pyx_v_f, __pyx_v_g, __pyx_v_normalize_by_kernel); + + /* function exit code */ + goto __pyx_L0; + __pyx_L1_error:; + __pyx_r = NULL; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7astropy_11convolution_13boundary_wrap_2convolve2d_boundary_wrap(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_f, PyArrayObject *__pyx_v_g, int __pyx_v_normalize_by_kernel) { + int __pyx_v_nx; + int __pyx_v_ny; + int __pyx_v_nkx; + int __pyx_v_nky; + int __pyx_v_wkx; + int __pyx_v_wky; + PyArrayObject *__pyx_v_conv = 0; + unsigned int __pyx_v_i; + unsigned int __pyx_v_j; + unsigned int __pyx_v_iii; + unsigned int __pyx_v_jjj; + int __pyx_v_ii; + int __pyx_v_jj; + int __pyx_v_iimin; + int __pyx_v_iimax; + int __pyx_v_jjmin; + int __pyx_v_jjmax; + __pyx_t_7astropy_11convolution_13boundary_wrap_DTYPE_t __pyx_v_top; + __pyx_t_7astropy_11convolution_13boundary_wrap_DTYPE_t __pyx_v_bot; + __pyx_t_7astropy_11convolution_13boundary_wrap_DTYPE_t __pyx_v_ker; + __pyx_t_7astropy_11convolution_13boundary_wrap_DTYPE_t __pyx_v_val; + __Pyx_LocalBuf_ND __pyx_pybuffernd_conv; + __Pyx_Buffer __pyx_pybuffer_conv; + __Pyx_LocalBuf_ND __pyx_pybuffernd_f; + __Pyx_Buffer __pyx_pybuffer_f; + __Pyx_LocalBuf_ND __pyx_pybuffernd_g; + __Pyx_Buffer __pyx_pybuffer_g; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyArrayObject *__pyx_t_7 = NULL; + int __pyx_t_8; + unsigned int __pyx_t_9; + int __pyx_t_10; + unsigned int __pyx_t_11; + int __pyx_t_12; + int __pyx_t_13; + int __pyx_t_14; + int __pyx_t_15; + size_t __pyx_t_16; + size_t __pyx_t_17; + size_t __pyx_t_18; + size_t __pyx_t_19; + size_t __pyx_t_20; + size_t __pyx_t_21; + size_t __pyx_t_22; + size_t __pyx_t_23; + size_t __pyx_t_24; + size_t __pyx_t_25; + size_t __pyx_t_26; + size_t __pyx_t_27; + __Pyx_RefNannySetupContext("convolve2d_boundary_wrap", 0); + __pyx_pybuffer_conv.pybuffer.buf = NULL; + __pyx_pybuffer_conv.refcount = 0; + __pyx_pybuffernd_conv.data = NULL; + __pyx_pybuffernd_conv.rcbuffer = &__pyx_pybuffer_conv; + __pyx_pybuffer_f.pybuffer.buf = NULL; + __pyx_pybuffer_f.refcount = 0; + __pyx_pybuffernd_f.data = NULL; + __pyx_pybuffernd_f.rcbuffer = &__pyx_pybuffer_f; + __pyx_pybuffer_g.pybuffer.buf = NULL; + __pyx_pybuffer_g.refcount = 0; + __pyx_pybuffernd_g.data = NULL; + __pyx_pybuffernd_g.rcbuffer = &__pyx_pybuffer_g; + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_f.rcbuffer->pybuffer, (PyObject*)__pyx_v_f, 
&__Pyx_TypeInfo_nn___pyx_t_7astropy_11convolution_13boundary_wrap_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 64, __pyx_L1_error) + } + __pyx_pybuffernd_f.diminfo[0].strides = __pyx_pybuffernd_f.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_f.diminfo[0].shape = __pyx_pybuffernd_f.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_f.diminfo[1].strides = __pyx_pybuffernd_f.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_f.diminfo[1].shape = __pyx_pybuffernd_f.rcbuffer->pybuffer.shape[1]; + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_g.rcbuffer->pybuffer, (PyObject*)__pyx_v_g, &__Pyx_TypeInfo_nn___pyx_t_7astropy_11convolution_13boundary_wrap_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 64, __pyx_L1_error) + } + __pyx_pybuffernd_g.diminfo[0].strides = __pyx_pybuffernd_g.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_g.diminfo[0].shape = __pyx_pybuffernd_g.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_g.diminfo[1].strides = __pyx_pybuffernd_g.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_g.diminfo[1].shape = __pyx_pybuffernd_g.rcbuffer->pybuffer.shape[1]; + + /* "astropy/convolution/boundary_wrap.pyx":68 + * bint normalize_by_kernel): + * + * if g.shape[0] % 2 != 1 or g.shape[1] % 2 != 1: # <<<<<<<<<<<<<< + * raise ValueError("Convolution kernel must have odd dimensions") + * + */ + __pyx_t_2 = ((__Pyx_mod_long((__pyx_v_g->dimensions[0]), 2) != 1) != 0); + if (!__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L4_bool_binop_done; + } + __pyx_t_2 = ((__Pyx_mod_long((__pyx_v_g->dimensions[1]), 2) != 1) != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L4_bool_binop_done:; + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_wrap.pyx":69 + * + * if g.shape[0] % 2 != 1 or g.shape[1] % 2 != 1: + * raise ValueError("Convolution kernel must have odd dimensions") # <<<<<<<<<<<<<< + * + * assert f.dtype == DTYPE and g.dtype == DTYPE + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 69, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(0, 69, __pyx_L1_error) + + /* "astropy/convolution/boundary_wrap.pyx":68 + * bint normalize_by_kernel): + * + * if g.shape[0] % 2 != 1 or g.shape[1] % 2 != 1: # <<<<<<<<<<<<<< + * raise ValueError("Convolution kernel must have odd dimensions") + * + */ + } + + /* "astropy/convolution/boundary_wrap.pyx":71 + * raise ValueError("Convolution kernel must have odd dimensions") + * + * assert f.dtype == DTYPE and g.dtype == DTYPE # <<<<<<<<<<<<<< + * + * cdef int nx = f.shape[0] + */ + #ifndef CYTHON_WITHOUT_ASSERTIONS + if (unlikely(!Py_OptimizeFlag)) { + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_f), __pyx_n_s_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 71, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 71, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_5 = PyObject_RichCompare(__pyx_t_3, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 71, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 71, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; 
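+  /* first operand of "f.dtype == DTYPE and g.dtype == DTYPE" evaluated
+   * false, so the result is already determined and the second comparison
+   * is skipped */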
+ goto __pyx_L6_bool_binop_done; + } + __pyx_t_5 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_g), __pyx_n_s_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 71, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 71, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_t_5, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 71, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 71, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_1 = __pyx_t_2; + __pyx_L6_bool_binop_done:; + if (unlikely(!__pyx_t_1)) { + PyErr_SetNone(PyExc_AssertionError); + __PYX_ERR(0, 71, __pyx_L1_error) + } + } + #endif + + /* "astropy/convolution/boundary_wrap.pyx":73 + * assert f.dtype == DTYPE and g.dtype == DTYPE + * + * cdef int nx = f.shape[0] # <<<<<<<<<<<<<< + * cdef int ny = f.shape[1] + * cdef int nkx = g.shape[0] + */ + __pyx_v_nx = (__pyx_v_f->dimensions[0]); + + /* "astropy/convolution/boundary_wrap.pyx":74 + * + * cdef int nx = f.shape[0] + * cdef int ny = f.shape[1] # <<<<<<<<<<<<<< + * cdef int nkx = g.shape[0] + * cdef int nky = g.shape[1] + */ + __pyx_v_ny = (__pyx_v_f->dimensions[1]); + + /* "astropy/convolution/boundary_wrap.pyx":75 + * cdef int nx = f.shape[0] + * cdef int ny = f.shape[1] + * cdef int nkx = g.shape[0] # <<<<<<<<<<<<<< + * cdef int nky = g.shape[1] + * cdef int wkx = nkx // 2 + */ + __pyx_v_nkx = (__pyx_v_g->dimensions[0]); + + /* "astropy/convolution/boundary_wrap.pyx":76 + * cdef int ny = f.shape[1] + * cdef int nkx = g.shape[0] + * cdef int nky = g.shape[1] # <<<<<<<<<<<<<< + * cdef int wkx = nkx // 2 + * cdef int wky = nky // 2 + */ + __pyx_v_nky = (__pyx_v_g->dimensions[1]); + + /* "astropy/convolution/boundary_wrap.pyx":77 + * cdef int nkx = g.shape[0] + * cdef int nky = g.shape[1] + * cdef int wkx = nkx // 2 # <<<<<<<<<<<<<< + * cdef int wky = nky // 2 + * cdef np.ndarray[DTYPE_t, ndim=2] conv = np.empty([nx, ny], dtype=DTYPE) + */ + __pyx_v_wkx = __Pyx_div_long(__pyx_v_nkx, 2); + + /* "astropy/convolution/boundary_wrap.pyx":78 + * cdef int nky = g.shape[1] + * cdef int wkx = nkx // 2 + * cdef int wky = nky // 2 # <<<<<<<<<<<<<< + * cdef np.ndarray[DTYPE_t, ndim=2] conv = np.empty([nx, ny], dtype=DTYPE) + * cdef unsigned int i, j, iii, jjj + */ + __pyx_v_wky = __Pyx_div_long(__pyx_v_nky, 2); + + /* "astropy/convolution/boundary_wrap.pyx":79 + * cdef int wkx = nkx // 2 + * cdef int wky = nky // 2 + * cdef np.ndarray[DTYPE_t, ndim=2] conv = np.empty([nx, ny], dtype=DTYPE) # <<<<<<<<<<<<<< + * cdef unsigned int i, j, iii, jjj + * cdef int ii, jj + */ + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 79, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_empty); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 79, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_nx); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 79, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_ny); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 79, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 79, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + 
__Pyx_GIVEREF(__pyx_t_3); + PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_3); + __Pyx_GIVEREF(__pyx_t_5); + PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_5); + __pyx_t_3 = 0; + __pyx_t_5 = 0; + __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 79, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_GIVEREF(__pyx_t_6); + PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_6); + __pyx_t_6 = 0; + __pyx_t_6 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 79, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 79, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + if (PyDict_SetItem(__pyx_t_6, __pyx_n_s_dtype, __pyx_t_3) < 0) __PYX_ERR(0, 79, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_5, __pyx_t_6); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 79, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 79, __pyx_L1_error) + __pyx_t_7 = ((PyArrayObject *)__pyx_t_3); + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_conv.rcbuffer->pybuffer, (PyObject*)__pyx_t_7, &__Pyx_TypeInfo_nn___pyx_t_7astropy_11convolution_13boundary_wrap_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) { + __pyx_v_conv = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf = NULL; + __PYX_ERR(0, 79, __pyx_L1_error) + } else {__pyx_pybuffernd_conv.diminfo[0].strides = __pyx_pybuffernd_conv.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_conv.diminfo[0].shape = __pyx_pybuffernd_conv.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_conv.diminfo[1].strides = __pyx_pybuffernd_conv.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_conv.diminfo[1].shape = __pyx_pybuffernd_conv.rcbuffer->pybuffer.shape[1]; + } + } + __pyx_t_7 = 0; + __pyx_v_conv = ((PyArrayObject *)__pyx_t_3); + __pyx_t_3 = 0; + + /* "astropy/convolution/boundary_wrap.pyx":88 + * + * # release the GIL + * with nogil: # <<<<<<<<<<<<<< + * + * # Now run the proper convolution + */ + { + #ifdef WITH_THREAD + PyThreadState *_save; + Py_UNBLOCK_THREADS + __Pyx_FastGIL_Remember(); + #endif + /*try:*/ { + + /* "astropy/convolution/boundary_wrap.pyx":91 + * + * # Now run the proper convolution + * for i in range(nx): # <<<<<<<<<<<<<< + * for j in range(ny): + * top = 0. + */ + __pyx_t_8 = __pyx_v_nx; + for (__pyx_t_9 = 0; __pyx_t_9 < __pyx_t_8; __pyx_t_9+=1) { + __pyx_v_i = __pyx_t_9; + + /* "astropy/convolution/boundary_wrap.pyx":92 + * # Now run the proper convolution + * for i in range(nx): + * for j in range(ny): # <<<<<<<<<<<<<< + * top = 0. + * bot = 0. + */ + __pyx_t_10 = __pyx_v_ny; + for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) { + __pyx_v_j = __pyx_t_11; + + /* "astropy/convolution/boundary_wrap.pyx":93 + * for i in range(nx): + * for j in range(ny): + * top = 0. # <<<<<<<<<<<<<< + * bot = 0. + * iimin = i - wkx + */ + __pyx_v_top = 0.; + + /* "astropy/convolution/boundary_wrap.pyx":94 + * for j in range(ny): + * top = 0. + * bot = 0. # <<<<<<<<<<<<<< + * iimin = i - wkx + * iimax = i + wkx + 1 + */ + __pyx_v_bot = 0.; + + /* "astropy/convolution/boundary_wrap.pyx":95 + * top = 0. + * bot = 0. 
+ * iimin = i - wkx # <<<<<<<<<<<<<< + * iimax = i + wkx + 1 + * jjmin = j - wky + */ + __pyx_v_iimin = (__pyx_v_i - __pyx_v_wkx); + + /* "astropy/convolution/boundary_wrap.pyx":96 + * bot = 0. + * iimin = i - wkx + * iimax = i + wkx + 1 # <<<<<<<<<<<<<< + * jjmin = j - wky + * jjmax = j + wky + 1 + */ + __pyx_v_iimax = ((__pyx_v_i + __pyx_v_wkx) + 1); + + /* "astropy/convolution/boundary_wrap.pyx":97 + * iimin = i - wkx + * iimax = i + wkx + 1 + * jjmin = j - wky # <<<<<<<<<<<<<< + * jjmax = j + wky + 1 + * for ii in range(iimin, iimax): + */ + __pyx_v_jjmin = (__pyx_v_j - __pyx_v_wky); + + /* "astropy/convolution/boundary_wrap.pyx":98 + * iimax = i + wkx + 1 + * jjmin = j - wky + * jjmax = j + wky + 1 # <<<<<<<<<<<<<< + * for ii in range(iimin, iimax): + * for jj in range(jjmin, jjmax): + */ + __pyx_v_jjmax = ((__pyx_v_j + __pyx_v_wky) + 1); + + /* "astropy/convolution/boundary_wrap.pyx":99 + * jjmin = j - wky + * jjmax = j + wky + 1 + * for ii in range(iimin, iimax): # <<<<<<<<<<<<<< + * for jj in range(jjmin, jjmax): + * iii = ii % nx + */ + __pyx_t_12 = __pyx_v_iimax; + for (__pyx_t_13 = __pyx_v_iimin; __pyx_t_13 < __pyx_t_12; __pyx_t_13+=1) { + __pyx_v_ii = __pyx_t_13; + + /* "astropy/convolution/boundary_wrap.pyx":100 + * jjmax = j + wky + 1 + * for ii in range(iimin, iimax): + * for jj in range(jjmin, jjmax): # <<<<<<<<<<<<<< + * iii = ii % nx + * jjj = jj % ny + */ + __pyx_t_14 = __pyx_v_jjmax; + for (__pyx_t_15 = __pyx_v_jjmin; __pyx_t_15 < __pyx_t_14; __pyx_t_15+=1) { + __pyx_v_jj = __pyx_t_15; + + /* "astropy/convolution/boundary_wrap.pyx":101 + * for ii in range(iimin, iimax): + * for jj in range(jjmin, jjmax): + * iii = ii % nx # <<<<<<<<<<<<<< + * jjj = jj % ny + * val = f[iii, jjj] + */ + if (unlikely(__pyx_v_nx == 0)) { + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + __PYX_ERR(0, 101, __pyx_L9_error) + } + __pyx_v_iii = __Pyx_mod_int(__pyx_v_ii, __pyx_v_nx); + + /* "astropy/convolution/boundary_wrap.pyx":102 + * for jj in range(jjmin, jjmax): + * iii = ii % nx + * jjj = jj % ny # <<<<<<<<<<<<<< + * val = f[iii, jjj] + * ker = g[(nkx - 1 - (wkx + ii - i)), + */ + if (unlikely(__pyx_v_ny == 0)) { + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + __PYX_ERR(0, 102, __pyx_L9_error) + } + __pyx_v_jjj = __Pyx_mod_int(__pyx_v_jj, __pyx_v_ny); + + /* "astropy/convolution/boundary_wrap.pyx":103 + * iii = ii % nx + * jjj = jj % ny + * val = f[iii, jjj] # <<<<<<<<<<<<<< + * ker = g[(nkx - 1 - (wkx + ii - i)), + * (nky - 1 - (wky + jj - j))] + */ + __pyx_t_16 = __pyx_v_iii; + __pyx_t_17 = __pyx_v_jjj; + __pyx_v_val = (*__Pyx_BufPtrStrided2d(__pyx_t_7astropy_11convolution_13boundary_wrap_DTYPE_t *, __pyx_pybuffernd_f.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_f.diminfo[0].strides, __pyx_t_17, __pyx_pybuffernd_f.diminfo[1].strides)); + + /* "astropy/convolution/boundary_wrap.pyx":104 + * jjj = jj % ny + * val = f[iii, jjj] + * ker = g[(nkx - 1 - (wkx + ii - i)), # <<<<<<<<<<<<<< + * (nky - 1 - (wky + jj - j))] + * if not npy_isnan(val): + */ + __pyx_t_18 = ((unsigned int)((__pyx_v_nkx - 1) - ((__pyx_v_wkx + __pyx_v_ii) - __pyx_v_i))); + __pyx_t_19 = ((unsigned 
int)((__pyx_v_nky - 1) - ((__pyx_v_wky + __pyx_v_jj) - __pyx_v_j))); + __pyx_v_ker = (*__Pyx_BufPtrStrided2d(__pyx_t_7astropy_11convolution_13boundary_wrap_DTYPE_t *, __pyx_pybuffernd_g.rcbuffer->pybuffer.buf, __pyx_t_18, __pyx_pybuffernd_g.diminfo[0].strides, __pyx_t_19, __pyx_pybuffernd_g.diminfo[1].strides)); + + /* "astropy/convolution/boundary_wrap.pyx":106 + * ker = g[(nkx - 1 - (wkx + ii - i)), + * (nky - 1 - (wky + jj - j))] + * if not npy_isnan(val): # <<<<<<<<<<<<<< + * top += val * ker + * bot += ker + */ + __pyx_t_1 = ((!(npy_isnan(__pyx_v_val) != 0)) != 0); + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_wrap.pyx":107 + * (nky - 1 - (wky + jj - j))] + * if not npy_isnan(val): + * top += val * ker # <<<<<<<<<<<<<< + * bot += ker + * if normalize_by_kernel: + */ + __pyx_v_top = (__pyx_v_top + (__pyx_v_val * __pyx_v_ker)); + + /* "astropy/convolution/boundary_wrap.pyx":108 + * if not npy_isnan(val): + * top += val * ker + * bot += ker # <<<<<<<<<<<<<< + * if normalize_by_kernel: + * if bot == 0: + */ + __pyx_v_bot = (__pyx_v_bot + __pyx_v_ker); + + /* "astropy/convolution/boundary_wrap.pyx":106 + * ker = g[(nkx - 1 - (wkx + ii - i)), + * (nky - 1 - (wky + jj - j))] + * if not npy_isnan(val): # <<<<<<<<<<<<<< + * top += val * ker + * bot += ker + */ + } + } + } + + /* "astropy/convolution/boundary_wrap.pyx":109 + * top += val * ker + * bot += ker + * if normalize_by_kernel: # <<<<<<<<<<<<<< + * if bot == 0: + * conv[i, j] = f[i, j] + */ + __pyx_t_1 = (__pyx_v_normalize_by_kernel != 0); + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_wrap.pyx":110 + * bot += ker + * if normalize_by_kernel: + * if bot == 0: # <<<<<<<<<<<<<< + * conv[i, j] = f[i, j] + * else: + */ + __pyx_t_1 = ((__pyx_v_bot == 0.0) != 0); + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_wrap.pyx":111 + * if normalize_by_kernel: + * if bot == 0: + * conv[i, j] = f[i, j] # <<<<<<<<<<<<<< + * else: + * conv[i, j] = top / bot + */ + __pyx_t_20 = __pyx_v_i; + __pyx_t_21 = __pyx_v_j; + __pyx_t_22 = __pyx_v_i; + __pyx_t_23 = __pyx_v_j; + *__Pyx_BufPtrStrided2d(__pyx_t_7astropy_11convolution_13boundary_wrap_DTYPE_t *, __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf, __pyx_t_22, __pyx_pybuffernd_conv.diminfo[0].strides, __pyx_t_23, __pyx_pybuffernd_conv.diminfo[1].strides) = (*__Pyx_BufPtrStrided2d(__pyx_t_7astropy_11convolution_13boundary_wrap_DTYPE_t *, __pyx_pybuffernd_f.rcbuffer->pybuffer.buf, __pyx_t_20, __pyx_pybuffernd_f.diminfo[0].strides, __pyx_t_21, __pyx_pybuffernd_f.diminfo[1].strides)); + + /* "astropy/convolution/boundary_wrap.pyx":110 + * bot += ker + * if normalize_by_kernel: + * if bot == 0: # <<<<<<<<<<<<<< + * conv[i, j] = f[i, j] + * else: + */ + goto __pyx_L21; + } + + /* "astropy/convolution/boundary_wrap.pyx":113 + * conv[i, j] = f[i, j] + * else: + * conv[i, j] = top / bot # <<<<<<<<<<<<<< + * else: + * conv[i, j] = top + */ + /*else*/ { + if (unlikely(__pyx_v_bot == 0)) { + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + PyErr_SetString(PyExc_ZeroDivisionError, "float division"); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + __PYX_ERR(0, 113, __pyx_L9_error) + } + __pyx_t_24 = __pyx_v_i; + __pyx_t_25 = __pyx_v_j; + *__Pyx_BufPtrStrided2d(__pyx_t_7astropy_11convolution_13boundary_wrap_DTYPE_t *, __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf, __pyx_t_24, __pyx_pybuffernd_conv.diminfo[0].strides, __pyx_t_25, __pyx_pybuffernd_conv.diminfo[1].strides) = (__pyx_v_top / __pyx_v_bot); + } + __pyx_L21:; + + 
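+  /* At this label the kernel sums for one output pixel are complete, and the
+  * surrounding branches pick between the normalised result (top / bot) and
+  * the unnormalised sum (top). Assembled from the .pyx excerpts quoted
+  * throughout this file, the whole 2-D wrap-boundary loop corresponds to
+  * this minimal NumPy sketch (the function name is illustrative, not a
+  * shipped API):
+  *
+  * import numpy as np
+  *
+  * def convolve2d_wrap_sketch(f, g, normalize_by_kernel=True):
+  *     nx, ny = f.shape
+  *     nkx, nky = g.shape            # both odd (checked before the loops)
+  *     wkx, wky = nkx // 2, nky // 2
+  *     conv = np.empty((nx, ny), dtype=f.dtype)
+  *     for i in range(nx):
+  *         for j in range(ny):
+  *             top = bot = 0.
+  *             for ii in range(i - wkx, i + wkx + 1):
+  *                 for jj in range(j - wky, j + wky + 1):
+  *                     val = f[ii % nx, jj % ny]            # periodic wrap
+  *                     ker = g[nkx - 1 - (wkx + ii - i),    # flipped kernel
+  *                             nky - 1 - (wky + jj - j)]
+  *                     if not np.isnan(val):                # skip NaN input
+  *                         top += val * ker
+  *                         bot += ker
+  *             if normalize_by_kernel:
+  *                 conv[i, j] = f[i, j] if bot == 0 else top / bot
+  *             else:
+  *                 conv[i, j] = top
+  *     return conv
+  */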
/* "astropy/convolution/boundary_wrap.pyx":109 + * top += val * ker + * bot += ker + * if normalize_by_kernel: # <<<<<<<<<<<<<< + * if bot == 0: + * conv[i, j] = f[i, j] + */ + goto __pyx_L20; + } + + /* "astropy/convolution/boundary_wrap.pyx":115 + * conv[i, j] = top / bot + * else: + * conv[i, j] = top # <<<<<<<<<<<<<< + * # GIl acquired again here + * return conv + */ + /*else*/ { + __pyx_t_26 = __pyx_v_i; + __pyx_t_27 = __pyx_v_j; + *__Pyx_BufPtrStrided2d(__pyx_t_7astropy_11convolution_13boundary_wrap_DTYPE_t *, __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf, __pyx_t_26, __pyx_pybuffernd_conv.diminfo[0].strides, __pyx_t_27, __pyx_pybuffernd_conv.diminfo[1].strides) = __pyx_v_top; + } + __pyx_L20:; + } + } + } + + /* "astropy/convolution/boundary_wrap.pyx":88 + * + * # release the GIL + * with nogil: # <<<<<<<<<<<<<< + * + * # Now run the proper convolution + */ + /*finally:*/ { + /*normal exit:*/{ + #ifdef WITH_THREAD + __Pyx_FastGIL_Forget(); + Py_BLOCK_THREADS + #endif + goto __pyx_L10; + } + __pyx_L9_error: { + #ifdef WITH_THREAD + __Pyx_FastGIL_Forget(); + Py_BLOCK_THREADS + #endif + goto __pyx_L1_error; + } + __pyx_L10:; + } + } + + /* "astropy/convolution/boundary_wrap.pyx":117 + * conv[i, j] = top + * # GIl acquired again here + * return conv # <<<<<<<<<<<<<< + * + * + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject *)__pyx_v_conv)); + __pyx_r = ((PyObject *)__pyx_v_conv); + goto __pyx_L0; + + /* "astropy/convolution/boundary_wrap.pyx":64 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve2d_boundary_wrap(np.ndarray[DTYPE_t, ndim=2] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=2] g, + * bint normalize_by_kernel): + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_conv.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_f.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_g.rcbuffer->pybuffer); + __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} + __Pyx_AddTraceback("astropy.convolution.boundary_wrap.convolve2d_boundary_wrap", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + goto __pyx_L2; + __pyx_L0:; + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_conv.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_f.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_g.rcbuffer->pybuffer); + __pyx_L2:; + __Pyx_XDECREF((PyObject *)__pyx_v_conv); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "astropy/convolution/boundary_wrap.pyx":121 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve3d_boundary_wrap(np.ndarray[DTYPE_t, ndim=3] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=3] g, + * bint normalize_by_kernel): + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7astropy_11convolution_13boundary_wrap_5convolve3d_boundary_wrap(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyMethodDef __pyx_mdef_7astropy_11convolution_13boundary_wrap_5convolve3d_boundary_wrap = {"convolve3d_boundary_wrap", (PyCFunction)__pyx_pw_7astropy_11convolution_13boundary_wrap_5convolve3d_boundary_wrap, METH_VARARGS|METH_KEYWORDS, 0}; +static 
PyObject *__pyx_pw_7astropy_11convolution_13boundary_wrap_5convolve3d_boundary_wrap(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyArrayObject *__pyx_v_f = 0; + PyArrayObject *__pyx_v_g = 0; + int __pyx_v_normalize_by_kernel; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("convolve3d_boundary_wrap (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_f,&__pyx_n_s_g,&__pyx_n_s_normalize_by_kernel,0}; + PyObject* values[3] = {0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_f)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_g)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("convolve3d_boundary_wrap", 1, 3, 3, 1); __PYX_ERR(0, 121, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_normalize_by_kernel)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("convolve3d_boundary_wrap", 1, 3, 3, 2); __PYX_ERR(0, 121, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "convolve3d_boundary_wrap") < 0)) __PYX_ERR(0, 121, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + } + __pyx_v_f = ((PyArrayObject *)values[0]); + __pyx_v_g = ((PyArrayObject *)values[1]); + __pyx_v_normalize_by_kernel = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_normalize_by_kernel == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 123, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("convolve3d_boundary_wrap", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 121, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("astropy.convolution.boundary_wrap.convolve3d_boundary_wrap", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_f), __pyx_ptype_5numpy_ndarray, 1, "f", 0))) __PYX_ERR(0, 121, __pyx_L1_error) + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_g), __pyx_ptype_5numpy_ndarray, 1, "g", 0))) __PYX_ERR(0, 122, __pyx_L1_error) + __pyx_r = __pyx_pf_7astropy_11convolution_13boundary_wrap_4convolve3d_boundary_wrap(__pyx_self, __pyx_v_f, __pyx_v_g, __pyx_v_normalize_by_kernel); + + /* function exit code */ + goto __pyx_L0; + __pyx_L1_error:; + __pyx_r = NULL; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7astropy_11convolution_13boundary_wrap_4convolve3d_boundary_wrap(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_f, PyArrayObject *__pyx_v_g, int __pyx_v_normalize_by_kernel) { + int 
__pyx_v_nx; + int __pyx_v_ny; + int __pyx_v_nz; + int __pyx_v_nkx; + int __pyx_v_nky; + int __pyx_v_nkz; + int __pyx_v_wkx; + int __pyx_v_wky; + int __pyx_v_wkz; + PyArrayObject *__pyx_v_conv = 0; + unsigned int __pyx_v_i; + unsigned int __pyx_v_j; + unsigned int __pyx_v_k; + unsigned int __pyx_v_iii; + unsigned int __pyx_v_jjj; + unsigned int __pyx_v_kkk; + int __pyx_v_ii; + int __pyx_v_jj; + int __pyx_v_kk; + int __pyx_v_iimin; + int __pyx_v_iimax; + int __pyx_v_jjmin; + int __pyx_v_jjmax; + int __pyx_v_kkmin; + int __pyx_v_kkmax; + __pyx_t_7astropy_11convolution_13boundary_wrap_DTYPE_t __pyx_v_top; + __pyx_t_7astropy_11convolution_13boundary_wrap_DTYPE_t __pyx_v_bot; + __pyx_t_7astropy_11convolution_13boundary_wrap_DTYPE_t __pyx_v_ker; + __pyx_t_7astropy_11convolution_13boundary_wrap_DTYPE_t __pyx_v_val; + __Pyx_LocalBuf_ND __pyx_pybuffernd_conv; + __Pyx_Buffer __pyx_pybuffer_conv; + __Pyx_LocalBuf_ND __pyx_pybuffernd_f; + __Pyx_Buffer __pyx_pybuffer_f; + __Pyx_LocalBuf_ND __pyx_pybuffernd_g; + __Pyx_Buffer __pyx_pybuffer_g; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyArrayObject *__pyx_t_8 = NULL; + int __pyx_t_9; + unsigned int __pyx_t_10; + int __pyx_t_11; + unsigned int __pyx_t_12; + int __pyx_t_13; + unsigned int __pyx_t_14; + int __pyx_t_15; + int __pyx_t_16; + int __pyx_t_17; + int __pyx_t_18; + int __pyx_t_19; + int __pyx_t_20; + size_t __pyx_t_21; + size_t __pyx_t_22; + size_t __pyx_t_23; + size_t __pyx_t_24; + size_t __pyx_t_25; + size_t __pyx_t_26; + size_t __pyx_t_27; + size_t __pyx_t_28; + size_t __pyx_t_29; + size_t __pyx_t_30; + size_t __pyx_t_31; + size_t __pyx_t_32; + size_t __pyx_t_33; + size_t __pyx_t_34; + size_t __pyx_t_35; + size_t __pyx_t_36; + size_t __pyx_t_37; + size_t __pyx_t_38; + __Pyx_RefNannySetupContext("convolve3d_boundary_wrap", 0); + __pyx_pybuffer_conv.pybuffer.buf = NULL; + __pyx_pybuffer_conv.refcount = 0; + __pyx_pybuffernd_conv.data = NULL; + __pyx_pybuffernd_conv.rcbuffer = &__pyx_pybuffer_conv; + __pyx_pybuffer_f.pybuffer.buf = NULL; + __pyx_pybuffer_f.refcount = 0; + __pyx_pybuffernd_f.data = NULL; + __pyx_pybuffernd_f.rcbuffer = &__pyx_pybuffer_f; + __pyx_pybuffer_g.pybuffer.buf = NULL; + __pyx_pybuffer_g.refcount = 0; + __pyx_pybuffernd_g.data = NULL; + __pyx_pybuffernd_g.rcbuffer = &__pyx_pybuffer_g; + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_f.rcbuffer->pybuffer, (PyObject*)__pyx_v_f, &__Pyx_TypeInfo_nn___pyx_t_7astropy_11convolution_13boundary_wrap_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 3, 0, __pyx_stack) == -1)) __PYX_ERR(0, 121, __pyx_L1_error) + } + __pyx_pybuffernd_f.diminfo[0].strides = __pyx_pybuffernd_f.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_f.diminfo[0].shape = __pyx_pybuffernd_f.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_f.diminfo[1].strides = __pyx_pybuffernd_f.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_f.diminfo[1].shape = __pyx_pybuffernd_f.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_f.diminfo[2].strides = __pyx_pybuffernd_f.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_f.diminfo[2].shape = __pyx_pybuffernd_f.rcbuffer->pybuffer.shape[2]; + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_g.rcbuffer->pybuffer, (PyObject*)__pyx_v_g, 
&__Pyx_TypeInfo_nn___pyx_t_7astropy_11convolution_13boundary_wrap_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 3, 0, __pyx_stack) == -1)) __PYX_ERR(0, 121, __pyx_L1_error) + } + __pyx_pybuffernd_g.diminfo[0].strides = __pyx_pybuffernd_g.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_g.diminfo[0].shape = __pyx_pybuffernd_g.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_g.diminfo[1].strides = __pyx_pybuffernd_g.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_g.diminfo[1].shape = __pyx_pybuffernd_g.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_g.diminfo[2].strides = __pyx_pybuffernd_g.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_g.diminfo[2].shape = __pyx_pybuffernd_g.rcbuffer->pybuffer.shape[2]; + + /* "astropy/convolution/boundary_wrap.pyx":125 + * bint normalize_by_kernel): + * + * if g.shape[0] % 2 != 1 or g.shape[1] % 2 != 1 or g.shape[2] % 2 != 1: # <<<<<<<<<<<<<< + * raise ValueError("Convolution kernel must have odd dimensions") + * + */ + __pyx_t_2 = ((__Pyx_mod_long((__pyx_v_g->dimensions[0]), 2) != 1) != 0); + if (!__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L4_bool_binop_done; + } + __pyx_t_2 = ((__Pyx_mod_long((__pyx_v_g->dimensions[1]), 2) != 1) != 0); + if (!__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L4_bool_binop_done; + } + __pyx_t_2 = ((__Pyx_mod_long((__pyx_v_g->dimensions[2]), 2) != 1) != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L4_bool_binop_done:; + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_wrap.pyx":126 + * + * if g.shape[0] % 2 != 1 or g.shape[1] % 2 != 1 or g.shape[2] % 2 != 1: + * raise ValueError("Convolution kernel must have odd dimensions") # <<<<<<<<<<<<<< + * + * assert f.dtype == DTYPE and g.dtype == DTYPE + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 126, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(0, 126, __pyx_L1_error) + + /* "astropy/convolution/boundary_wrap.pyx":125 + * bint normalize_by_kernel): + * + * if g.shape[0] % 2 != 1 or g.shape[1] % 2 != 1 or g.shape[2] % 2 != 1: # <<<<<<<<<<<<<< + * raise ValueError("Convolution kernel must have odd dimensions") + * + */ + } + + /* "astropy/convolution/boundary_wrap.pyx":128 + * raise ValueError("Convolution kernel must have odd dimensions") + * + * assert f.dtype == DTYPE and g.dtype == DTYPE # <<<<<<<<<<<<<< + * + * cdef int nx = f.shape[0] + */ + #ifndef CYTHON_WITHOUT_ASSERTIONS + if (unlikely(!Py_OptimizeFlag)) { + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_f), __pyx_n_s_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 128, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 128, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_5 = PyObject_RichCompare(__pyx_t_3, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 128, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 128, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L7_bool_binop_done; + } + __pyx_t_5 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_g), __pyx_n_s_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 128, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_4 = 
__Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 128, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_t_5, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 128, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 128, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_1 = __pyx_t_2; + __pyx_L7_bool_binop_done:; + if (unlikely(!__pyx_t_1)) { + PyErr_SetNone(PyExc_AssertionError); + __PYX_ERR(0, 128, __pyx_L1_error) + } + } + #endif + + /* "astropy/convolution/boundary_wrap.pyx":130 + * assert f.dtype == DTYPE and g.dtype == DTYPE + * + * cdef int nx = f.shape[0] # <<<<<<<<<<<<<< + * cdef int ny = f.shape[1] + * cdef int nz = f.shape[2] + */ + __pyx_v_nx = (__pyx_v_f->dimensions[0]); + + /* "astropy/convolution/boundary_wrap.pyx":131 + * + * cdef int nx = f.shape[0] + * cdef int ny = f.shape[1] # <<<<<<<<<<<<<< + * cdef int nz = f.shape[2] + * cdef int nkx = g.shape[0] + */ + __pyx_v_ny = (__pyx_v_f->dimensions[1]); + + /* "astropy/convolution/boundary_wrap.pyx":132 + * cdef int nx = f.shape[0] + * cdef int ny = f.shape[1] + * cdef int nz = f.shape[2] # <<<<<<<<<<<<<< + * cdef int nkx = g.shape[0] + * cdef int nky = g.shape[1] + */ + __pyx_v_nz = (__pyx_v_f->dimensions[2]); + + /* "astropy/convolution/boundary_wrap.pyx":133 + * cdef int ny = f.shape[1] + * cdef int nz = f.shape[2] + * cdef int nkx = g.shape[0] # <<<<<<<<<<<<<< + * cdef int nky = g.shape[1] + * cdef int nkz = g.shape[2] + */ + __pyx_v_nkx = (__pyx_v_g->dimensions[0]); + + /* "astropy/convolution/boundary_wrap.pyx":134 + * cdef int nz = f.shape[2] + * cdef int nkx = g.shape[0] + * cdef int nky = g.shape[1] # <<<<<<<<<<<<<< + * cdef int nkz = g.shape[2] + * cdef int wkx = nkx // 2 + */ + __pyx_v_nky = (__pyx_v_g->dimensions[1]); + + /* "astropy/convolution/boundary_wrap.pyx":135 + * cdef int nkx = g.shape[0] + * cdef int nky = g.shape[1] + * cdef int nkz = g.shape[2] # <<<<<<<<<<<<<< + * cdef int wkx = nkx // 2 + * cdef int wky = nky // 2 + */ + __pyx_v_nkz = (__pyx_v_g->dimensions[2]); + + /* "astropy/convolution/boundary_wrap.pyx":136 + * cdef int nky = g.shape[1] + * cdef int nkz = g.shape[2] + * cdef int wkx = nkx // 2 # <<<<<<<<<<<<<< + * cdef int wky = nky // 2 + * cdef int wkz = nkz // 2 + */ + __pyx_v_wkx = __Pyx_div_long(__pyx_v_nkx, 2); + + /* "astropy/convolution/boundary_wrap.pyx":137 + * cdef int nkz = g.shape[2] + * cdef int wkx = nkx // 2 + * cdef int wky = nky // 2 # <<<<<<<<<<<<<< + * cdef int wkz = nkz // 2 + * cdef np.ndarray[DTYPE_t, ndim=3] conv = np.empty([nx, ny, nz], dtype=DTYPE) + */ + __pyx_v_wky = __Pyx_div_long(__pyx_v_nky, 2); + + /* "astropy/convolution/boundary_wrap.pyx":138 + * cdef int wkx = nkx // 2 + * cdef int wky = nky // 2 + * cdef int wkz = nkz // 2 # <<<<<<<<<<<<<< + * cdef np.ndarray[DTYPE_t, ndim=3] conv = np.empty([nx, ny, nz], dtype=DTYPE) + * cdef unsigned int i, j, k, iii, jjj, kkk + */ + __pyx_v_wkz = __Pyx_div_long(__pyx_v_nkz, 2); + + /* "astropy/convolution/boundary_wrap.pyx":139 + * cdef int wky = nky // 2 + * cdef int wkz = nkz // 2 + * cdef np.ndarray[DTYPE_t, ndim=3] conv = np.empty([nx, ny, nz], dtype=DTYPE) # <<<<<<<<<<<<<< + * cdef unsigned int i, j, k, iii, jjj, kkk + * cdef int ii, jj, kk + */ + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 139, __pyx_L1_error) + 
__Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_empty); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 139, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_nx); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 139, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_ny); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 139, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_6 = __Pyx_PyInt_From_int(__pyx_v_nz); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 139, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_7 = PyList_New(3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 139, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __Pyx_GIVEREF(__pyx_t_3); + PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_3); + __Pyx_GIVEREF(__pyx_t_5); + PyList_SET_ITEM(__pyx_t_7, 1, __pyx_t_5); + __Pyx_GIVEREF(__pyx_t_6); + PyList_SET_ITEM(__pyx_t_7, 2, __pyx_t_6); + __pyx_t_3 = 0; + __pyx_t_5 = 0; + __pyx_t_6 = 0; + __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 139, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GIVEREF(__pyx_t_7); + PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_7); + __pyx_t_7 = 0; + __pyx_t_7 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 139, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 139, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + if (PyDict_SetItem(__pyx_t_7, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 139, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_6, __pyx_t_7); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 139, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 139, __pyx_L1_error) + __pyx_t_8 = ((PyArrayObject *)__pyx_t_5); + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_conv.rcbuffer->pybuffer, (PyObject*)__pyx_t_8, &__Pyx_TypeInfo_nn___pyx_t_7astropy_11convolution_13boundary_wrap_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 3, 0, __pyx_stack) == -1)) { + __pyx_v_conv = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf = NULL; + __PYX_ERR(0, 139, __pyx_L1_error) + } else {__pyx_pybuffernd_conv.diminfo[0].strides = __pyx_pybuffernd_conv.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_conv.diminfo[0].shape = __pyx_pybuffernd_conv.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_conv.diminfo[1].strides = __pyx_pybuffernd_conv.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_conv.diminfo[1].shape = __pyx_pybuffernd_conv.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_conv.diminfo[2].strides = __pyx_pybuffernd_conv.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_conv.diminfo[2].shape = __pyx_pybuffernd_conv.rcbuffer->pybuffer.shape[2]; + } + } + __pyx_t_8 = 0; + __pyx_v_conv = ((PyArrayObject *)__pyx_t_5); + __pyx_t_5 = 0; + + /* "astropy/convolution/boundary_wrap.pyx":148 + * + * # release the GIL + * with nogil: # <<<<<<<<<<<<<< + * + * # Now run the proper convolution + */ + { + #ifdef WITH_THREAD + PyThreadState *_save; + Py_UNBLOCK_THREADS + __Pyx_FastGIL_Remember(); + #endif + /*try:*/ { + + /* "astropy/convolution/boundary_wrap.pyx":151 + * + 
* # Now run the proper convolution + * for i in range(nx): # <<<<<<<<<<<<<< + * for j in range(ny): + * for k in range(nz): + */ + __pyx_t_9 = __pyx_v_nx; + for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_9; __pyx_t_10+=1) { + __pyx_v_i = __pyx_t_10; + + /* "astropy/convolution/boundary_wrap.pyx":152 + * # Now run the proper convolution + * for i in range(nx): + * for j in range(ny): # <<<<<<<<<<<<<< + * for k in range(nz): + * top = 0. + */ + __pyx_t_11 = __pyx_v_ny; + for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) { + __pyx_v_j = __pyx_t_12; + + /* "astropy/convolution/boundary_wrap.pyx":153 + * for i in range(nx): + * for j in range(ny): + * for k in range(nz): # <<<<<<<<<<<<<< + * top = 0. + * bot = 0. + */ + __pyx_t_13 = __pyx_v_nz; + for (__pyx_t_14 = 0; __pyx_t_14 < __pyx_t_13; __pyx_t_14+=1) { + __pyx_v_k = __pyx_t_14; + + /* "astropy/convolution/boundary_wrap.pyx":154 + * for j in range(ny): + * for k in range(nz): + * top = 0. # <<<<<<<<<<<<<< + * bot = 0. + * iimin = i - wkx + */ + __pyx_v_top = 0.; + + /* "astropy/convolution/boundary_wrap.pyx":155 + * for k in range(nz): + * top = 0. + * bot = 0. # <<<<<<<<<<<<<< + * iimin = i - wkx + * iimax = i + wkx + 1 + */ + __pyx_v_bot = 0.; + + /* "astropy/convolution/boundary_wrap.pyx":156 + * top = 0. + * bot = 0. + * iimin = i - wkx # <<<<<<<<<<<<<< + * iimax = i + wkx + 1 + * jjmin = j - wky + */ + __pyx_v_iimin = (__pyx_v_i - __pyx_v_wkx); + + /* "astropy/convolution/boundary_wrap.pyx":157 + * bot = 0. + * iimin = i - wkx + * iimax = i + wkx + 1 # <<<<<<<<<<<<<< + * jjmin = j - wky + * jjmax = j + wky + 1 + */ + __pyx_v_iimax = ((__pyx_v_i + __pyx_v_wkx) + 1); + + /* "astropy/convolution/boundary_wrap.pyx":158 + * iimin = i - wkx + * iimax = i + wkx + 1 + * jjmin = j - wky # <<<<<<<<<<<<<< + * jjmax = j + wky + 1 + * kkmin = k - wkz + */ + __pyx_v_jjmin = (__pyx_v_j - __pyx_v_wky); + + /* "astropy/convolution/boundary_wrap.pyx":159 + * iimax = i + wkx + 1 + * jjmin = j - wky + * jjmax = j + wky + 1 # <<<<<<<<<<<<<< + * kkmin = k - wkz + * kkmax = k + wkz + 1 + */ + __pyx_v_jjmax = ((__pyx_v_j + __pyx_v_wky) + 1); + + /* "astropy/convolution/boundary_wrap.pyx":160 + * jjmin = j - wky + * jjmax = j + wky + 1 + * kkmin = k - wkz # <<<<<<<<<<<<<< + * kkmax = k + wkz + 1 + * for ii in range(iimin, iimax): + */ + __pyx_v_kkmin = (__pyx_v_k - __pyx_v_wkz); + + /* "astropy/convolution/boundary_wrap.pyx":161 + * jjmax = j + wky + 1 + * kkmin = k - wkz + * kkmax = k + wkz + 1 # <<<<<<<<<<<<<< + * for ii in range(iimin, iimax): + * for jj in range(jjmin, jjmax): + */ + __pyx_v_kkmax = ((__pyx_v_k + __pyx_v_wkz) + 1); + + /* "astropy/convolution/boundary_wrap.pyx":162 + * kkmin = k - wkz + * kkmax = k + wkz + 1 + * for ii in range(iimin, iimax): # <<<<<<<<<<<<<< + * for jj in range(jjmin, jjmax): + * for kk in range(kkmin, kkmax): + */ + __pyx_t_15 = __pyx_v_iimax; + for (__pyx_t_16 = __pyx_v_iimin; __pyx_t_16 < __pyx_t_15; __pyx_t_16+=1) { + __pyx_v_ii = __pyx_t_16; + + /* "astropy/convolution/boundary_wrap.pyx":163 + * kkmax = k + wkz + 1 + * for ii in range(iimin, iimax): + * for jj in range(jjmin, jjmax): # <<<<<<<<<<<<<< + * for kk in range(kkmin, kkmax): + * iii = ii % nx + */ + __pyx_t_17 = __pyx_v_jjmax; + for (__pyx_t_18 = __pyx_v_jjmin; __pyx_t_18 < __pyx_t_17; __pyx_t_18+=1) { + __pyx_v_jj = __pyx_t_18; + + /* "astropy/convolution/boundary_wrap.pyx":164 + * for ii in range(iimin, iimax): + * for jj in range(jjmin, jjmax): + * for kk in range(kkmin, kkmax): # <<<<<<<<<<<<<< + * iii = ii % nx + * jjj = jj % ny + */ + 
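+  /* The wrap behaviour in the innermost loops relies on Python's floored
+  * modulo: for positive n, ii % n always lies in [0, n), even when ii is
+  * negative, so out-of-range offsets fold back into the array. Cython
+  * therefore emits __Pyx_mod_int (Python semantics) instead of C's native
+  * '%', which truncates toward zero, and guards each use with the
+  * ZeroDivisionError checks below (briefly re-acquiring the GIL only to
+  * raise). The difference in one line:
+  *
+  * assert -1 % 5 == 4   # Python / __Pyx_mod_int wraps into range
+  * # in C, -1 % 5 == -1, which would index out of bounds
+  */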
__pyx_t_19 = __pyx_v_kkmax; + for (__pyx_t_20 = __pyx_v_kkmin; __pyx_t_20 < __pyx_t_19; __pyx_t_20+=1) { + __pyx_v_kk = __pyx_t_20; + + /* "astropy/convolution/boundary_wrap.pyx":165 + * for jj in range(jjmin, jjmax): + * for kk in range(kkmin, kkmax): + * iii = ii % nx # <<<<<<<<<<<<<< + * jjj = jj % ny + * kkk = kk % nz + */ + if (unlikely(__pyx_v_nx == 0)) { + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + __PYX_ERR(0, 165, __pyx_L10_error) + } + __pyx_v_iii = __Pyx_mod_int(__pyx_v_ii, __pyx_v_nx); + + /* "astropy/convolution/boundary_wrap.pyx":166 + * for kk in range(kkmin, kkmax): + * iii = ii % nx + * jjj = jj % ny # <<<<<<<<<<<<<< + * kkk = kk % nz + * val = f[iii, jjj, kkk] + */ + if (unlikely(__pyx_v_ny == 0)) { + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + __PYX_ERR(0, 166, __pyx_L10_error) + } + __pyx_v_jjj = __Pyx_mod_int(__pyx_v_jj, __pyx_v_ny); + + /* "astropy/convolution/boundary_wrap.pyx":167 + * iii = ii % nx + * jjj = jj % ny + * kkk = kk % nz # <<<<<<<<<<<<<< + * val = f[iii, jjj, kkk] + * ker = g[(nkx - 1 - (wkx + ii - i)), + */ + if (unlikely(__pyx_v_nz == 0)) { + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + __PYX_ERR(0, 167, __pyx_L10_error) + } + __pyx_v_kkk = __Pyx_mod_int(__pyx_v_kk, __pyx_v_nz); + + /* "astropy/convolution/boundary_wrap.pyx":168 + * jjj = jj % ny + * kkk = kk % nz + * val = f[iii, jjj, kkk] # <<<<<<<<<<<<<< + * ker = g[(nkx - 1 - (wkx + ii - i)), + * (nky - 1 - (wky + jj - j)), + */ + __pyx_t_21 = __pyx_v_iii; + __pyx_t_22 = __pyx_v_jjj; + __pyx_t_23 = __pyx_v_kkk; + __pyx_v_val = (*__Pyx_BufPtrStrided3d(__pyx_t_7astropy_11convolution_13boundary_wrap_DTYPE_t *, __pyx_pybuffernd_f.rcbuffer->pybuffer.buf, __pyx_t_21, __pyx_pybuffernd_f.diminfo[0].strides, __pyx_t_22, __pyx_pybuffernd_f.diminfo[1].strides, __pyx_t_23, __pyx_pybuffernd_f.diminfo[2].strides)); + + /* "astropy/convolution/boundary_wrap.pyx":169 + * kkk = kk % nz + * val = f[iii, jjj, kkk] + * ker = g[(nkx - 1 - (wkx + ii - i)), # <<<<<<<<<<<<<< + * (nky - 1 - (wky + jj - j)), + * (nkz - 1 - (wkz + kk - k))] + */ + __pyx_t_24 = ((unsigned int)((__pyx_v_nkx - 1) - ((__pyx_v_wkx + __pyx_v_ii) - __pyx_v_i))); + __pyx_t_25 = ((unsigned int)((__pyx_v_nky - 1) - ((__pyx_v_wky + __pyx_v_jj) - __pyx_v_j))); + __pyx_t_26 = ((unsigned int)((__pyx_v_nkz - 1) - ((__pyx_v_wkz + __pyx_v_kk) - __pyx_v_k))); + __pyx_v_ker = (*__Pyx_BufPtrStrided3d(__pyx_t_7astropy_11convolution_13boundary_wrap_DTYPE_t *, __pyx_pybuffernd_g.rcbuffer->pybuffer.buf, __pyx_t_24, __pyx_pybuffernd_g.diminfo[0].strides, __pyx_t_25, __pyx_pybuffernd_g.diminfo[1].strides, __pyx_t_26, __pyx_pybuffernd_g.diminfo[2].strides)); + + /* "astropy/convolution/boundary_wrap.pyx":172 + * (nky - 1 - (wky + jj - j)), + * (nkz - 1 - (wkz + kk - k))] + * if not npy_isnan(val): # <<<<<<<<<<<<<< + * top += val * ker + * bot += ker + */ + __pyx_t_1 = ((!(npy_isnan(__pyx_v_val) != 0)) != 0); + if (__pyx_t_1) { + + /* 
"astropy/convolution/boundary_wrap.pyx":173 + * (nkz - 1 - (wkz + kk - k))] + * if not npy_isnan(val): + * top += val * ker # <<<<<<<<<<<<<< + * bot += ker + * if normalize_by_kernel: + */ + __pyx_v_top = (__pyx_v_top + (__pyx_v_val * __pyx_v_ker)); + + /* "astropy/convolution/boundary_wrap.pyx":174 + * if not npy_isnan(val): + * top += val * ker + * bot += ker # <<<<<<<<<<<<<< + * if normalize_by_kernel: + * if bot == 0: + */ + __pyx_v_bot = (__pyx_v_bot + __pyx_v_ker); + + /* "astropy/convolution/boundary_wrap.pyx":172 + * (nky - 1 - (wky + jj - j)), + * (nkz - 1 - (wkz + kk - k))] + * if not npy_isnan(val): # <<<<<<<<<<<<<< + * top += val * ker + * bot += ker + */ + } + } + } + } + + /* "astropy/convolution/boundary_wrap.pyx":175 + * top += val * ker + * bot += ker + * if normalize_by_kernel: # <<<<<<<<<<<<<< + * if bot == 0: + * conv[i, j, k] = f[i, j, k] + */ + __pyx_t_1 = (__pyx_v_normalize_by_kernel != 0); + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_wrap.pyx":176 + * bot += ker + * if normalize_by_kernel: + * if bot == 0: # <<<<<<<<<<<<<< + * conv[i, j, k] = f[i, j, k] + * else: + */ + __pyx_t_1 = ((__pyx_v_bot == 0.0) != 0); + if (__pyx_t_1) { + + /* "astropy/convolution/boundary_wrap.pyx":177 + * if normalize_by_kernel: + * if bot == 0: + * conv[i, j, k] = f[i, j, k] # <<<<<<<<<<<<<< + * else: + * conv[i, j, k] = top / bot + */ + __pyx_t_27 = __pyx_v_i; + __pyx_t_28 = __pyx_v_j; + __pyx_t_29 = __pyx_v_k; + __pyx_t_30 = __pyx_v_i; + __pyx_t_31 = __pyx_v_j; + __pyx_t_32 = __pyx_v_k; + *__Pyx_BufPtrStrided3d(__pyx_t_7astropy_11convolution_13boundary_wrap_DTYPE_t *, __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf, __pyx_t_30, __pyx_pybuffernd_conv.diminfo[0].strides, __pyx_t_31, __pyx_pybuffernd_conv.diminfo[1].strides, __pyx_t_32, __pyx_pybuffernd_conv.diminfo[2].strides) = (*__Pyx_BufPtrStrided3d(__pyx_t_7astropy_11convolution_13boundary_wrap_DTYPE_t *, __pyx_pybuffernd_f.rcbuffer->pybuffer.buf, __pyx_t_27, __pyx_pybuffernd_f.diminfo[0].strides, __pyx_t_28, __pyx_pybuffernd_f.diminfo[1].strides, __pyx_t_29, __pyx_pybuffernd_f.diminfo[2].strides)); + + /* "astropy/convolution/boundary_wrap.pyx":176 + * bot += ker + * if normalize_by_kernel: + * if bot == 0: # <<<<<<<<<<<<<< + * conv[i, j, k] = f[i, j, k] + * else: + */ + goto __pyx_L26; + } + + /* "astropy/convolution/boundary_wrap.pyx":179 + * conv[i, j, k] = f[i, j, k] + * else: + * conv[i, j, k] = top / bot # <<<<<<<<<<<<<< + * else: + * conv[i, j, k] = top + */ + /*else*/ { + if (unlikely(__pyx_v_bot == 0)) { + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + PyErr_SetString(PyExc_ZeroDivisionError, "float division"); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + __PYX_ERR(0, 179, __pyx_L10_error) + } + __pyx_t_33 = __pyx_v_i; + __pyx_t_34 = __pyx_v_j; + __pyx_t_35 = __pyx_v_k; + *__Pyx_BufPtrStrided3d(__pyx_t_7astropy_11convolution_13boundary_wrap_DTYPE_t *, __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf, __pyx_t_33, __pyx_pybuffernd_conv.diminfo[0].strides, __pyx_t_34, __pyx_pybuffernd_conv.diminfo[1].strides, __pyx_t_35, __pyx_pybuffernd_conv.diminfo[2].strides) = (__pyx_v_top / __pyx_v_bot); + } + __pyx_L26:; + + /* "astropy/convolution/boundary_wrap.pyx":175 + * top += val * ker + * bot += ker + * if normalize_by_kernel: # <<<<<<<<<<<<<< + * if bot == 0: + * conv[i, j, k] = f[i, j, k] + */ + goto __pyx_L25; + } + + /* "astropy/convolution/boundary_wrap.pyx":181 + * conv[i, j, k] = top / bot + * else: + * conv[i, j, k] = top # 
<<<<<<<<<<<<<< + * # GIL acquired again here + * return conv + */ + /*else*/ { + __pyx_t_36 = __pyx_v_i; + __pyx_t_37 = __pyx_v_j; + __pyx_t_38 = __pyx_v_k; + *__Pyx_BufPtrStrided3d(__pyx_t_7astropy_11convolution_13boundary_wrap_DTYPE_t *, __pyx_pybuffernd_conv.rcbuffer->pybuffer.buf, __pyx_t_36, __pyx_pybuffernd_conv.diminfo[0].strides, __pyx_t_37, __pyx_pybuffernd_conv.diminfo[1].strides, __pyx_t_38, __pyx_pybuffernd_conv.diminfo[2].strides) = __pyx_v_top; + } + __pyx_L25:; + } + } + } + } + + /* "astropy/convolution/boundary_wrap.pyx":148 + * + * # release the GIL + * with nogil: # <<<<<<<<<<<<<< + * + * # Now run the proper convolution + */ + /*finally:*/ { + /*normal exit:*/{ + #ifdef WITH_THREAD + __Pyx_FastGIL_Forget(); + Py_BLOCK_THREADS + #endif + goto __pyx_L11; + } + __pyx_L10_error: { + #ifdef WITH_THREAD + __Pyx_FastGIL_Forget(); + Py_BLOCK_THREADS + #endif + goto __pyx_L1_error; + } + __pyx_L11:; + } + } + + /* "astropy/convolution/boundary_wrap.pyx":183 + * conv[i, j, k] = top + * # GIL acquired again here + * return conv # <<<<<<<<<<<<<< + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject *)__pyx_v_conv)); + __pyx_r = ((PyObject *)__pyx_v_conv); + goto __pyx_L0; + + /* "astropy/convolution/boundary_wrap.pyx":121 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve3d_boundary_wrap(np.ndarray[DTYPE_t, ndim=3] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=3] g, + * bint normalize_by_kernel): + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_conv.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_f.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_g.rcbuffer->pybuffer); + __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} + __Pyx_AddTraceback("astropy.convolution.boundary_wrap.convolve3d_boundary_wrap", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + goto __pyx_L2; + __pyx_L0:; + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_conv.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_f.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_g.rcbuffer->pybuffer); + __pyx_L2:; + __Pyx_XDECREF((PyObject *)__pyx_v_conv); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":214 + * # experimental exception made for __getbuffer__ and __releasebuffer__ + * # -- the details of this may change. + * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< + * # This implementation of getbuffer is geared towards Cython + * # requirements, and does not yet fulfill the PEP.
+ */ + +/* Python wrapper */ +static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ +static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); + __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { + int __pyx_v_copy_shape; + int __pyx_v_i; + int __pyx_v_ndim; + int __pyx_v_endian_detector; + int __pyx_v_little_endian; + int __pyx_v_t; + char *__pyx_v_f; + PyArray_Descr *__pyx_v_descr = 0; + int __pyx_v_offset; + int __pyx_v_hasfields; + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + int __pyx_t_5; + PyObject *__pyx_t_6 = NULL; + char *__pyx_t_7; + __Pyx_RefNannySetupContext("__getbuffer__", 0); + if (__pyx_v_info != NULL) { + __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); + __Pyx_GIVEREF(__pyx_v_info->obj); + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":220 + * # of flags + * + * if info == NULL: return # <<<<<<<<<<<<<< + * + * cdef int copy_shape, i, ndim + */ + __pyx_t_1 = ((__pyx_v_info == NULL) != 0); + if (__pyx_t_1) { + __pyx_r = 0; + goto __pyx_L0; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":223 + * + * cdef int copy_shape, i, ndim + * cdef int endian_detector = 1 # <<<<<<<<<<<<<< + * cdef bint little_endian = ((&endian_detector)[0] != 0) + * + */ + __pyx_v_endian_detector = 1; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":224 + * cdef int copy_shape, i, ndim + * cdef int endian_detector = 1 + * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< + * + * ndim = PyArray_NDIM(self) + */ + __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":226 + * cdef bint little_endian = ((&endian_detector)[0] != 0) + * + * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< + * + * if sizeof(npy_intp) != sizeof(Py_ssize_t): + */ + __pyx_v_ndim = PyArray_NDIM(__pyx_v_self); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":228 + * ndim = PyArray_NDIM(self) + * + * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< + * copy_shape = 1 + * else: + */ + __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); + if (__pyx_t_1) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":229 + * + * if sizeof(npy_intp) != sizeof(Py_ssize_t): + * copy_shape = 1 # <<<<<<<<<<<<<< + * else: + * copy_shape = 0 + */ + __pyx_v_copy_shape = 1; + + /* 
"../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":228 + * ndim = PyArray_NDIM(self) + * + * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< + * copy_shape = 1 + * else: + */ + goto __pyx_L4; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":231 + * copy_shape = 1 + * else: + * copy_shape = 0 # <<<<<<<<<<<<<< + * + * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) + */ + /*else*/ { + __pyx_v_copy_shape = 0; + } + __pyx_L4:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":233 + * copy_shape = 0 + * + * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< + * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): + * raise ValueError(u"ndarray is not C contiguous") + */ + __pyx_t_2 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0); + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L6_bool_binop_done; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":234 + * + * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) + * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< + * raise ValueError(u"ndarray is not C contiguous") + * + */ + __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS) != 0)) != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L6_bool_binop_done:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":233 + * copy_shape = 0 + * + * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< + * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): + * raise ValueError(u"ndarray is not C contiguous") + */ + if (__pyx_t_1) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":235 + * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) + * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): + * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< + * + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 235, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 235, __pyx_L1_error) + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":233 + * copy_shape = 0 + * + * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< + * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): + * raise ValueError(u"ndarray is not C contiguous") + */ + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":237 + * raise ValueError(u"ndarray is not C contiguous") + * + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< + * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): + * raise ValueError(u"ndarray is 
not Fortran contiguous") + */ + __pyx_t_2 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0); + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L9_bool_binop_done; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":238 + * + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) + * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< + * raise ValueError(u"ndarray is not Fortran contiguous") + * + */ + __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS) != 0)) != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L9_bool_binop_done:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":237 + * raise ValueError(u"ndarray is not C contiguous") + * + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< + * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): + * raise ValueError(u"ndarray is not Fortran contiguous") + */ + if (__pyx_t_1) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":239 + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) + * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): + * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< + * + * info.buf = PyArray_DATA(self) + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 239, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 239, __pyx_L1_error) + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":237 + * raise ValueError(u"ndarray is not C contiguous") + * + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< + * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): + * raise ValueError(u"ndarray is not Fortran contiguous") + */ + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":241 + * raise ValueError(u"ndarray is not Fortran contiguous") + * + * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< + * info.ndim = ndim + * if copy_shape: + */ + __pyx_v_info->buf = PyArray_DATA(__pyx_v_self); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":242 + * + * info.buf = PyArray_DATA(self) + * info.ndim = ndim # <<<<<<<<<<<<<< + * if copy_shape: + * # Allocate new buffer for strides and shape info. + */ + __pyx_v_info->ndim = __pyx_v_ndim; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":243 + * info.buf = PyArray_DATA(self) + * info.ndim = ndim + * if copy_shape: # <<<<<<<<<<<<<< + * # Allocate new buffer for strides and shape info. + * # This is allocated as one block, strides first. + */ + __pyx_t_1 = (__pyx_v_copy_shape != 0); + if (__pyx_t_1) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":246 + * # Allocate new buffer for strides and shape info. 
+ * # This is allocated as one block, strides first. + * info.strides = PyObject_Malloc(sizeof(Py_ssize_t) * 2 * ndim) # <<<<<<<<<<<<<< + * info.shape = info.strides + ndim + * for i in range(ndim): + */ + __pyx_v_info->strides = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * 2) * ((size_t)__pyx_v_ndim)))); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":247 + * # This is allocated as one block, strides first. + * info.strides = PyObject_Malloc(sizeof(Py_ssize_t) * 2 * ndim) + * info.shape = info.strides + ndim # <<<<<<<<<<<<<< + * for i in range(ndim): + * info.strides[i] = PyArray_STRIDES(self)[i] + */ + __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":248 + * info.strides = PyObject_Malloc(sizeof(Py_ssize_t) * 2 * ndim) + * info.shape = info.strides + ndim + * for i in range(ndim): # <<<<<<<<<<<<<< + * info.strides[i] = PyArray_STRIDES(self)[i] + * info.shape[i] = PyArray_DIMS(self)[i] + */ + __pyx_t_4 = __pyx_v_ndim; + for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { + __pyx_v_i = __pyx_t_5; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":249 + * info.shape = info.strides + ndim + * for i in range(ndim): + * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< + * info.shape[i] = PyArray_DIMS(self)[i] + * else: + */ + (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":250 + * for i in range(ndim): + * info.strides[i] = PyArray_STRIDES(self)[i] + * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< + * else: + * info.strides = PyArray_STRIDES(self) + */ + (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]); + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":243 + * info.buf = PyArray_DATA(self) + * info.ndim = ndim + * if copy_shape: # <<<<<<<<<<<<<< + * # Allocate new buffer for strides and shape info. + * # This is allocated as one block, strides first. 
+ */ + goto __pyx_L11; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":252 + * info.shape[i] = PyArray_DIMS(self)[i] + * else: + * info.strides = PyArray_STRIDES(self) # <<<<<<<<<<<<<< + * info.shape = PyArray_DIMS(self) + * info.suboffsets = NULL + */ + /*else*/ { + __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self)); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":253 + * else: + * info.strides = PyArray_STRIDES(self) + * info.shape = PyArray_DIMS(self) # <<<<<<<<<<<<<< + * info.suboffsets = NULL + * info.itemsize = PyArray_ITEMSIZE(self) + */ + __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self)); + } + __pyx_L11:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":254 + * info.strides = PyArray_STRIDES(self) + * info.shape = PyArray_DIMS(self) + * info.suboffsets = NULL # <<<<<<<<<<<<<< + * info.itemsize = PyArray_ITEMSIZE(self) + * info.readonly = not PyArray_ISWRITEABLE(self) + */ + __pyx_v_info->suboffsets = NULL; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":255 + * info.shape = PyArray_DIMS(self) + * info.suboffsets = NULL + * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< + * info.readonly = not PyArray_ISWRITEABLE(self) + * + */ + __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":256 + * info.suboffsets = NULL + * info.itemsize = PyArray_ITEMSIZE(self) + * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< + * + * cdef int t + */ + __pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0)); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":259 + * + * cdef int t + * cdef char* f = NULL # <<<<<<<<<<<<<< + * cdef dtype descr = self.descr + * cdef int offset + */ + __pyx_v_f = NULL; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":260 + * cdef int t + * cdef char* f = NULL + * cdef dtype descr = self.descr # <<<<<<<<<<<<<< + * cdef int offset + * + */ + __pyx_t_3 = ((PyObject *)__pyx_v_self->descr); + __Pyx_INCREF(__pyx_t_3); + __pyx_v_descr = ((PyArray_Descr *)__pyx_t_3); + __pyx_t_3 = 0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":263 + * cdef int offset + * + * cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<< + * + * if not hasfields and not copy_shape: + */ + __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":265 + * cdef bint hasfields = PyDataType_HASFIELDS(descr) + * + * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< + * # do not call releasebuffer + * info.obj = None + */ + __pyx_t_2 = ((!(__pyx_v_hasfields != 0)) != 0); + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L15_bool_binop_done; + } + 
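+  /* The "not hasfields and not copy_shape" test being evaluated here chooses
+  * what goes into info.obj: None when nothing extra was allocated (so
+  * __releasebuffer__ need not run), or the exporting array itself otherwise.
+  * From Python the same filled-in Py_buffer fields are observable through a
+  * memoryview (NumPy's own buffer exporter populates them); a small sketch,
+  * assuming a C-contiguous float64 array on a typical 64-bit build:
+  *
+  * import numpy as np
+  * a = np.zeros((2, 3))
+  * m = memoryview(a)              # buffer-protocol export
+  * assert m.ndim == 2 and m.shape == (2, 3)
+  * assert m.strides == (24, 8)    # 8-byte items, C order
+  * assert m.format == 'd' and not m.readonly
+  * assert m.obj is a              # exporter kept alive until release
+  */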
__pyx_t_2 = ((!(__pyx_v_copy_shape != 0)) != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L15_bool_binop_done:; + if (__pyx_t_1) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":267 + * if not hasfields and not copy_shape: + * # do not call releasebuffer + * info.obj = None # <<<<<<<<<<<<<< + * else: + * # need to call releasebuffer + */ + __Pyx_INCREF(Py_None); + __Pyx_GIVEREF(Py_None); + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); + __pyx_v_info->obj = Py_None; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":265 + * cdef bint hasfields = PyDataType_HASFIELDS(descr) + * + * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< + * # do not call releasebuffer + * info.obj = None + */ + goto __pyx_L14; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":270 + * else: + * # need to call releasebuffer + * info.obj = self # <<<<<<<<<<<<<< + * + * if not hasfields: + */ + /*else*/ { + __Pyx_INCREF(((PyObject *)__pyx_v_self)); + __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); + __pyx_v_info->obj = ((PyObject *)__pyx_v_self); + } + __pyx_L14:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":272 + * info.obj = self + * + * if not hasfields: # <<<<<<<<<<<<<< + * t = descr.type_num + * if ((descr.byteorder == c'>' and little_endian) or + */ + __pyx_t_1 = ((!(__pyx_v_hasfields != 0)) != 0); + if (__pyx_t_1) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":273 + * + * if not hasfields: + * t = descr.type_num # <<<<<<<<<<<<<< + * if ((descr.byteorder == c'>' and little_endian) or + * (descr.byteorder == c'<' and not little_endian)): + */ + __pyx_t_4 = __pyx_v_descr->type_num; + __pyx_v_t = __pyx_t_4; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":274 + * if not hasfields: + * t = descr.type_num + * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< + * (descr.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + */ + __pyx_t_2 = ((__pyx_v_descr->byteorder == '>') != 0); + if (!__pyx_t_2) { + goto __pyx_L20_next_or; + } else { + } + __pyx_t_2 = (__pyx_v_little_endian != 0); + if (!__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L19_bool_binop_done; + } + __pyx_L20_next_or:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":275 + * t = descr.type_num + * if ((descr.byteorder == c'>' and little_endian) or + * (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< + * raise ValueError(u"Non-native byte order not supported") + * if t == NPY_BYTE: f = "b" + */ + __pyx_t_2 = ((__pyx_v_descr->byteorder == '<') != 0); + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L19_bool_binop_done; + } + __pyx_t_2 = ((!(__pyx_v_little_endian != 0)) != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L19_bool_binop_done:; + + /* 
"../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":274 + * if not hasfields: + * t = descr.type_num + * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< + * (descr.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + */ + if (__pyx_t_1) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":276 + * if ((descr.byteorder == c'>' and little_endian) or + * (descr.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< + * if t == NPY_BYTE: f = "b" + * elif t == NPY_UBYTE: f = "B" + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 276, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 276, __pyx_L1_error) + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":274 + * if not hasfields: + * t = descr.type_num + * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< + * (descr.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + */ + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":277 + * (descr.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< + * elif t == NPY_UBYTE: f = "B" + * elif t == NPY_SHORT: f = "h" + */ + switch (__pyx_v_t) { + case NPY_BYTE: + __pyx_v_f = ((char *)"b"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":278 + * raise ValueError(u"Non-native byte order not supported") + * if t == NPY_BYTE: f = "b" + * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< + * elif t == NPY_SHORT: f = "h" + * elif t == NPY_USHORT: f = "H" + */ + case NPY_UBYTE: + __pyx_v_f = ((char *)"B"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":279 + * if t == NPY_BYTE: f = "b" + * elif t == NPY_UBYTE: f = "B" + * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< + * elif t == NPY_USHORT: f = "H" + * elif t == NPY_INT: f = "i" + */ + case NPY_SHORT: + __pyx_v_f = ((char *)"h"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":280 + * elif t == NPY_UBYTE: f = "B" + * elif t == NPY_SHORT: f = "h" + * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< + * elif t == NPY_INT: f = "i" + * elif t == NPY_UINT: f = "I" + */ + case NPY_USHORT: + __pyx_v_f = ((char *)"H"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":281 + * elif t == NPY_SHORT: f = "h" + * elif t == NPY_USHORT: f = "H" + * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< + * elif t == NPY_UINT: f = "I" + * elif t == NPY_LONG: f = "l" + */ + case NPY_INT: + __pyx_v_f = ((char *)"i"); + break; + + /* 
"../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":282 + * elif t == NPY_USHORT: f = "H" + * elif t == NPY_INT: f = "i" + * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< + * elif t == NPY_LONG: f = "l" + * elif t == NPY_ULONG: f = "L" + */ + case NPY_UINT: + __pyx_v_f = ((char *)"I"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":283 + * elif t == NPY_INT: f = "i" + * elif t == NPY_UINT: f = "I" + * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< + * elif t == NPY_ULONG: f = "L" + * elif t == NPY_LONGLONG: f = "q" + */ + case NPY_LONG: + __pyx_v_f = ((char *)"l"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":284 + * elif t == NPY_UINT: f = "I" + * elif t == NPY_LONG: f = "l" + * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< + * elif t == NPY_LONGLONG: f = "q" + * elif t == NPY_ULONGLONG: f = "Q" + */ + case NPY_ULONG: + __pyx_v_f = ((char *)"L"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":285 + * elif t == NPY_LONG: f = "l" + * elif t == NPY_ULONG: f = "L" + * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< + * elif t == NPY_ULONGLONG: f = "Q" + * elif t == NPY_FLOAT: f = "f" + */ + case NPY_LONGLONG: + __pyx_v_f = ((char *)"q"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":286 + * elif t == NPY_ULONG: f = "L" + * elif t == NPY_LONGLONG: f = "q" + * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< + * elif t == NPY_FLOAT: f = "f" + * elif t == NPY_DOUBLE: f = "d" + */ + case NPY_ULONGLONG: + __pyx_v_f = ((char *)"Q"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":287 + * elif t == NPY_LONGLONG: f = "q" + * elif t == NPY_ULONGLONG: f = "Q" + * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< + * elif t == NPY_DOUBLE: f = "d" + * elif t == NPY_LONGDOUBLE: f = "g" + */ + case NPY_FLOAT: + __pyx_v_f = ((char *)"f"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":288 + * elif t == NPY_ULONGLONG: f = "Q" + * elif t == NPY_FLOAT: f = "f" + * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< + * elif t == NPY_LONGDOUBLE: f = "g" + * elif t == NPY_CFLOAT: f = "Zf" + */ + case NPY_DOUBLE: + __pyx_v_f = ((char *)"d"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":289 + * elif t == NPY_FLOAT: f = "f" + * elif t == NPY_DOUBLE: f = "d" + * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< + * elif t == NPY_CFLOAT: f = "Zf" + * elif t == NPY_CDOUBLE: f = "Zd" + */ + case NPY_LONGDOUBLE: + __pyx_v_f = ((char *)"g"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":290 + * elif t == NPY_DOUBLE: f = "d" + * elif t == NPY_LONGDOUBLE: f = "g" + * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< + * elif t == NPY_CDOUBLE: f = "Zd" + * elif t == NPY_CLONGDOUBLE: f = "Zg" + */ 
+ case NPY_CFLOAT: + __pyx_v_f = ((char *)"Zf"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":291 + * elif t == NPY_LONGDOUBLE: f = "g" + * elif t == NPY_CFLOAT: f = "Zf" + * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< + * elif t == NPY_CLONGDOUBLE: f = "Zg" + * elif t == NPY_OBJECT: f = "O" + */ + case NPY_CDOUBLE: + __pyx_v_f = ((char *)"Zd"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":292 + * elif t == NPY_CFLOAT: f = "Zf" + * elif t == NPY_CDOUBLE: f = "Zd" + * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< + * elif t == NPY_OBJECT: f = "O" + * else: + */ + case NPY_CLONGDOUBLE: + __pyx_v_f = ((char *)"Zg"); + break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":293 + * elif t == NPY_CDOUBLE: f = "Zd" + * elif t == NPY_CLONGDOUBLE: f = "Zg" + * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< + * else: + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) + */ + case NPY_OBJECT: + __pyx_v_f = ((char *)"O"); + break; + default: + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":295 + * elif t == NPY_OBJECT: f = "O" + * else: + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< + * info.format = f + * return + */ + __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 295, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_6 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_3); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 295, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 295, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_GIVEREF(__pyx_t_6); + PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6); + __pyx_t_6 = 0; + __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 295, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_Raise(__pyx_t_6, 0, 0, 0); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __PYX_ERR(1, 295, __pyx_L1_error) + break; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":296 + * else: + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) + * info.format = f # <<<<<<<<<<<<<< + * return + * else: + */ + __pyx_v_info->format = __pyx_v_f; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":297 + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) + * info.format = f + * return # <<<<<<<<<<<<<< + * else: + * info.format = PyObject_Malloc(_buffer_format_string_len) + */ + __pyx_r = 0; + goto __pyx_L0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":272 + * info.obj = self + * + * if not hasfields: # <<<<<<<<<<<<<< + * t = descr.type_num + * if ((descr.byteorder == c'>' and little_endian) or + */ + } + + /* 
"../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":299 + * return + * else: + * info.format = PyObject_Malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< + * info.format[0] = c'^' # Native data types, manual alignment + * offset = 0 + */ + /*else*/ { + __pyx_v_info->format = ((char *)PyObject_Malloc(0xFF)); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":300 + * else: + * info.format = PyObject_Malloc(_buffer_format_string_len) + * info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<< + * offset = 0 + * f = _util_dtypestring(descr, info.format + 1, + */ + (__pyx_v_info->format[0]) = '^'; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":301 + * info.format = PyObject_Malloc(_buffer_format_string_len) + * info.format[0] = c'^' # Native data types, manual alignment + * offset = 0 # <<<<<<<<<<<<<< + * f = _util_dtypestring(descr, info.format + 1, + * info.format + _buffer_format_string_len, + */ + __pyx_v_offset = 0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":302 + * info.format[0] = c'^' # Native data types, manual alignment + * offset = 0 + * f = _util_dtypestring(descr, info.format + 1, # <<<<<<<<<<<<<< + * info.format + _buffer_format_string_len, + * &offset) + */ + __pyx_t_7 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 0xFF), (&__pyx_v_offset)); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 302, __pyx_L1_error) + __pyx_v_f = __pyx_t_7; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":305 + * info.format + _buffer_format_string_len, + * &offset) + * f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<< + * + * def __releasebuffer__(ndarray self, Py_buffer* info): + */ + (__pyx_v_f[0]) = '\x00'; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":214 + * # experimental exception made for __getbuffer__ and __releasebuffer__ + * # -- the details of this may change. + * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< + * # This implementation of getbuffer is geared towards Cython + * # requirements, and does not yet fullfill the PEP. 
+ */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; + } + goto __pyx_L2; + __pyx_L0:; + if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { + __Pyx_GOTREF(Py_None); + __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; + } + __pyx_L2:; + __Pyx_XDECREF((PyObject *)__pyx_v_descr); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":307 + * f[0] = c'\0' # Terminate format string + * + * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< + * if PyArray_HASFIELDS(self): + * PyObject_Free(info.format) + */ + +/* Python wrapper */ +static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/ +static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0); + __pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) { + __Pyx_RefNannyDeclarations + int __pyx_t_1; + __Pyx_RefNannySetupContext("__releasebuffer__", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":308 + * + * def __releasebuffer__(ndarray self, Py_buffer* info): + * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< + * PyObject_Free(info.format) + * if sizeof(npy_intp) != sizeof(Py_ssize_t): + */ + __pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0); + if (__pyx_t_1) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":309 + * def __releasebuffer__(ndarray self, Py_buffer* info): + * if PyArray_HASFIELDS(self): + * PyObject_Free(info.format) # <<<<<<<<<<<<<< + * if sizeof(npy_intp) != sizeof(Py_ssize_t): + * PyObject_Free(info.strides) + */ + PyObject_Free(__pyx_v_info->format); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":308 + * + * def __releasebuffer__(ndarray self, Py_buffer* info): + * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< + * PyObject_Free(info.format) + * if sizeof(npy_intp) != sizeof(Py_ssize_t): + */ + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":310 + * if PyArray_HASFIELDS(self): + * PyObject_Free(info.format) + * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< + * PyObject_Free(info.strides) + * # info.shape was stored after info.strides in the same block + */ + __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); + if (__pyx_t_1) { + + /* 
"../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":311 + * PyObject_Free(info.format) + * if sizeof(npy_intp) != sizeof(Py_ssize_t): + * PyObject_Free(info.strides) # <<<<<<<<<<<<<< + * # info.shape was stored after info.strides in the same block + * + */ + PyObject_Free(__pyx_v_info->strides); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":310 + * if PyArray_HASFIELDS(self): + * PyObject_Free(info.format) + * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< + * PyObject_Free(info.strides) + * # info.shape was stored after info.strides in the same block + */ + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":307 + * f[0] = c'\0' # Terminate format string + * + * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< + * if PyArray_HASFIELDS(self): + * PyObject_Free(info.format) + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":788 + * ctypedef npy_cdouble complex_t + * + * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(1, a) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":789 + * + * cdef inline object PyArray_MultiIterNew1(a): + * return PyArray_MultiIterNew(1, a) # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew2(a, b): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 789, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":788 + * ctypedef npy_cdouble complex_t + * + * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(1, a) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":791 + * return PyArray_MultiIterNew(1, a) + * + * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(2, a, b) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); + + /* 
"../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":792 + * + * cdef inline object PyArray_MultiIterNew2(a, b): + * return PyArray_MultiIterNew(2, a, b) # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew3(a, b, c): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 792, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":791 + * return PyArray_MultiIterNew(1, a) + * + * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(2, a, b) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":794 + * return PyArray_MultiIterNew(2, a, b) + * + * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(3, a, b, c) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":795 + * + * cdef inline object PyArray_MultiIterNew3(a, b, c): + * return PyArray_MultiIterNew(3, a, b, c) # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew4(a, b, c, d): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 795, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":794 + * return PyArray_MultiIterNew(2, a, b) + * + * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(3, a, b, c) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":797 + * return PyArray_MultiIterNew(3, a, b, c) + * + * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(4, a, b, c, d) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + 
__Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":798 + * + * cdef inline object PyArray_MultiIterNew4(a, b, c, d): + * return PyArray_MultiIterNew(4, a, b, c, d) # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 798, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":797 + * return PyArray_MultiIterNew(3, a, b, c) + * + * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(4, a, b, c, d) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":800 + * return PyArray_MultiIterNew(4, a, b, c, d) + * + * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(5, a, b, c, d, e) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":801 + * + * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): + * return PyArray_MultiIterNew(5, a, b, c, d, e) # <<<<<<<<<<<<<< + * + * cdef inline tuple PyDataType_SHAPE(dtype d): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 801, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":800 + * return PyArray_MultiIterNew(4, a, b, c, d) + * + * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(5, a, b, c, d, e) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":803 + * return PyArray_MultiIterNew(5, a, b, c, d, e) + * + * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<< + * if PyDataType_HASSUBARRAY(d): + * 
return d.subarray.shape + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyDataType_SHAPE(PyArray_Descr *__pyx_v_d) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + __Pyx_RefNannySetupContext("PyDataType_SHAPE", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":804 + * + * cdef inline tuple PyDataType_SHAPE(dtype d): + * if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<< + * return d.subarray.shape + * else: + */ + __pyx_t_1 = (PyDataType_HASSUBARRAY(__pyx_v_d) != 0); + if (__pyx_t_1) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":805 + * cdef inline tuple PyDataType_SHAPE(dtype d): + * if PyDataType_HASSUBARRAY(d): + * return d.subarray.shape # <<<<<<<<<<<<<< + * else: + * return () + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject*)__pyx_v_d->subarray->shape)); + __pyx_r = ((PyObject*)__pyx_v_d->subarray->shape); + goto __pyx_L0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":804 + * + * cdef inline tuple PyDataType_SHAPE(dtype d): + * if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<< + * return d.subarray.shape + * else: + */ + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":807 + * return d.subarray.shape + * else: + * return () # <<<<<<<<<<<<<< + * + * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: + */ + /*else*/ { + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_empty_tuple); + __pyx_r = __pyx_empty_tuple; + goto __pyx_L0; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":803 + * return PyArray_MultiIterNew(5, a, b, c, d, e) + * + * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<< + * if PyDataType_HASSUBARRAY(d): + * return d.subarray.shape + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":809 + * return () + * + * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< + * # Recursive utility function used in __getbuffer__ to get format + * # string. The new location in the format string is returned. 
+ */ + +static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { + PyArray_Descr *__pyx_v_child = 0; + int __pyx_v_endian_detector; + int __pyx_v_little_endian; + PyObject *__pyx_v_fields = 0; + PyObject *__pyx_v_childname = NULL; + PyObject *__pyx_v_new_offset = NULL; + PyObject *__pyx_v_t = NULL; + char *__pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + Py_ssize_t __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + int __pyx_t_5; + int __pyx_t_6; + int __pyx_t_7; + long __pyx_t_8; + char *__pyx_t_9; + __Pyx_RefNannySetupContext("_util_dtypestring", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":814 + * + * cdef dtype child + * cdef int endian_detector = 1 # <<<<<<<<<<<<<< + * cdef bint little_endian = ((&endian_detector)[0] != 0) + * cdef tuple fields + */ + __pyx_v_endian_detector = 1; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":815 + * cdef dtype child + * cdef int endian_detector = 1 + * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< + * cdef tuple fields + * + */ + __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":818 + * cdef tuple fields + * + * for childname in descr.names: # <<<<<<<<<<<<<< + * fields = descr.fields[childname] + * child, new_offset = fields + */ + if (unlikely(__pyx_v_descr->names == Py_None)) { + PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); + __PYX_ERR(1, 818, __pyx_L1_error) + } + __pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; + for (;;) { + if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) __PYX_ERR(1, 818, __pyx_L1_error) + #else + __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 818, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + #endif + __Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3); + __pyx_t_3 = 0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":819 + * + * for childname in descr.names: + * fields = descr.fields[childname] # <<<<<<<<<<<<<< + * child, new_offset = fields + * + */ + if (unlikely(__pyx_v_descr->fields == Py_None)) { + PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); + __PYX_ERR(1, 819, __pyx_L1_error) + } + __pyx_t_3 = __Pyx_PyDict_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 819, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) __PYX_ERR(1, 819, __pyx_L1_error) + __Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3)); + __pyx_t_3 = 0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":820 + * for childname 
in descr.names: + * fields = descr.fields[childname] + * child, new_offset = fields # <<<<<<<<<<<<<< + * + * if (end - f) - (new_offset - offset[0]) < 15: + */ + if (likely(__pyx_v_fields != Py_None)) { + PyObject* sequence = __pyx_v_fields; + #if !CYTHON_COMPILING_IN_PYPY + Py_ssize_t size = Py_SIZE(sequence); + #else + Py_ssize_t size = PySequence_Size(sequence); + #endif + if (unlikely(size != 2)) { + if (size > 2) __Pyx_RaiseTooManyValuesError(2); + else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); + __PYX_ERR(1, 820, __pyx_L1_error) + } + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); + __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); + __Pyx_INCREF(__pyx_t_3); + __Pyx_INCREF(__pyx_t_4); + #else + __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 820, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 820, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + #endif + } else { + __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 820, __pyx_L1_error) + } + if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) __PYX_ERR(1, 820, __pyx_L1_error) + __Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3)); + __pyx_t_3 = 0; + __Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4); + __pyx_t_4 = 0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":822 + * child, new_offset = fields + * + * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") + * + */ + __pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 822, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 822, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 822, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0); + if (__pyx_t_6) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":823 + * + * if (end - f) - (new_offset - offset[0]) < 15: + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< + * + * if ((child.byteorder == c'>' and little_endian) or + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 823, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 823, __pyx_L1_error) + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":822 + * child, new_offset = fields + * + * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") + * + */ + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":825 + * raise 
RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") + * + * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< + * (child.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + */ + __pyx_t_7 = ((__pyx_v_child->byteorder == '>') != 0); + if (!__pyx_t_7) { + goto __pyx_L8_next_or; + } else { + } + __pyx_t_7 = (__pyx_v_little_endian != 0); + if (!__pyx_t_7) { + } else { + __pyx_t_6 = __pyx_t_7; + goto __pyx_L7_bool_binop_done; + } + __pyx_L8_next_or:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":826 + * + * if ((child.byteorder == c'>' and little_endian) or + * (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< + * raise ValueError(u"Non-native byte order not supported") + * # One could encode it in the format string and have Cython + */ + __pyx_t_7 = ((__pyx_v_child->byteorder == '<') != 0); + if (__pyx_t_7) { + } else { + __pyx_t_6 = __pyx_t_7; + goto __pyx_L7_bool_binop_done; + } + __pyx_t_7 = ((!(__pyx_v_little_endian != 0)) != 0); + __pyx_t_6 = __pyx_t_7; + __pyx_L7_bool_binop_done:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":825 + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") + * + * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< + * (child.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + */ + if (__pyx_t_6) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":827 + * if ((child.byteorder == c'>' and little_endian) or + * (child.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< + * # One could encode it in the format string and have Cython + * # complain instead, BUT: < and > in format strings also imply + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 827, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 827, __pyx_L1_error) + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":825 + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") + * + * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< + * (child.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + */ + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":837 + * + * # Output padding bytes + * while offset[0] < new_offset: # <<<<<<<<<<<<<< + * f[0] = 120 # "x"; pad byte + * f += 1 + */ + while (1) { + __pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 837, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 837, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if 
(unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 837, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (!__pyx_t_6) break; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":838 + * # Output padding bytes + * while offset[0] < new_offset: + * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< + * f += 1 + * offset[0] += 1 + */ + (__pyx_v_f[0]) = 0x78; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":839 + * while offset[0] < new_offset: + * f[0] = 120 # "x"; pad byte + * f += 1 # <<<<<<<<<<<<<< + * offset[0] += 1 + * + */ + __pyx_v_f = (__pyx_v_f + 1); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":840 + * f[0] = 120 # "x"; pad byte + * f += 1 + * offset[0] += 1 # <<<<<<<<<<<<<< + * + * offset[0] += child.itemsize + */ + __pyx_t_8 = 0; + (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + 1); + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":842 + * offset[0] += 1 + * + * offset[0] += child.itemsize # <<<<<<<<<<<<<< + * + * if not PyDataType_HASFIELDS(child): + */ + __pyx_t_8 = 0; + (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + __pyx_v_child->elsize); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":844 + * offset[0] += child.itemsize + * + * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< + * t = child.type_num + * if end - f < 5: + */ + __pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0); + if (__pyx_t_6) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":845 + * + * if not PyDataType_HASFIELDS(child): + * t = child.type_num # <<<<<<<<<<<<<< + * if end - f < 5: + * raise RuntimeError(u"Format string allocated too short.") + */ + __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 845, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4); + __pyx_t_4 = 0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":846 + * if not PyDataType_HASFIELDS(child): + * t = child.type_num + * if end - f < 5: # <<<<<<<<<<<<<< + * raise RuntimeError(u"Format string allocated too short.") + * + */ + __pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0); + if (__pyx_t_6) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":847 + * t = child.type_num + * if end - f < 5: + * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< + * + * # Until ticket #99 is fixed, use integers to avoid warnings + */ + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 847, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_Raise(__pyx_t_4, 0, 0, 0); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __PYX_ERR(1, 847, __pyx_L1_error) + + /* 
"../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":846 + * if not PyDataType_HASFIELDS(child): + * t = child.type_num + * if end - f < 5: # <<<<<<<<<<<<<< + * raise RuntimeError(u"Format string allocated too short.") + * + */ + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":850 + * + * # Until ticket #99 is fixed, use integers to avoid warnings + * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< + * elif t == NPY_UBYTE: f[0] = 66 #"B" + * elif t == NPY_SHORT: f[0] = 104 #"h" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_BYTE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 850, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 850, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 850, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 98; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":851 + * # Until ticket #99 is fixed, use integers to avoid warnings + * if t == NPY_BYTE: f[0] = 98 #"b" + * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< + * elif t == NPY_SHORT: f[0] = 104 #"h" + * elif t == NPY_USHORT: f[0] = 72 #"H" + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UBYTE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 851, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 851, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 851, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 66; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":852 + * if t == NPY_BYTE: f[0] = 98 #"b" + * elif t == NPY_UBYTE: f[0] = 66 #"B" + * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< + * elif t == NPY_USHORT: f[0] = 72 #"H" + * elif t == NPY_INT: f[0] = 105 #"i" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_SHORT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 852, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 852, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 852, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x68; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":853 + * elif t == NPY_UBYTE: f[0] = 66 #"B" + * elif t == NPY_SHORT: f[0] = 104 #"h" + * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< + * elif t == NPY_INT: f[0] = 105 #"i" + * elif t == NPY_UINT: f[0] = 73 #"I" + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_USHORT); if 
(unlikely(!__pyx_t_3)) __PYX_ERR(1, 853, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 853, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 853, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 72; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":854 + * elif t == NPY_SHORT: f[0] = 104 #"h" + * elif t == NPY_USHORT: f[0] = 72 #"H" + * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< + * elif t == NPY_UINT: f[0] = 73 #"I" + * elif t == NPY_LONG: f[0] = 108 #"l" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_INT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 854, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 854, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 854, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x69; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":855 + * elif t == NPY_USHORT: f[0] = 72 #"H" + * elif t == NPY_INT: f[0] = 105 #"i" + * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< + * elif t == NPY_LONG: f[0] = 108 #"l" + * elif t == NPY_ULONG: f[0] = 76 #"L" + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UINT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 855, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 855, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 855, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 73; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":856 + * elif t == NPY_INT: f[0] = 105 #"i" + * elif t == NPY_UINT: f[0] = 73 #"I" + * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< + * elif t == NPY_ULONG: f[0] = 76 #"L" + * elif t == NPY_LONGLONG: f[0] = 113 #"q" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 856, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 856, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 856, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x6C; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":857 + * elif t == NPY_UINT: f[0] = 73 #"I" + * elif t == NPY_LONG: f[0] = 108 #"l" + * elif t == NPY_ULONG: f[0] = 76 #"L" # 
<<<<<<<<<<<<<< + * elif t == NPY_LONGLONG: f[0] = 113 #"q" + * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 857, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 857, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 857, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 76; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":858 + * elif t == NPY_LONG: f[0] = 108 #"l" + * elif t == NPY_ULONG: f[0] = 76 #"L" + * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< + * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" + * elif t == NPY_FLOAT: f[0] = 102 #"f" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 858, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 858, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 858, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x71; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":859 + * elif t == NPY_ULONG: f[0] = 76 #"L" + * elif t == NPY_LONGLONG: f[0] = 113 #"q" + * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< + * elif t == NPY_FLOAT: f[0] = 102 #"f" + * elif t == NPY_DOUBLE: f[0] = 100 #"d" + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 859, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 859, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 859, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 81; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":860 + * elif t == NPY_LONGLONG: f[0] = 113 #"q" + * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" + * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< + * elif t == NPY_DOUBLE: f[0] = 100 #"d" + * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_FLOAT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 860, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 860, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 860, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x66; + goto __pyx_L15; + } + + /* 
"../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":861 + * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" + * elif t == NPY_FLOAT: f[0] = 102 #"f" + * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< + * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" + * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 861, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 861, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 861, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x64; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":862 + * elif t == NPY_FLOAT: f[0] = 102 #"f" + * elif t == NPY_DOUBLE: f[0] = 100 #"d" + * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< + * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf + * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 862, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 862, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 862, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x67; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":863 + * elif t == NPY_DOUBLE: f[0] = 100 #"d" + * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" + * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< + * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd + * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 863, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 863, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 863, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 90; + (__pyx_v_f[1]) = 0x66; + __pyx_v_f = (__pyx_v_f + 1); + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":864 + * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" + * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf + * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< + * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg + * elif t == NPY_OBJECT: f[0] = 79 #"O" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 
864, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 864, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 864, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 90; + (__pyx_v_f[1]) = 0x64; + __pyx_v_f = (__pyx_v_f + 1); + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":865 + * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf + * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd + * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< + * elif t == NPY_OBJECT: f[0] = 79 #"O" + * else: + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 865, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 865, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 865, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 90; + (__pyx_v_f[1]) = 0x67; + __pyx_v_f = (__pyx_v_f + 1); + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":866 + * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd + * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg + * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< + * else: + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_OBJECT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 866, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 866, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 866, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 79; + goto __pyx_L15; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":868 + * elif t == NPY_OBJECT: f[0] = 79 #"O" + * else: + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< + * f += 1 + * else: + */ + /*else*/ { + __pyx_t_3 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 868, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 868, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_GIVEREF(__pyx_t_3); + PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); + __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 868, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 868, __pyx_L1_error) + } + 
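+ /* every dtype branch of the NPY_* chain above jumps to the shared label
+    __pyx_L15 below; the quoted "f += 1" that follows it then advances f
+    past the format character(s) just written */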
__pyx_L15:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":869 + * else: + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) + * f += 1 # <<<<<<<<<<<<<< + * else: + * # Cython ignores struct boundary information ("T{...}"), + */ + __pyx_v_f = (__pyx_v_f + 1); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":844 + * offset[0] += child.itemsize + * + * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< + * t = child.type_num + * if end - f < 5: + */ + goto __pyx_L13; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":873 + * # Cython ignores struct boundary information ("T{...}"), + * # so don't output it + * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< + * return f + * + */ + /*else*/ { + __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_9 == ((char *)NULL))) __PYX_ERR(1, 873, __pyx_L1_error) + __pyx_v_f = __pyx_t_9; + } + __pyx_L13:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":818 + * cdef tuple fields + * + * for childname in descr.names: # <<<<<<<<<<<<<< + * fields = descr.fields[childname] + * child, new_offset = fields + */ + } + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":874 + * # so don't output it + * f = _util_dtypestring(child, f, end, offset) + * return f # <<<<<<<<<<<<<< + * + * + */ + __pyx_r = __pyx_v_f; + goto __pyx_L0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":809 + * return () + * + * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< + * # Recursive utility function used in __getbuffer__ to get format + * # string. The new location in the format string is returned. 
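+ *
+ * # For example (illustrative only): a little-endian structured dtype such
+ * # as np.dtype([('x', np.float64), ('y', np.complex64)]) comes out of the
+ * # elif-chain above as "d" followed by "Zf", i.e. the format string "dZf".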
+ */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_child); + __Pyx_XDECREF(__pyx_v_fields); + __Pyx_XDECREF(__pyx_v_childname); + __Pyx_XDECREF(__pyx_v_new_offset); + __Pyx_XDECREF(__pyx_v_t); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":990 + * + * + * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< + * cdef PyObject* baseptr + * if base is None: + */ + +static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { + PyObject *__pyx_v_baseptr; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + __Pyx_RefNannySetupContext("set_array_base", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":992 + * cdef inline void set_array_base(ndarray arr, object base): + * cdef PyObject* baseptr + * if base is None: # <<<<<<<<<<<<<< + * baseptr = NULL + * else: + */ + __pyx_t_1 = (__pyx_v_base == Py_None); + __pyx_t_2 = (__pyx_t_1 != 0); + if (__pyx_t_2) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":993 + * cdef PyObject* baseptr + * if base is None: + * baseptr = NULL # <<<<<<<<<<<<<< + * else: + * Py_INCREF(base) # important to do this before decref below! + */ + __pyx_v_baseptr = NULL; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":992 + * cdef inline void set_array_base(ndarray arr, object base): + * cdef PyObject* baseptr + * if base is None: # <<<<<<<<<<<<<< + * baseptr = NULL + * else: + */ + goto __pyx_L3; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":995 + * baseptr = NULL + * else: + * Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<< + * baseptr = base + * Py_XDECREF(arr.base) + */ + /*else*/ { + Py_INCREF(__pyx_v_base); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":996 + * else: + * Py_INCREF(base) # important to do this before decref below! + * baseptr = base # <<<<<<<<<<<<<< + * Py_XDECREF(arr.base) + * arr.base = baseptr + */ + __pyx_v_baseptr = ((PyObject *)__pyx_v_base); + } + __pyx_L3:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":997 + * Py_INCREF(base) # important to do this before decref below! 
+ * baseptr = base + * Py_XDECREF(arr.base) # <<<<<<<<<<<<<< + * arr.base = baseptr + * + */ + Py_XDECREF(__pyx_v_arr->base); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":998 + * baseptr = base + * Py_XDECREF(arr.base) + * arr.base = baseptr # <<<<<<<<<<<<<< + * + * cdef inline object get_array_base(ndarray arr): + */ + __pyx_v_arr->base = __pyx_v_baseptr; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":990 + * + * + * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< + * cdef PyObject* baseptr + * if base is None: + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1000 + * arr.base = baseptr + * + * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< + * if arr.base is NULL: + * return None + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + __Pyx_RefNannySetupContext("get_array_base", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1001 + * + * cdef inline object get_array_base(ndarray arr): + * if arr.base is NULL: # <<<<<<<<<<<<<< + * return None + * else: + */ + __pyx_t_1 = ((__pyx_v_arr->base == NULL) != 0); + if (__pyx_t_1) { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1002 + * cdef inline object get_array_base(ndarray arr): + * if arr.base is NULL: + * return None # <<<<<<<<<<<<<< + * else: + * return arr.base + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(Py_None); + __pyx_r = Py_None; + goto __pyx_L0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1001 + * + * cdef inline object get_array_base(ndarray arr): + * if arr.base is NULL: # <<<<<<<<<<<<<< + * return None + * else: + */ + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1004 + * return None + * else: + * return arr.base # <<<<<<<<<<<<<< + * + * + */ + /*else*/ { + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); + __pyx_r = ((PyObject *)__pyx_v_arr->base); + goto __pyx_L0; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1000 + * arr.base = baseptr + * + * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< + * if arr.base is NULL: + * return None + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1009 + * # Versions of the import_* functions which are more suitable for + * # Cython code. 
+ * cdef inline int import_array() except -1: # <<<<<<<<<<<<<< + * try: + * _import_array() + */ + +static CYTHON_INLINE int __pyx_f_5numpy_import_array(void) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + __Pyx_RefNannySetupContext("import_array", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1010 + * # Cython code. + * cdef inline int import_array() except -1: + * try: # <<<<<<<<<<<<<< + * _import_array() + * except Exception: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_1); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + /*try:*/ { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1011 + * cdef inline int import_array() except -1: + * try: + * _import_array() # <<<<<<<<<<<<<< + * except Exception: + * raise ImportError("numpy.core.multiarray failed to import") + */ + __pyx_t_4 = _import_array(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1011, __pyx_L3_error) + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1010 + * # Cython code. + * cdef inline int import_array() except -1: + * try: # <<<<<<<<<<<<<< + * _import_array() + * except Exception: + */ + } + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + goto __pyx_L8_try_end; + __pyx_L3_error:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1012 + * try: + * _import_array() + * except Exception: # <<<<<<<<<<<<<< + * raise ImportError("numpy.core.multiarray failed to import") + * + */ + __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); + if (__pyx_t_4) { + __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1012, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GOTREF(__pyx_t_7); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1013 + * _import_array() + * except Exception: + * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<< + * + * cdef inline int import_umath() except -1: + */ + __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1013, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_Raise(__pyx_t_8, 0, 0, 0); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __PYX_ERR(1, 1013, __pyx_L5_except_error) + } + goto __pyx_L5_except_error; + __pyx_L5_except_error:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1010 + * # Cython code. 
+ * cdef inline int import_array() except -1: + * try: # <<<<<<<<<<<<<< + * _import_array() + * except Exception: + */ + __Pyx_XGIVEREF(__pyx_t_1); + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); + goto __pyx_L1_error; + __pyx_L8_try_end:; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1009 + * # Versions of the import_* functions which are more suitable for + * # Cython code. + * cdef inline int import_array() except -1: # <<<<<<<<<<<<<< + * try: + * _import_array() + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1015 + * raise ImportError("numpy.core.multiarray failed to import") + * + * cdef inline int import_umath() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + +static CYTHON_INLINE int __pyx_f_5numpy_import_umath(void) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + __Pyx_RefNannySetupContext("import_umath", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1016 + * + * cdef inline int import_umath() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_1); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + /*try:*/ { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1017 + * cdef inline int import_umath() except -1: + * try: + * _import_umath() # <<<<<<<<<<<<<< + * except Exception: + * raise ImportError("numpy.core.umath failed to import") + */ + __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1017, __pyx_L3_error) + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1016 + * + * cdef inline int import_umath() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + } + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + goto __pyx_L8_try_end; + __pyx_L3_error:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1018 + * try: + * _import_umath() + * except Exception: # <<<<<<<<<<<<<< + * raise ImportError("numpy.core.umath failed to import") + * + */ + __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); + if (__pyx_t_4) { + __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, 
__pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1018, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GOTREF(__pyx_t_7); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1019 + * _import_umath() + * except Exception: + * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< + * + * cdef inline int import_ufunc() except -1: + */ + __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1019, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_Raise(__pyx_t_8, 0, 0, 0); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __PYX_ERR(1, 1019, __pyx_L5_except_error) + } + goto __pyx_L5_except_error; + __pyx_L5_except_error:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1016 + * + * cdef inline int import_umath() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + __Pyx_XGIVEREF(__pyx_t_1); + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); + goto __pyx_L1_error; + __pyx_L8_try_end:; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1015 + * raise ImportError("numpy.core.multiarray failed to import") + * + * cdef inline int import_umath() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1021 + * raise ImportError("numpy.core.umath failed to import") + * + * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + +static CYTHON_INLINE int __pyx_f_5numpy_import_ufunc(void) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + __Pyx_RefNannySetupContext("import_ufunc", 0); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1022 + * + * cdef inline int import_ufunc() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_1); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + /*try:*/ { + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1023 + * cdef inline int import_ufunc() except -1: + * try: + * _import_umath() # <<<<<<<<<<<<<< + * except Exception: + * raise ImportError("numpy.core.umath failed to 
import") + */ + __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1023, __pyx_L3_error) + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1022 + * + * cdef inline int import_ufunc() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + } + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + goto __pyx_L8_try_end; + __pyx_L3_error:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1024 + * try: + * _import_umath() + * except Exception: # <<<<<<<<<<<<<< + * raise ImportError("numpy.core.umath failed to import") + */ + __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); + if (__pyx_t_4) { + __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1024, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GOTREF(__pyx_t_7); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1025 + * _import_umath() + * except Exception: + * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< + */ + __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1025, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_Raise(__pyx_t_8, 0, 0, 0); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __PYX_ERR(1, 1025, __pyx_L5_except_error) + } + goto __pyx_L5_except_error; + __pyx_L5_except_error:; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1022 + * + * cdef inline int import_ufunc() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + __Pyx_XGIVEREF(__pyx_t_1); + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); + goto __pyx_L1_error; + __pyx_L8_try_end:; + } + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1021 + * raise ImportError("numpy.core.umath failed to import") + * + * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyMethodDef __pyx_methods[] = { + {0, 0, 0, 0} +}; + +#if PY_MAJOR_VERSION >= 3 +#if CYTHON_PEP489_MULTI_PHASE_INIT +static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ +static int __pyx_pymod_exec_boundary_wrap(PyObject* module); /*proto*/ +static PyModuleDef_Slot __pyx_moduledef_slots[] = { + {Py_mod_create, (void*)__pyx_pymod_create}, + {Py_mod_exec, (void*)__pyx_pymod_exec_boundary_wrap}, + {0, NULL} +}; +#endif + +static struct PyModuleDef __pyx_moduledef = { + 
PyModuleDef_HEAD_INIT, + "boundary_wrap", + 0, /* m_doc */ + #if CYTHON_PEP489_MULTI_PHASE_INIT + 0, /* m_size */ + #else + -1, /* m_size */ + #endif + __pyx_methods /* m_methods */, + #if CYTHON_PEP489_MULTI_PHASE_INIT + __pyx_moduledef_slots, /* m_slots */ + #else + NULL, /* m_reload */ + #endif + NULL, /* m_traverse */ + NULL, /* m_clear */ + NULL /* m_free */ +}; +#endif + +static __Pyx_StringTabEntry __pyx_string_tab[] = { + {&__pyx_kp_s_Convolution_kernel_must_have_odd, __pyx_k_Convolution_kernel_must_have_odd, sizeof(__pyx_k_Convolution_kernel_must_have_odd), 0, 0, 1, 0}, + {&__pyx_n_s_DTYPE, __pyx_k_DTYPE, sizeof(__pyx_k_DTYPE), 0, 0, 1, 1}, + {&__pyx_kp_u_Format_string_allocated_too_shor, __pyx_k_Format_string_allocated_too_shor, sizeof(__pyx_k_Format_string_allocated_too_shor), 0, 1, 0, 0}, + {&__pyx_kp_u_Format_string_allocated_too_shor_2, __pyx_k_Format_string_allocated_too_shor_2, sizeof(__pyx_k_Format_string_allocated_too_shor_2), 0, 1, 0, 0}, + {&__pyx_n_s_ImportError, __pyx_k_ImportError, sizeof(__pyx_k_ImportError), 0, 0, 1, 1}, + {&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0}, + {&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 0, 1, 1}, + {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, + {&__pyx_kp_s_astropy_convolution_boundary_wra, __pyx_k_astropy_convolution_boundary_wra, sizeof(__pyx_k_astropy_convolution_boundary_wra), 0, 0, 1, 0}, + {&__pyx_n_s_astropy_convolution_boundary_wra_2, __pyx_k_astropy_convolution_boundary_wra_2, sizeof(__pyx_k_astropy_convolution_boundary_wra_2), 0, 0, 1, 1}, + {&__pyx_n_s_bot, __pyx_k_bot, sizeof(__pyx_k_bot), 0, 0, 1, 1}, + {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, + {&__pyx_n_s_conv, __pyx_k_conv, sizeof(__pyx_k_conv), 0, 0, 1, 1}, + {&__pyx_n_s_convolve1d_boundary_wrap, __pyx_k_convolve1d_boundary_wrap, sizeof(__pyx_k_convolve1d_boundary_wrap), 0, 0, 1, 1}, + {&__pyx_n_s_convolve2d_boundary_wrap, __pyx_k_convolve2d_boundary_wrap, sizeof(__pyx_k_convolve2d_boundary_wrap), 0, 0, 1, 1}, + {&__pyx_n_s_convolve3d_boundary_wrap, __pyx_k_convolve3d_boundary_wrap, sizeof(__pyx_k_convolve3d_boundary_wrap), 0, 0, 1, 1}, + {&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1}, + {&__pyx_n_s_empty, __pyx_k_empty, sizeof(__pyx_k_empty), 0, 0, 1, 1}, + {&__pyx_n_s_f, __pyx_k_f, sizeof(__pyx_k_f), 0, 0, 1, 1}, + {&__pyx_n_s_float, __pyx_k_float, sizeof(__pyx_k_float), 0, 0, 1, 1}, + {&__pyx_n_s_g, __pyx_k_g, sizeof(__pyx_k_g), 0, 0, 1, 1}, + {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1}, + {&__pyx_n_s_ii, __pyx_k_ii, sizeof(__pyx_k_ii), 0, 0, 1, 1}, + {&__pyx_n_s_iii, __pyx_k_iii, sizeof(__pyx_k_iii), 0, 0, 1, 1}, + {&__pyx_n_s_iimax, __pyx_k_iimax, sizeof(__pyx_k_iimax), 0, 0, 1, 1}, + {&__pyx_n_s_iimin, __pyx_k_iimin, sizeof(__pyx_k_iimin), 0, 0, 1, 1}, + {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, + {&__pyx_n_s_j, __pyx_k_j, sizeof(__pyx_k_j), 0, 0, 1, 1}, + {&__pyx_n_s_jj, __pyx_k_jj, sizeof(__pyx_k_jj), 0, 0, 1, 1}, + {&__pyx_n_s_jjj, __pyx_k_jjj, sizeof(__pyx_k_jjj), 0, 0, 1, 1}, + {&__pyx_n_s_jjmax, __pyx_k_jjmax, sizeof(__pyx_k_jjmax), 0, 0, 1, 1}, + {&__pyx_n_s_jjmin, __pyx_k_jjmin, sizeof(__pyx_k_jjmin), 0, 0, 1, 1}, + {&__pyx_n_s_k, __pyx_k_k, sizeof(__pyx_k_k), 0, 0, 1, 1}, + {&__pyx_n_s_ker, __pyx_k_ker, sizeof(__pyx_k_ker), 0, 0, 1, 1}, + 
{&__pyx_n_s_kk, __pyx_k_kk, sizeof(__pyx_k_kk), 0, 0, 1, 1}, + {&__pyx_n_s_kkk, __pyx_k_kkk, sizeof(__pyx_k_kkk), 0, 0, 1, 1}, + {&__pyx_n_s_kkmax, __pyx_k_kkmax, sizeof(__pyx_k_kkmax), 0, 0, 1, 1}, + {&__pyx_n_s_kkmin, __pyx_k_kkmin, sizeof(__pyx_k_kkmin), 0, 0, 1, 1}, + {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, + {&__pyx_kp_u_ndarray_is_not_C_contiguous, __pyx_k_ndarray_is_not_C_contiguous, sizeof(__pyx_k_ndarray_is_not_C_contiguous), 0, 1, 0, 0}, + {&__pyx_kp_u_ndarray_is_not_Fortran_contiguou, __pyx_k_ndarray_is_not_Fortran_contiguou, sizeof(__pyx_k_ndarray_is_not_Fortran_contiguou), 0, 1, 0, 0}, + {&__pyx_n_s_nkx, __pyx_k_nkx, sizeof(__pyx_k_nkx), 0, 0, 1, 1}, + {&__pyx_n_s_nky, __pyx_k_nky, sizeof(__pyx_k_nky), 0, 0, 1, 1}, + {&__pyx_n_s_nkz, __pyx_k_nkz, sizeof(__pyx_k_nkz), 0, 0, 1, 1}, + {&__pyx_n_s_normalize_by_kernel, __pyx_k_normalize_by_kernel, sizeof(__pyx_k_normalize_by_kernel), 0, 0, 1, 1}, + {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1}, + {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, + {&__pyx_kp_s_numpy_core_multiarray_failed_to, __pyx_k_numpy_core_multiarray_failed_to, sizeof(__pyx_k_numpy_core_multiarray_failed_to), 0, 0, 1, 0}, + {&__pyx_kp_s_numpy_core_umath_failed_to_impor, __pyx_k_numpy_core_umath_failed_to_impor, sizeof(__pyx_k_numpy_core_umath_failed_to_impor), 0, 0, 1, 0}, + {&__pyx_n_s_nx, __pyx_k_nx, sizeof(__pyx_k_nx), 0, 0, 1, 1}, + {&__pyx_n_s_ny, __pyx_k_ny, sizeof(__pyx_k_ny), 0, 0, 1, 1}, + {&__pyx_n_s_nz, __pyx_k_nz, sizeof(__pyx_k_nz), 0, 0, 1, 1}, + {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, + {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, + {&__pyx_n_s_top, __pyx_k_top, sizeof(__pyx_k_top), 0, 0, 1, 1}, + {&__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_k_unknown_dtype_code_in_numpy_pxd, sizeof(__pyx_k_unknown_dtype_code_in_numpy_pxd), 0, 1, 0, 0}, + {&__pyx_n_s_val, __pyx_k_val, sizeof(__pyx_k_val), 0, 0, 1, 1}, + {&__pyx_n_s_wkx, __pyx_k_wkx, sizeof(__pyx_k_wkx), 0, 0, 1, 1}, + {&__pyx_n_s_wky, __pyx_k_wky, sizeof(__pyx_k_wky), 0, 0, 1, 1}, + {&__pyx_n_s_wkz, __pyx_k_wkz, sizeof(__pyx_k_wkz), 0, 0, 1, 1}, + {0, 0, 0, 0, 0, 0, 0} +}; +static int __Pyx_InitCachedBuiltins(void) { + __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(0, 21, __pyx_L1_error) + __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 40, __pyx_L1_error) + __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) __PYX_ERR(1, 823, __pyx_L1_error) + __pyx_builtin_ImportError = __Pyx_GetBuiltinName(__pyx_n_s_ImportError); if (!__pyx_builtin_ImportError) __PYX_ERR(1, 1013, __pyx_L1_error) + return 0; + __pyx_L1_error:; + return -1; +} + +static int __Pyx_InitCachedConstants(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); + + /* "astropy/convolution/boundary_wrap.pyx":21 + * + * if g.shape[0] % 2 != 1: + * raise ValueError("Convolution kernel must have odd dimensions") # <<<<<<<<<<<<<< + * + * assert f.dtype == DTYPE and g.dtype == DTYPE + */ + __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_s_Convolution_kernel_must_have_odd); if (unlikely(!__pyx_tuple_)) __PYX_ERR(0, 21, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple_); + __Pyx_GIVEREF(__pyx_tuple_); + + /* "astropy/convolution/boundary_wrap.pyx":69 + * + * if g.shape[0] % 2 != 1 or g.shape[1] % 2 != 1: + * raise 
ValueError("Convolution kernel must have odd dimensions") # <<<<<<<<<<<<<< + * + * assert f.dtype == DTYPE and g.dtype == DTYPE + */ + __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_Convolution_kernel_must_have_odd); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(0, 69, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__2); + __Pyx_GIVEREF(__pyx_tuple__2); + + /* "astropy/convolution/boundary_wrap.pyx":126 + * + * if g.shape[0] % 2 != 1 or g.shape[1] % 2 != 1 or g.shape[2] % 2 != 1: + * raise ValueError("Convolution kernel must have odd dimensions") # <<<<<<<<<<<<<< + * + * assert f.dtype == DTYPE and g.dtype == DTYPE + */ + __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_s_Convolution_kernel_must_have_odd); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(0, 126, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__3); + __Pyx_GIVEREF(__pyx_tuple__3); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":235 + * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) + * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): + * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< + * + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) + */ + __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 235, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__4); + __Pyx_GIVEREF(__pyx_tuple__4); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":239 + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) + * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): + * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< + * + * info.buf = PyArray_DATA(self) + */ + __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 239, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__5); + __Pyx_GIVEREF(__pyx_tuple__5); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":276 + * if ((descr.byteorder == c'>' and little_endian) or + * (descr.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< + * if t == NPY_BYTE: f = "b" + * elif t == NPY_UBYTE: f = "B" + */ + __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 276, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__6); + __Pyx_GIVEREF(__pyx_tuple__6); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":823 + * + * if (end - f) - (new_offset - offset[0]) < 15: + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< + * + * if ((child.byteorder == c'>' and little_endian) or + */ + __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 823, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__7); + __Pyx_GIVEREF(__pyx_tuple__7); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":827 + * if ((child.byteorder == c'>' and little_endian) or + * (child.byteorder == c'<' and not little_endian)): + * raise 
ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< + * # One could encode it in the format string and have Cython + * # complain instead, BUT: < and > in format strings also imply + */ + __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 827, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__8); + __Pyx_GIVEREF(__pyx_tuple__8); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":847 + * t = child.type_num + * if end - f < 5: + * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< + * + * # Until ticket #99 is fixed, use integers to avoid warnings + */ + __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 847, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__9); + __Pyx_GIVEREF(__pyx_tuple__9); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1013 + * _import_array() + * except Exception: + * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<< + * + * cdef inline int import_umath() except -1: + */ + __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_multiarray_failed_to); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 1013, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__10); + __Pyx_GIVEREF(__pyx_tuple__10); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1019 + * _import_umath() + * except Exception: + * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< + * + * cdef inline int import_ufunc() except -1: + */ + __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 1019, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__11); + __Pyx_GIVEREF(__pyx_tuple__11); + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1025 + * _import_umath() + * except Exception: + * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< + */ + __pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 1025, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__12); + __Pyx_GIVEREF(__pyx_tuple__12); + + /* "astropy/convolution/boundary_wrap.pyx":16 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve1d_boundary_wrap(np.ndarray[DTYPE_t, ndim=1] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=1] g, + * bint normalize_by_kernel): + */ + __pyx_tuple__13 = PyTuple_Pack(16, __pyx_n_s_f, __pyx_n_s_g, __pyx_n_s_normalize_by_kernel, __pyx_n_s_nx, __pyx_n_s_nkx, __pyx_n_s_wkx, __pyx_n_s_conv, __pyx_n_s_i, __pyx_n_s_iii, __pyx_n_s_ii, __pyx_n_s_iimin, __pyx_n_s_iimax, __pyx_n_s_top, __pyx_n_s_bot, __pyx_n_s_ker, __pyx_n_s_val); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(0, 16, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__13); + __Pyx_GIVEREF(__pyx_tuple__13); + __pyx_codeobj__14 = (PyObject*)__Pyx_PyCode_New(3, 0, 16, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__13, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_astropy_convolution_boundary_wra, __pyx_n_s_convolve1d_boundary_wrap, 16, 
__pyx_empty_bytes); if (unlikely(!__pyx_codeobj__14)) __PYX_ERR(0, 16, __pyx_L1_error) + + /* "astropy/convolution/boundary_wrap.pyx":64 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve2d_boundary_wrap(np.ndarray[DTYPE_t, ndim=2] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=2] g, + * bint normalize_by_kernel): + */ + __pyx_tuple__15 = PyTuple_Pack(24, __pyx_n_s_f, __pyx_n_s_g, __pyx_n_s_normalize_by_kernel, __pyx_n_s_nx, __pyx_n_s_ny, __pyx_n_s_nkx, __pyx_n_s_nky, __pyx_n_s_wkx, __pyx_n_s_wky, __pyx_n_s_conv, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_iii, __pyx_n_s_jjj, __pyx_n_s_ii, __pyx_n_s_jj, __pyx_n_s_iimin, __pyx_n_s_iimax, __pyx_n_s_jjmin, __pyx_n_s_jjmax, __pyx_n_s_top, __pyx_n_s_bot, __pyx_n_s_ker, __pyx_n_s_val); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(0, 64, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__15); + __Pyx_GIVEREF(__pyx_tuple__15); + __pyx_codeobj__16 = (PyObject*)__Pyx_PyCode_New(3, 0, 24, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__15, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_astropy_convolution_boundary_wra, __pyx_n_s_convolve2d_boundary_wrap, 64, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__16)) __PYX_ERR(0, 64, __pyx_L1_error) + + /* "astropy/convolution/boundary_wrap.pyx":121 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve3d_boundary_wrap(np.ndarray[DTYPE_t, ndim=3] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=3] g, + * bint normalize_by_kernel): + */ + __pyx_tuple__17 = PyTuple_Pack(32, __pyx_n_s_f, __pyx_n_s_g, __pyx_n_s_normalize_by_kernel, __pyx_n_s_nx, __pyx_n_s_ny, __pyx_n_s_nz, __pyx_n_s_nkx, __pyx_n_s_nky, __pyx_n_s_nkz, __pyx_n_s_wkx, __pyx_n_s_wky, __pyx_n_s_wkz, __pyx_n_s_conv, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_k, __pyx_n_s_iii, __pyx_n_s_jjj, __pyx_n_s_kkk, __pyx_n_s_ii, __pyx_n_s_jj, __pyx_n_s_kk, __pyx_n_s_iimin, __pyx_n_s_iimax, __pyx_n_s_jjmin, __pyx_n_s_jjmax, __pyx_n_s_kkmin, __pyx_n_s_kkmax, __pyx_n_s_top, __pyx_n_s_bot, __pyx_n_s_ker, __pyx_n_s_val); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(0, 121, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__17); + __Pyx_GIVEREF(__pyx_tuple__17); + __pyx_codeobj__18 = (PyObject*)__Pyx_PyCode_New(3, 0, 32, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__17, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_astropy_convolution_boundary_wra, __pyx_n_s_convolve3d_boundary_wrap, 121, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__18)) __PYX_ERR(0, 121, __pyx_L1_error) + __Pyx_RefNannyFinishContext(); + return 0; + __pyx_L1_error:; + __Pyx_RefNannyFinishContext(); + return -1; +} + +static int __Pyx_InitGlobals(void) { + if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); + return 0; + __pyx_L1_error:; + return -1; +} + +#if PY_MAJOR_VERSION < 3 +PyMODINIT_FUNC initboundary_wrap(void); /*proto*/ +PyMODINIT_FUNC initboundary_wrap(void) +#else +PyMODINIT_FUNC PyInit_boundary_wrap(void); /*proto*/ +PyMODINIT_FUNC PyInit_boundary_wrap(void) +#if CYTHON_PEP489_MULTI_PHASE_INIT +{ + return PyModuleDef_Init(&__pyx_moduledef); +} +static int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name) { + PyObject *value = PyObject_GetAttrString(spec, from_name); + int result = 0; + if (likely(value)) { + result = PyDict_SetItemString(moddict, to_name, value); + Py_DECREF(value); + } else if 
(PyErr_ExceptionMatches(PyExc_AttributeError)) { + PyErr_Clear(); + } else { + result = -1; + } + return result; +} +static PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { + PyObject *module = NULL, *moddict, *modname; + if (__pyx_m) + return __Pyx_NewRef(__pyx_m); + modname = PyObject_GetAttrString(spec, "name"); + if (unlikely(!modname)) goto bad; + module = PyModule_NewObject(modname); + Py_DECREF(modname); + if (unlikely(!module)) goto bad; + moddict = PyModule_GetDict(module); + if (unlikely(!moddict)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__") < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__") < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__") < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__") < 0)) goto bad; + return module; +bad: + Py_XDECREF(module); + return NULL; +} + + +static int __pyx_pymod_exec_boundary_wrap(PyObject *__pyx_pyinit_module) +#endif +#endif +{ + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + __Pyx_RefNannyDeclarations + #if CYTHON_PEP489_MULTI_PHASE_INIT + if (__pyx_m && __pyx_m == __pyx_pyinit_module) return 0; + #endif + #if CYTHON_REFNANNY + __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); + if (!__Pyx_RefNanny) { + PyErr_Clear(); + __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); + if (!__Pyx_RefNanny) + Py_FatalError("failed to import 'refnanny' module"); + } + #endif + __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_boundary_wrap(void)", 0); + if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) + #ifdef __Pyx_CyFunction_USED + if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_FusedFunction_USED + if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_Coroutine_USED + if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_Generator_USED + if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_AsyncGen_USED + if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_StopAsyncIteration_USED + if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + /*--- Library function declarations ---*/ + /*--- Threads initialization code ---*/ + #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS + #ifdef WITH_THREAD /* Python build with threading support? 
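+ (explicit initialization is only needed on CPython < 3.7;
+ from 3.7 on, Py_Initialize() sets up the GIL itself)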
*/ + PyEval_InitThreads(); + #endif + #endif + /*--- Module creation code ---*/ + #if CYTHON_PEP489_MULTI_PHASE_INIT + __pyx_m = __pyx_pyinit_module; + Py_INCREF(__pyx_m); + #else + #if PY_MAJOR_VERSION < 3 + __pyx_m = Py_InitModule4("boundary_wrap", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); + #else + __pyx_m = PyModule_Create(&__pyx_moduledef); + #endif + if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) + Py_INCREF(__pyx_d); + __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) + #if CYTHON_COMPILING_IN_PYPY + Py_INCREF(__pyx_b); + #endif + if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); + /*--- Initialize various global constants etc. ---*/ + if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) + if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + if (__pyx_module_is_main_astropy__convolution__boundary_wrap) { + if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + } + #if PY_MAJOR_VERSION >= 3 + { + PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) + if (!PyDict_GetItemString(modules, "astropy.convolution.boundary_wrap")) { + if (unlikely(PyDict_SetItemString(modules, "astropy.convolution.boundary_wrap", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) + } + } + #endif + /*--- Builtin init code ---*/ + if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + /*--- Constants init code ---*/ + if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + /*--- Global init code ---*/ + /*--- Variable export code ---*/ + /*--- Function export code ---*/ + /*--- Type init code ---*/ + /*--- Type import code ---*/ + __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", + #if CYTHON_COMPILING_IN_PYPY + sizeof(PyTypeObject), + #else + sizeof(PyHeapTypeObject), + #endif + 0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) __PYX_ERR(2, 9, __pyx_L1_error) + __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) __PYX_ERR(1, 163, __pyx_L1_error) + __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) __PYX_ERR(1, 185, __pyx_L1_error) + __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) __PYX_ERR(1, 189, __pyx_L1_error) + __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) __PYX_ERR(1, 198, __pyx_L1_error) + __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) __PYX_ERR(1, 885, __pyx_L1_error) + /*--- Variable import code ---*/ + /*--- Function import code ---*/ + /*--- Execution code ---*/ + #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) + if (__Pyx_patch_abc() < 0) 
__PYX_ERR(0, 1, __pyx_L1_error) + #endif + + /* "astropy/convolution/boundary_wrap.pyx":3 + * # Licensed under a 3-clause BSD style license - see LICENSE.rst + * from __future__ import division + * import numpy as np # <<<<<<<<<<<<<< + * cimport numpy as np + * + */ + __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 3, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "astropy/convolution/boundary_wrap.pyx":6 + * cimport numpy as np + * + * DTYPE = np.float # <<<<<<<<<<<<<< + * ctypedef np.float_t DTYPE_t + * + */ + __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_float); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + if (PyDict_SetItem(__pyx_d, __pyx_n_s_DTYPE, __pyx_t_2) < 0) __PYX_ERR(0, 6, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "astropy/convolution/boundary_wrap.pyx":16 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve1d_boundary_wrap(np.ndarray[DTYPE_t, ndim=1] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=1] g, + * bint normalize_by_kernel): + */ + __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_7astropy_11convolution_13boundary_wrap_1convolve1d_boundary_wrap, NULL, __pyx_n_s_astropy_convolution_boundary_wra_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_convolve1d_boundary_wrap, __pyx_t_2) < 0) __PYX_ERR(0, 16, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "astropy/convolution/boundary_wrap.pyx":64 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve2d_boundary_wrap(np.ndarray[DTYPE_t, ndim=2] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=2] g, + * bint normalize_by_kernel): + */ + __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_7astropy_11convolution_13boundary_wrap_3convolve2d_boundary_wrap, NULL, __pyx_n_s_astropy_convolution_boundary_wra_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 64, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_convolve2d_boundary_wrap, __pyx_t_2) < 0) __PYX_ERR(0, 64, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "astropy/convolution/boundary_wrap.pyx":121 + * + * @cython.boundscheck(False) # turn off bounds-checking for entire function + * def convolve3d_boundary_wrap(np.ndarray[DTYPE_t, ndim=3] f, # <<<<<<<<<<<<<< + * np.ndarray[DTYPE_t, ndim=3] g, + * bint normalize_by_kernel): + */ + __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_7astropy_11convolution_13boundary_wrap_5convolve3d_boundary_wrap, NULL, __pyx_n_s_astropy_convolution_boundary_wra_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 121, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_convolve3d_boundary_wrap, __pyx_t_2) < 0) __PYX_ERR(0, 121, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "astropy/convolution/boundary_wrap.pyx":1 + * # Licensed under a 3-clause BSD style license - see LICENSE.rst # <<<<<<<<<<<<<< + * from __future__ import division + * import numpy as np + */ + __pyx_t_2 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1, __pyx_L1_error) + 
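+ /* the empty dict created here is stored below as the module's __test__
+    attribute, the mapping that doctest consults; no doctests are exported */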
__Pyx_GOTREF(__pyx_t_2); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_2) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "../../../../../opt/local/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1021 + * raise ImportError("numpy.core.umath failed to import") + * + * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + + /*--- Wrapped vars code ---*/ + + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + if (__pyx_m) { + if (__pyx_d) { + __Pyx_AddTraceback("init astropy.convolution.boundary_wrap", 0, __pyx_lineno, __pyx_filename); + } + Py_DECREF(__pyx_m); __pyx_m = 0; + } else if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_ImportError, "init astropy.convolution.boundary_wrap"); + } + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + #if CYTHON_PEP489_MULTI_PHASE_INIT + return (__pyx_m != NULL) ? 0 : -1; + #elif PY_MAJOR_VERSION >= 3 + return __pyx_m; + #else + return; + #endif +} + +/* --- Runtime support code --- */ +/* Refnanny */ +#if CYTHON_REFNANNY +static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { + PyObject *m = NULL, *p = NULL; + void *r = NULL; + m = PyImport_ImportModule((char *)modname); + if (!m) goto end; + p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); + if (!p) goto end; + r = PyLong_AsVoidPtr(p); +end: + Py_XDECREF(p); + Py_XDECREF(m); + return (__Pyx_RefNannyAPIStruct *)r; +} +#endif + +/* GetBuiltinName */ +static PyObject *__Pyx_GetBuiltinName(PyObject *name) { + PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); + if (unlikely(!result)) { + PyErr_Format(PyExc_NameError, +#if PY_MAJOR_VERSION >= 3 + "name '%U' is not defined", name); +#else + "name '%.200s' is not defined", PyString_AS_STRING(name)); +#endif + } + return result; +} + +/* RaiseArgTupleInvalid */ +static void __Pyx_RaiseArgtupleInvalid( + const char* func_name, + int exact, + Py_ssize_t num_min, + Py_ssize_t num_max, + Py_ssize_t num_found) +{ + Py_ssize_t num_expected; + const char *more_or_less; + if (num_found < num_min) { + num_expected = num_min; + more_or_less = "at least"; + } else { + num_expected = num_max; + more_or_less = "at most"; + } + if (exact) { + more_or_less = "exactly"; + } + PyErr_Format(PyExc_TypeError, + "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", + func_name, more_or_less, num_expected, + (num_expected == 1) ? 
"" : "s", num_found); +} + +/* RaiseDoubleKeywords */ +static void __Pyx_RaiseDoubleKeywordsError( + const char* func_name, + PyObject* kw_name) +{ + PyErr_Format(PyExc_TypeError, + #if PY_MAJOR_VERSION >= 3 + "%s() got multiple values for keyword argument '%U'", func_name, kw_name); + #else + "%s() got multiple values for keyword argument '%s'", func_name, + PyString_AsString(kw_name)); + #endif +} + +/* ParseKeywords */ +static int __Pyx_ParseOptionalKeywords( + PyObject *kwds, + PyObject **argnames[], + PyObject *kwds2, + PyObject *values[], + Py_ssize_t num_pos_args, + const char* function_name) +{ + PyObject *key = 0, *value = 0; + Py_ssize_t pos = 0; + PyObject*** name; + PyObject*** first_kw_arg = argnames + num_pos_args; + while (PyDict_Next(kwds, &pos, &key, &value)) { + name = first_kw_arg; + while (*name && (**name != key)) name++; + if (*name) { + values[name-argnames] = value; + continue; + } + name = first_kw_arg; + #if PY_MAJOR_VERSION < 3 + if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { + while (*name) { + if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) + && _PyString_Eq(**name, key)) { + values[name-argnames] = value; + break; + } + name++; + } + if (*name) continue; + else { + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + if ((**argname == key) || ( + (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) + && _PyString_Eq(**argname, key))) { + goto arg_passed_twice; + } + argname++; + } + } + } else + #endif + if (likely(PyUnicode_Check(key))) { + while (*name) { + int cmp = (**name == key) ? 0 : + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : + #endif + PyUnicode_Compare(**name, key); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) { + values[name-argnames] = value; + break; + } + name++; + } + if (*name) continue; + else { + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + int cmp = (**argname == key) ? 0 : + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 
1 : + #endif + PyUnicode_Compare(**argname, key); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) goto arg_passed_twice; + argname++; + } + } + } else + goto invalid_keyword_type; + if (kwds2) { + if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; + } else { + goto invalid_keyword; + } + } + return 0; +arg_passed_twice: + __Pyx_RaiseDoubleKeywordsError(function_name, key); + goto bad; +invalid_keyword_type: + PyErr_Format(PyExc_TypeError, + "%.200s() keywords must be strings", function_name); + goto bad; +invalid_keyword: + PyErr_Format(PyExc_TypeError, + #if PY_MAJOR_VERSION < 3 + "%.200s() got an unexpected keyword argument '%.200s'", + function_name, PyString_AsString(key)); + #else + "%s() got an unexpected keyword argument '%U'", + function_name, key); + #endif +bad: + return -1; +} + +/* ArgTypeTest */ +static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) +{ + if (unlikely(!type)) { + PyErr_SetString(PyExc_SystemError, "Missing type object"); + return 0; + } + else if (exact) { + #if PY_MAJOR_VERSION == 2 + if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; + #endif + } + else { + if (likely(__Pyx_TypeCheck(obj, type))) return 1; + } + PyErr_Format(PyExc_TypeError, + "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", + name, type->tp_name, Py_TYPE(obj)->tp_name); + return 0; +} + +/* IsLittleEndian */ +static CYTHON_INLINE int __Pyx_Is_Little_Endian(void) +{ + union { + uint32_t u32; + uint8_t u8[4]; + } S; + S.u32 = 0x01020304; + return S.u8[0] == 4; +} + +/* BufferFormatCheck */ +static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, + __Pyx_BufFmt_StackElem* stack, + __Pyx_TypeInfo* type) { + stack[0].field = &ctx->root; + stack[0].parent_offset = 0; + ctx->root.type = type; + ctx->root.name = "buffer dtype"; + ctx->root.offset = 0; + ctx->head = stack; + ctx->head->field = &ctx->root; + ctx->fmt_offset = 0; + ctx->head->parent_offset = 0; + ctx->new_packmode = '@'; + ctx->enc_packmode = '@'; + ctx->new_count = 1; + ctx->enc_count = 0; + ctx->enc_type = 0; + ctx->is_complex = 0; + ctx->is_valid_array = 0; + ctx->struct_alignment = 0; + while (type->typegroup == 'S') { + ++ctx->head; + ctx->head->field = type->fields; + ctx->head->parent_offset = 0; + type = type->fields->type; + } +} +static int __Pyx_BufFmt_ParseNumber(const char** ts) { + int count; + const char* t = *ts; + if (*t < '0' || *t > '9') { + return -1; + } else { + count = *t++ - '0'; + while (*t >= '0' && *t < '9') { + count *= 10; + count += *t++ - '0'; + } + } + *ts = t; + return count; +} +static int __Pyx_BufFmt_ExpectNumber(const char **ts) { + int number = __Pyx_BufFmt_ParseNumber(ts); + if (number == -1) + PyErr_Format(PyExc_ValueError,\ + "Does not understand character buffer dtype format string ('%c')", **ts); + return number; +} +static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { + PyErr_Format(PyExc_ValueError, + "Unexpected format string character: '%c'", ch); +} +static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { + switch (ch) { + case 'c': return "'char'"; + case 'b': return "'signed char'"; + case 'B': return "'unsigned char'"; + case 'h': return "'short'"; + case 'H': return "'unsigned short'"; + case 'i': return "'int'"; + case 'I': return "'unsigned int'"; + case 'l': return "'long'"; + case 'L': return "'unsigned long'"; + case 'q': return "'long long'"; + case 'Q': return "'unsigned long long'"; + case 'f': return (is_complex ? 
"'complex float'" : "'float'"); + case 'd': return (is_complex ? "'complex double'" : "'double'"); + case 'g': return (is_complex ? "'complex long double'" : "'long double'"); + case 'T': return "a struct"; + case 'O': return "Python object"; + case 'P': return "a pointer"; + case 's': case 'p': return "a string"; + case 0: return "end"; + default: return "unparseable format string"; + } +} +static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { + switch (ch) { + case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return 2; + case 'i': case 'I': case 'l': case 'L': return 4; + case 'q': case 'Q': return 8; + case 'f': return (is_complex ? 8 : 4); + case 'd': return (is_complex ? 16 : 8); + case 'g': { + PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); + return 0; + } + case 'O': case 'P': return sizeof(void*); + default: + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } +} +static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { + switch (ch) { + case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return sizeof(short); + case 'i': case 'I': return sizeof(int); + case 'l': case 'L': return sizeof(long); + #ifdef HAVE_LONG_LONG + case 'q': case 'Q': return sizeof(PY_LONG_LONG); + #endif + case 'f': return sizeof(float) * (is_complex ? 2 : 1); + case 'd': return sizeof(double) * (is_complex ? 2 : 1); + case 'g': return sizeof(long double) * (is_complex ? 2 : 1); + case 'O': case 'P': return sizeof(void*); + default: { + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } + } +} +typedef struct { char c; short x; } __Pyx_st_short; +typedef struct { char c; int x; } __Pyx_st_int; +typedef struct { char c; long x; } __Pyx_st_long; +typedef struct { char c; float x; } __Pyx_st_float; +typedef struct { char c; double x; } __Pyx_st_double; +typedef struct { char c; long double x; } __Pyx_st_longdouble; +typedef struct { char c; void *x; } __Pyx_st_void_p; +#ifdef HAVE_LONG_LONG +typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; +#endif +static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { + switch (ch) { + case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); + case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); + case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); +#ifdef HAVE_LONG_LONG + case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); +#endif + case 'f': return sizeof(__Pyx_st_float) - sizeof(float); + case 'd': return sizeof(__Pyx_st_double) - sizeof(double); + case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); + case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); + default: + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } +} +/* These are for computing the padding at the end of the struct to align + on the first member of the struct. This will probably the same as above, + but we don't have any guarantees. 
+ */ +typedef struct { short x; char c; } __Pyx_pad_short; +typedef struct { int x; char c; } __Pyx_pad_int; +typedef struct { long x; char c; } __Pyx_pad_long; +typedef struct { float x; char c; } __Pyx_pad_float; +typedef struct { double x; char c; } __Pyx_pad_double; +typedef struct { long double x; char c; } __Pyx_pad_longdouble; +typedef struct { void *x; char c; } __Pyx_pad_void_p; +#ifdef HAVE_LONG_LONG +typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; +#endif +static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { + switch (ch) { + case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); + case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); + case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); +#ifdef HAVE_LONG_LONG + case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); +#endif + case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); + case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); + case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); + case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); + default: + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } +} +static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { + switch (ch) { + case 'c': + return 'H'; + case 'b': case 'h': case 'i': + case 'l': case 'q': case 's': case 'p': + return 'I'; + case 'B': case 'H': case 'I': case 'L': case 'Q': + return 'U'; + case 'f': case 'd': case 'g': + return (is_complex ? 'C' : 'R'); + case 'O': + return 'O'; + case 'P': + return 'P'; + default: { + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } + } +} +static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { + if (ctx->head == NULL || ctx->head->field == &ctx->root) { + const char* expected; + const char* quote; + if (ctx->head == NULL) { + expected = "end"; + quote = ""; + } else { + expected = ctx->head->field->type->name; + quote = "'"; + } + PyErr_Format(PyExc_ValueError, + "Buffer dtype mismatch, expected %s%s%s but got %s", + quote, expected, quote, + __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); + } else { + __Pyx_StructField* field = ctx->head->field; + __Pyx_StructField* parent = (ctx->head - 1)->field; + PyErr_Format(PyExc_ValueError, + "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", + field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), + parent->type->name, field->name); + } +} +static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { + char group; + size_t size, offset, arraysize = 1; + if (ctx->enc_type == 0) return 0; + if (ctx->head->field->type->arraysize[0]) { + int i, ndim = 0; + if (ctx->enc_type == 's' || ctx->enc_type == 'p') { + ctx->is_valid_array = ctx->head->field->type->ndim == 1; + ndim = 1; + if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { + PyErr_Format(PyExc_ValueError, + "Expected a dimension of size %zu, got %zu", + ctx->head->field->type->arraysize[0], ctx->enc_count); + return -1; + } + } + if (!ctx->is_valid_array) { + PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", + ctx->head->field->type->ndim, ndim); + return -1; + } + for (i = 0; i < ctx->head->field->type->ndim; i++) { + arraysize *= ctx->head->field->type->arraysize[i]; + } + ctx->is_valid_array = 0; + ctx->enc_count = 1; + } + group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, 
ctx->is_complex); + do { + __Pyx_StructField* field = ctx->head->field; + __Pyx_TypeInfo* type = field->type; + if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { + size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); + } else { + size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); + } + if (ctx->enc_packmode == '@') { + size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); + size_t align_mod_offset; + if (align_at == 0) return -1; + align_mod_offset = ctx->fmt_offset % align_at; + if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; + if (ctx->struct_alignment == 0) + ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, + ctx->is_complex); + } + if (type->size != size || type->typegroup != group) { + if (type->typegroup == 'C' && type->fields != NULL) { + size_t parent_offset = ctx->head->parent_offset + field->offset; + ++ctx->head; + ctx->head->field = type->fields; + ctx->head->parent_offset = parent_offset; + continue; + } + if ((type->typegroup == 'H' || group == 'H') && type->size == size) { + } else { + __Pyx_BufFmt_RaiseExpected(ctx); + return -1; + } + } + offset = ctx->head->parent_offset + field->offset; + if (ctx->fmt_offset != offset) { + PyErr_Format(PyExc_ValueError, + "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", + (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); + return -1; + } + ctx->fmt_offset += size; + if (arraysize) + ctx->fmt_offset += (arraysize - 1) * size; + --ctx->enc_count; + while (1) { + if (field == &ctx->root) { + ctx->head = NULL; + if (ctx->enc_count != 0) { + __Pyx_BufFmt_RaiseExpected(ctx); + return -1; + } + break; + } + ctx->head->field = ++field; + if (field->type == NULL) { + --ctx->head; + field = ctx->head->field; + continue; + } else if (field->type->typegroup == 'S') { + size_t parent_offset = ctx->head->parent_offset + field->offset; + if (field->type->fields->type == NULL) continue; + field = field->type->fields; + ++ctx->head; + ctx->head->field = field; + ctx->head->parent_offset = parent_offset; + break; + } else { + break; + } + } + } while (ctx->enc_count); + ctx->enc_type = 0; + ctx->is_complex = 0; + return 0; +} +static PyObject * +__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) +{ + const char *ts = *tsp; + int i = 0, number; + int ndim = ctx->head->field->type->ndim; +; + ++ts; + if (ctx->new_count != 1) { + PyErr_SetString(PyExc_ValueError, + "Cannot handle repeated arrays in format string"); + return NULL; + } + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + while (*ts && *ts != ')') { + switch (*ts) { + case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; + default: break; + } + number = __Pyx_BufFmt_ExpectNumber(&ts); + if (number == -1) return NULL; + if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) + return PyErr_Format(PyExc_ValueError, + "Expected a dimension of size %zu, got %d", + ctx->head->field->type->arraysize[i], number); + if (*ts != ',' && *ts != ')') + return PyErr_Format(PyExc_ValueError, + "Expected a comma in format string, got '%c'", *ts); + if (*ts == ',') ts++; + i++; + } + if (i != ndim) + return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", + ctx->head->field->type->ndim, i); + if (!*ts) { + PyErr_SetString(PyExc_ValueError, + "Unexpected end of format string, expected ')'"); + return NULL; + } + 
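/* At this point every dimension of the PEP-3118 sub-array spec (e.g. the "(2,3)" in a format string such as "(2,3)f") has been matched against the declared arraysize and the closing ')' has been seen, so the shape is accepted below. Note that the whitespace cases in the loop above hit "continue" without advancing ts, so blanks inside the parentheses are never actually consumed. */ +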
ctx->is_valid_array = 1; + ctx->new_count = 1; + *tsp = ++ts; + return Py_None; +} +static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { + int got_Z = 0; + while (1) { + switch(*ts) { + case 0: + if (ctx->enc_type != 0 && ctx->head == NULL) { + __Pyx_BufFmt_RaiseExpected(ctx); + return NULL; + } + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + if (ctx->head != NULL) { + __Pyx_BufFmt_RaiseExpected(ctx); + return NULL; + } + return ts; + case ' ': + case '\r': + case '\n': + ++ts; + break; + case '<': + if (!__Pyx_Is_Little_Endian()) { + PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); + return NULL; + } + ctx->new_packmode = '='; + ++ts; + break; + case '>': + case '!': + if (__Pyx_Is_Little_Endian()) { + PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); + return NULL; + } + ctx->new_packmode = '='; + ++ts; + break; + case '=': + case '@': + case '^': + ctx->new_packmode = *ts++; + break; + case 'T': + { + const char* ts_after_sub; + size_t i, struct_count = ctx->new_count; + size_t struct_alignment = ctx->struct_alignment; + ctx->new_count = 1; + ++ts; + if (*ts != '{') { + PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); + return NULL; + } + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->enc_type = 0; + ctx->enc_count = 0; + ctx->struct_alignment = 0; + ++ts; + ts_after_sub = ts; + for (i = 0; i != struct_count; ++i) { + ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); + if (!ts_after_sub) return NULL; + } + ts = ts_after_sub; + if (struct_alignment) ctx->struct_alignment = struct_alignment; + } + break; + case '}': + { + size_t alignment = ctx->struct_alignment; + ++ts; + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->enc_type = 0; + if (alignment && ctx->fmt_offset % alignment) { + ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); + } + } + return ts; + case 'x': + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->fmt_offset += ctx->new_count; + ctx->new_count = 1; + ctx->enc_count = 0; + ctx->enc_type = 0; + ctx->enc_packmode = ctx->new_packmode; + ++ts; + break; + case 'Z': + got_Z = 1; + ++ts; + if (*ts != 'f' && *ts != 'd' && *ts != 'g') { + __Pyx_BufFmt_RaiseUnexpectedChar('Z'); + return NULL; + } + case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': + case 'l': case 'L': case 'q': case 'Q': + case 'f': case 'd': case 'g': + case 'O': case 'p': + if (ctx->enc_type == *ts && got_Z == ctx->is_complex && + ctx->enc_packmode == ctx->new_packmode) { + ctx->enc_count += ctx->new_count; + ctx->new_count = 1; + got_Z = 0; + ++ts; + break; + } + case 's': + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->enc_count = ctx->new_count; + ctx->enc_packmode = ctx->new_packmode; + ctx->enc_type = *ts; + ctx->is_complex = got_Z; + ++ts; + ctx->new_count = 1; + got_Z = 0; + break; + case ':': + ++ts; + while(*ts != ':') ++ts; + ++ts; + break; + case '(': + if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; + break; + default: + { + int number = __Pyx_BufFmt_ExpectNumber(&ts); + if (number == -1) return NULL; + ctx->new_count = (size_t)number; + } + } + } +} + +/* BufferGetAndValidate */ + static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { + if (unlikely(info->buf == NULL)) return; + if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL; + __Pyx_ReleaseBuffer(info); +} +static void 
__Pyx_ZeroBuffer(Py_buffer* buf) { + buf->buf = NULL; + buf->obj = NULL; + buf->strides = __Pyx_zeros; + buf->shape = __Pyx_zeros; + buf->suboffsets = __Pyx_minusones; +} +static int __Pyx__GetBufferAndValidate( + Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, + int nd, int cast, __Pyx_BufFmt_StackElem* stack) +{ + buf->buf = NULL; + if (unlikely(__Pyx_GetBuffer(obj, buf, flags) == -1)) { + __Pyx_ZeroBuffer(buf); + return -1; + } + if (unlikely(buf->ndim != nd)) { + PyErr_Format(PyExc_ValueError, + "Buffer has wrong number of dimensions (expected %d, got %d)", + nd, buf->ndim); + goto fail; + } + if (!cast) { + __Pyx_BufFmt_Context ctx; + __Pyx_BufFmt_Init(&ctx, stack, dtype); + if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; + } + if (unlikely((unsigned)buf->itemsize != dtype->size)) { + PyErr_Format(PyExc_ValueError, + "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)", + buf->itemsize, (buf->itemsize > 1) ? "s" : "", + dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : ""); + goto fail; + } + if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; + return 0; +fail:; + __Pyx_SafeReleaseBuffer(buf); + return -1; +} + +/* None */ + static CYTHON_INLINE long __Pyx_mod_long(long a, long b) { + long r = a % b; + r += ((r != 0) & ((r ^ b) < 0)) * b; + return r; +} + +/* PyObjectCall */ + #if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { + PyObject *result; + ternaryfunc call = func->ob_type->tp_call; + if (unlikely(!call)) + return PyObject_Call(func, arg, kw); + if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) + return NULL; + result = (*call)(func, arg, kw); + Py_LeaveRecursiveCall(); + if (unlikely(!result) && unlikely(!PyErr_Occurred())) { + PyErr_SetString( + PyExc_SystemError, + "NULL result without error in PyObject_Call"); + } + return result; +} +#endif + +/* PyErrFetchRestore */ + #if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + tmp_type = tstate->curexc_type; + tmp_value = tstate->curexc_value; + tmp_tb = tstate->curexc_traceback; + tstate->curexc_type = type; + tstate->curexc_value = value; + tstate->curexc_traceback = tb; + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +} +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { + *type = tstate->curexc_type; + *value = tstate->curexc_value; + *tb = tstate->curexc_traceback; + tstate->curexc_type = 0; + tstate->curexc_value = 0; + tstate->curexc_traceback = 0; +} +#endif + +/* RaiseException */ + #if PY_MAJOR_VERSION < 3 +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, + CYTHON_UNUSED PyObject *cause) { + __Pyx_PyThreadState_declare + Py_XINCREF(type); + if (!value || value == Py_None) + value = NULL; + else + Py_INCREF(value); + if (!tb || tb == Py_None) + tb = NULL; + else { + Py_INCREF(tb); + if (!PyTraceBack_Check(tb)) { + PyErr_SetString(PyExc_TypeError, + "raise: arg 3 must be a traceback or None"); + goto raise_error; + } + } + if (PyType_Check(type)) { +#if CYTHON_COMPILING_IN_PYPY + if (!value) { + Py_INCREF(Py_None); + value = Py_None; + } +#endif + PyErr_NormalizeException(&type, &value, &tb); + } else { + if (value) { + 
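/* type is already an exception instance at this point (the PyType_Check branch above handled classes), so Python 2 raise semantics reject a separate value argument */ +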
PyErr_SetString(PyExc_TypeError, + "instance exception may not have a separate value"); + goto raise_error; + } + value = type; + type = (PyObject*) Py_TYPE(type); + Py_INCREF(type); + if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { + PyErr_SetString(PyExc_TypeError, + "raise: exception class must be a subclass of BaseException"); + goto raise_error; + } + } + __Pyx_PyThreadState_assign + __Pyx_ErrRestore(type, value, tb); + return; +raise_error: + Py_XDECREF(value); + Py_XDECREF(type); + Py_XDECREF(tb); + return; +} +#else +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { + PyObject* owned_instance = NULL; + if (tb == Py_None) { + tb = 0; + } else if (tb && !PyTraceBack_Check(tb)) { + PyErr_SetString(PyExc_TypeError, + "raise: arg 3 must be a traceback or None"); + goto bad; + } + if (value == Py_None) + value = 0; + if (PyExceptionInstance_Check(type)) { + if (value) { + PyErr_SetString(PyExc_TypeError, + "instance exception may not have a separate value"); + goto bad; + } + value = type; + type = (PyObject*) Py_TYPE(value); + } else if (PyExceptionClass_Check(type)) { + PyObject *instance_class = NULL; + if (value && PyExceptionInstance_Check(value)) { + instance_class = (PyObject*) Py_TYPE(value); + if (instance_class != type) { + int is_subclass = PyObject_IsSubclass(instance_class, type); + if (!is_subclass) { + instance_class = NULL; + } else if (unlikely(is_subclass == -1)) { + goto bad; + } else { + type = instance_class; + } + } + } + if (!instance_class) { + PyObject *args; + if (!value) + args = PyTuple_New(0); + else if (PyTuple_Check(value)) { + Py_INCREF(value); + args = value; + } else + args = PyTuple_Pack(1, value); + if (!args) + goto bad; + owned_instance = PyObject_Call(type, args, NULL); + Py_DECREF(args); + if (!owned_instance) + goto bad; + value = owned_instance; + if (!PyExceptionInstance_Check(value)) { + PyErr_Format(PyExc_TypeError, + "calling %R should have returned an instance of " + "BaseException, not %R", + type, Py_TYPE(value)); + goto bad; + } + } + } else { + PyErr_SetString(PyExc_TypeError, + "raise: exception class must be a subclass of BaseException"); + goto bad; + } + if (cause) { + PyObject *fixed_cause; + if (cause == Py_None) { + fixed_cause = NULL; + } else if (PyExceptionClass_Check(cause)) { + fixed_cause = PyObject_CallObject(cause, NULL); + if (fixed_cause == NULL) + goto bad; + } else if (PyExceptionInstance_Check(cause)) { + fixed_cause = cause; + Py_INCREF(fixed_cause); + } else { + PyErr_SetString(PyExc_TypeError, + "exception causes must derive from " + "BaseException"); + goto bad; + } + PyException_SetCause(value, fixed_cause); + } + PyErr_SetObject(type, value); + if (tb) { +#if CYTHON_COMPILING_IN_PYPY + PyObject *tmp_type, *tmp_value, *tmp_tb; + PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); + Py_INCREF(tb); + PyErr_Restore(tmp_type, tmp_value, tb); + Py_XDECREF(tmp_tb); +#else + PyThreadState *tstate = __Pyx_PyThreadState_Current; + PyObject* tmp_tb = tstate->curexc_traceback; + if (tb != tmp_tb) { + Py_INCREF(tb); + tstate->curexc_traceback = tb; + Py_XDECREF(tmp_tb); + } +#endif + } +bad: + Py_XDECREF(owned_instance); + return; +} +#endif + +/* GetModuleGlobalName */ + static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) { + PyObject *result; +#if !CYTHON_AVOID_BORROWED_REFS + result = PyDict_GetItem(__pyx_d, name); + if (likely(result)) { + Py_INCREF(result); + } else { +#else + result = PyObject_GetItem(__pyx_d, name); + if 
(!result) { + PyErr_Clear(); +#endif + result = __Pyx_GetBuiltinName(name); + } + return result; +} + +/* None */ + static CYTHON_INLINE long __Pyx_div_long(long a, long b) { + long q = a / b; + long r = a - q*b; + q -= ((r != 0) & ((r ^ b) < 0)); + return q; +} + +/* ExtTypeTest */ + static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { + if (unlikely(!type)) { + PyErr_SetString(PyExc_SystemError, "Missing type object"); + return 0; + } + if (likely(__Pyx_TypeCheck(obj, type))) + return 1; + PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", + Py_TYPE(obj)->tp_name, type->tp_name); + return 0; +} + +/* None */ + static CYTHON_INLINE int __Pyx_mod_int(int a, int b) { + int r = a % b; + r += ((r != 0) & ((r ^ b) < 0)) * b; + return r; +} + +/* RaiseTooManyValuesToUnpack */ + static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { + PyErr_Format(PyExc_ValueError, + "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); +} + +/* RaiseNeedMoreValuesToUnpack */ + static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { + PyErr_Format(PyExc_ValueError, + "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", + index, (index == 1) ? "" : "s"); +} + +/* RaiseNoneIterError */ + static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { + PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); +} + +/* SaveResetException */ + #if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { + #if PY_VERSION_HEX >= 0x030700A2 + *type = tstate->exc_state.exc_type; + *value = tstate->exc_state.exc_value; + *tb = tstate->exc_state.exc_traceback; + #else + *type = tstate->exc_type; + *value = tstate->exc_value; + *tb = tstate->exc_traceback; + #endif + Py_XINCREF(*type); + Py_XINCREF(*value); + Py_XINCREF(*tb); +} +static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + #if PY_VERSION_HEX >= 0x030700A2 + tmp_type = tstate->exc_state.exc_type; + tmp_value = tstate->exc_state.exc_value; + tmp_tb = tstate->exc_state.exc_traceback; + tstate->exc_state.exc_type = type; + tstate->exc_state.exc_value = value; + tstate->exc_state.exc_traceback = tb; + #else + tmp_type = tstate->exc_type; + tmp_value = tstate->exc_value; + tmp_tb = tstate->exc_traceback; + tstate->exc_type = type; + tstate->exc_value = value; + tstate->exc_traceback = tb; + #endif + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +} +#endif + +/* PyErrExceptionMatches */ + #if CYTHON_FAST_THREAD_STATE +static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { + Py_ssize_t i, n; + n = PyTuple_GET_SIZE(tuple); +#if PY_MAJOR_VERSION >= 3 + for (i=0; i<n; i++) { + if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; + } +#endif + for (i=0; i<n; i++) { + if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1; + } + return 0; +} +static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { + PyObject *exc_type = tstate->curexc_type; + if (exc_type == err) return 1; + if (unlikely(!exc_type)) return 0; + if (unlikely(PyTuple_Check(err))) + return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); + return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); +} +#endif + +/* GetException */ + #if CYTHON_FAST_THREAD_STATE +static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { +#else +static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) { +#endif + PyObject *local_type, *local_value, *local_tb; +#if CYTHON_FAST_THREAD_STATE + PyObject *tmp_type, *tmp_value, *tmp_tb; +
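/* Steal the live exception from the thread state, normalize it, return new references to the caller, and record it as the currently handled exception so sys.exc_info() sees it, matching what CPython does on entering an except block */ +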
local_type = tstate->curexc_type; + local_value = tstate->curexc_value; + local_tb = tstate->curexc_traceback; + tstate->curexc_type = 0; + tstate->curexc_value = 0; + tstate->curexc_traceback = 0; +#else + PyErr_Fetch(&local_type, &local_value, &local_tb); +#endif + PyErr_NormalizeException(&local_type, &local_value, &local_tb); +#if CYTHON_FAST_THREAD_STATE + if (unlikely(tstate->curexc_type)) +#else + if (unlikely(PyErr_Occurred())) +#endif + goto bad; + #if PY_MAJOR_VERSION >= 3 + if (local_tb) { + if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) + goto bad; + } + #endif + Py_XINCREF(local_tb); + Py_XINCREF(local_type); + Py_XINCREF(local_value); + *type = local_type; + *value = local_value; + *tb = local_tb; +#if CYTHON_FAST_THREAD_STATE + #if PY_VERSION_HEX >= 0x030700A2 + tmp_type = tstate->exc_state.exc_type; + tmp_value = tstate->exc_state.exc_value; + tmp_tb = tstate->exc_state.exc_traceback; + tstate->exc_state.exc_type = local_type; + tstate->exc_state.exc_value = local_value; + tstate->exc_state.exc_traceback = local_tb; + #else + tmp_type = tstate->exc_type; + tmp_value = tstate->exc_value; + tmp_tb = tstate->exc_traceback; + tstate->exc_type = local_type; + tstate->exc_value = local_value; + tstate->exc_traceback = local_tb; + #endif + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +#else + PyErr_SetExcInfo(local_type, local_value, local_tb); +#endif + return 0; +bad: + *type = 0; + *value = 0; + *tb = 0; + Py_XDECREF(local_type); + Py_XDECREF(local_value); + Py_XDECREF(local_tb); + return -1; +} + +/* Import */ + static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { + PyObject *empty_list = 0; + PyObject *module = 0; + PyObject *global_dict = 0; + PyObject *empty_dict = 0; + PyObject *list; + #if PY_MAJOR_VERSION < 3 + PyObject *py_import; + py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); + if (!py_import) + goto bad; + #endif + if (from_list) + list = from_list; + else { + empty_list = PyList_New(0); + if (!empty_list) + goto bad; + list = empty_list; + } + global_dict = PyModule_GetDict(__pyx_m); + if (!global_dict) + goto bad; + empty_dict = PyDict_New(); + if (!empty_dict) + goto bad; + { + #if PY_MAJOR_VERSION >= 3 + if (level == -1) { + if (strchr(__Pyx_MODULE_NAME, '.')) { + module = PyImport_ImportModuleLevelObject( + name, global_dict, empty_dict, list, 1); + if (!module) { + if (!PyErr_ExceptionMatches(PyExc_ImportError)) + goto bad; + PyErr_Clear(); + } + } + level = 0; + } + #endif + if (!module) { + #if PY_MAJOR_VERSION < 3 + PyObject *py_level = PyInt_FromLong(level); + if (!py_level) + goto bad; + module = PyObject_CallFunctionObjArgs(py_import, + name, global_dict, empty_dict, list, py_level, NULL); + Py_DECREF(py_level); + #else + module = PyImport_ImportModuleLevelObject( + name, global_dict, empty_dict, list, level); + #endif + } + } +bad: + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(py_import); + #endif + Py_XDECREF(empty_list); + Py_XDECREF(empty_dict); + return module; +} + +/* CLineInTraceback */ + #ifndef CYTHON_CLINE_IN_TRACEBACK +static int __Pyx_CLineForTraceback(CYTHON_UNUSED PyThreadState *tstate, int c_line) { + PyObject *use_cline; + PyObject *ptype, *pvalue, *ptraceback; +#if CYTHON_COMPILING_IN_CPYTHON + PyObject **cython_runtime_dict; +#endif + __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); +#if CYTHON_COMPILING_IN_CPYTHON + cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); + if (likely(cython_runtime_dict)) { + use_cline = 
PyDict_GetItem(*cython_runtime_dict, __pyx_n_s_cline_in_traceback); + } else +#endif + { + PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); + if (use_cline_obj) { + use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True; + Py_DECREF(use_cline_obj); + } else { + PyErr_Clear(); + use_cline = NULL; + } + } + if (!use_cline) { + c_line = 0; + PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); + } + else if (PyObject_Not(use_cline) != 0) { + c_line = 0; + } + __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); + return c_line; +} +#endif + +/* CodeObjectCache */ + static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { + int start = 0, mid = 0, end = count - 1; + if (end >= 0 && code_line > entries[end].code_line) { + return count; + } + while (start < end) { + mid = start + (end - start) / 2; + if (code_line < entries[mid].code_line) { + end = mid; + } else if (code_line > entries[mid].code_line) { + start = mid + 1; + } else { + return mid; + } + } + if (code_line <= entries[mid].code_line) { + return mid; + } else { + return mid + 1; + } +} +static PyCodeObject *__pyx_find_code_object(int code_line) { + PyCodeObject* code_object; + int pos; + if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { + return NULL; + } + pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); + if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { + return NULL; + } + code_object = __pyx_code_cache.entries[pos].code_object; + Py_INCREF(code_object); + return code_object; +} +static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { + int pos, i; + __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; + if (unlikely(!code_line)) { + return; + } + if (unlikely(!entries)) { + entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); + if (likely(entries)) { + __pyx_code_cache.entries = entries; + __pyx_code_cache.max_count = 64; + __pyx_code_cache.count = 1; + entries[0].code_line = code_line; + entries[0].code_object = code_object; + Py_INCREF(code_object); + } + return; + } + pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); + if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { + PyCodeObject* tmp = entries[pos].code_object; + entries[pos].code_object = code_object; + Py_DECREF(tmp); + return; + } + if (__pyx_code_cache.count == __pyx_code_cache.max_count) { + int new_max = __pyx_code_cache.max_count + 64; + entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( + __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry)); + if (unlikely(!entries)) { + return; + } + __pyx_code_cache.entries = entries; + __pyx_code_cache.max_count = new_max; + } + for (i=__pyx_code_cache.count; i>pos; i--) { + entries[i] = entries[i-1]; + } + entries[pos].code_line = code_line; + entries[pos].code_object = code_object; + __pyx_code_cache.count++; + Py_INCREF(code_object); +} + +/* AddTraceback */ + #include "compile.h" +#include "frameobject.h" +#include "traceback.h" +static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( + const char *funcname, int c_line, + int py_line, const char *filename) { + PyCodeObject *py_code = 0; + PyObject *py_srcfile = 0; + PyObject *py_funcname = 0; + #if 
PY_MAJOR_VERSION < 3 + py_srcfile = PyString_FromString(filename); + #else + py_srcfile = PyUnicode_FromString(filename); + #endif + if (!py_srcfile) goto bad; + if (c_line) { + #if PY_MAJOR_VERSION < 3 + py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); + #else + py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); + #endif + } + else { + #if PY_MAJOR_VERSION < 3 + py_funcname = PyString_FromString(funcname); + #else + py_funcname = PyUnicode_FromString(funcname); + #endif + } + if (!py_funcname) goto bad; + py_code = __Pyx_PyCode_New( + 0, + 0, + 0, + 0, + 0, + __pyx_empty_bytes, /*PyObject *code,*/ + __pyx_empty_tuple, /*PyObject *consts,*/ + __pyx_empty_tuple, /*PyObject *names,*/ + __pyx_empty_tuple, /*PyObject *varnames,*/ + __pyx_empty_tuple, /*PyObject *freevars,*/ + __pyx_empty_tuple, /*PyObject *cellvars,*/ + py_srcfile, /*PyObject *filename,*/ + py_funcname, /*PyObject *name,*/ + py_line, + __pyx_empty_bytes /*PyObject *lnotab*/ + ); + Py_DECREF(py_srcfile); + Py_DECREF(py_funcname); + return py_code; +bad: + Py_XDECREF(py_srcfile); + Py_XDECREF(py_funcname); + return NULL; +} +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename) { + PyCodeObject *py_code = 0; + PyFrameObject *py_frame = 0; + PyThreadState *tstate = __Pyx_PyThreadState_Current; + if (c_line) { + c_line = __Pyx_CLineForTraceback(tstate, c_line); + } + py_code = __pyx_find_code_object(c_line ? -c_line : py_line); + if (!py_code) { + py_code = __Pyx_CreateCodeObjectForTraceback( + funcname, c_line, py_line, filename); + if (!py_code) goto bad; + __pyx_insert_code_object(c_line ? -c_line : py_line, py_code); + } + py_frame = PyFrame_New( + tstate, /*PyThreadState *tstate,*/ + py_code, /*PyCodeObject *code,*/ + __pyx_d, /*PyObject *globals,*/ + 0 /*PyObject *locals*/ + ); + if (!py_frame) goto bad; + __Pyx_PyFrame_SetLineNumber(py_frame, py_line); + PyTraceBack_Here(py_frame); +bad: + Py_XDECREF(py_code); + Py_XDECREF(py_frame); +} + +#if PY_MAJOR_VERSION < 3 +static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { + if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); + if (__Pyx_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags); + PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); + return -1; +} +static void __Pyx_ReleaseBuffer(Py_buffer *view) { + PyObject *obj = view->obj; + if (!obj) return; + if (PyObject_CheckBuffer(obj)) { + PyBuffer_Release(view); + return; + } + if ((0)) {} + else if (__Pyx_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) __pyx_pw_5numpy_7ndarray_3__releasebuffer__(obj, view); + view->obj = NULL; + Py_DECREF(obj); +} +#endif + + + /* CIntToPy */ + static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { + const int neg_one = (int) -1, const_zero = (int) 0; + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(int) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(int) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(int) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { + 
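/* the signed value is known to fit a C long long here; anything wider falls through to the generic _PyLong_FromByteArray path below */ +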
return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(int), + little, !is_unsigned); + } +} + +/* CIntFromPyVerify */ + #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) +#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) +#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ + {\ + func_type value = func_value;\ + if (sizeof(target_type) < sizeof(func_type)) {\ + if (unlikely(value != (func_type) (target_type) value)) {\ + func_type zero = 0;\ + if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ + return (target_type) -1;\ + if (is_unsigned && unlikely(value < zero))\ + goto raise_neg_overflow;\ + else\ + goto raise_overflow;\ + }\ + }\ + return (target_type) value;\ + } + +/* Declarations */ + #if CYTHON_CCOMPLEX + #ifdef __cplusplus + static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { + return ::std::complex< float >(x, y); + } + #else + static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { + return x + y*(__pyx_t_float_complex)_Complex_I; + } + #endif +#else + static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { + __pyx_t_float_complex z; + z.real = x; + z.imag = y; + return z; + } +#endif + +/* Arithmetic */ + #if CYTHON_CCOMPLEX +#else + static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + return (a.real == b.real) && (a.imag == b.imag); + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + __pyx_t_float_complex z; + z.real = a.real + b.real; + z.imag = a.imag + b.imag; + return z; + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + __pyx_t_float_complex z; + z.real = a.real - b.real; + z.imag = a.imag - b.imag; + return z; + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + __pyx_t_float_complex z; + z.real = a.real * b.real - a.imag * b.imag; + z.imag = a.real * b.imag + a.imag * b.real; + return z; + } + #if 1 + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + if (b.imag == 0) { + return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real); + } else if (fabsf(b.real) >= fabsf(b.imag)) { + if (b.real == 0 && b.imag == 0) { + return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.imag); + } else { + float r = b.imag / b.real; + float s = 1.0 / (b.real + b.imag * r); + return __pyx_t_float_complex_from_parts( + (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); + } + } else { + float r = b.real / b.imag; + float s = 1.0 / (b.imag + b.real * r); + return __pyx_t_float_complex_from_parts( + (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); + } + } + #else + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + if (b.imag == 0) { + return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real); + } else { + float denom = b.real * b.real + b.imag * b.imag; + return 
__pyx_t_float_complex_from_parts( + (a.real * b.real + a.imag * b.imag) / denom, + (a.imag * b.real - a.real * b.imag) / denom); + } + } + #endif + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex a) { + __pyx_t_float_complex z; + z.real = -a.real; + z.imag = -a.imag; + return z; + } + static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex a) { + return (a.real == 0) && (a.imag == 0); + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex a) { + __pyx_t_float_complex z; + z.real = a.real; + z.imag = -a.imag; + return z; + } + #if 1 + static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex z) { + #if !defined(HAVE_HYPOT) || defined(_MSC_VER) + return sqrtf(z.real*z.real + z.imag*z.imag); + #else + return hypotf(z.real, z.imag); + #endif + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + __pyx_t_float_complex z; + float r, lnr, theta, z_r, z_theta; + if (b.imag == 0 && b.real == (int)b.real) { + if (b.real < 0) { + float denom = a.real * a.real + a.imag * a.imag; + a.real = a.real / denom; + a.imag = -a.imag / denom; + b.real = -b.real; + } + switch ((int)b.real) { + case 0: + z.real = 1; + z.imag = 0; + return z; + case 1: + return a; + case 2: + z = __Pyx_c_prod_float(a, a); + return __Pyx_c_prod_float(a, a); + case 3: + z = __Pyx_c_prod_float(a, a); + return __Pyx_c_prod_float(z, a); + case 4: + z = __Pyx_c_prod_float(a, a); + return __Pyx_c_prod_float(z, z); + } + } + if (a.imag == 0) { + if (a.real == 0) { + return a; + } else if (b.imag == 0) { + z.real = powf(a.real, b.real); + z.imag = 0; + return z; + } else if (a.real > 0) { + r = a.real; + theta = 0; + } else { + r = -a.real; + theta = atan2f(0, -1); + } + } else { + r = __Pyx_c_abs_float(a); + theta = atan2f(a.imag, a.real); + } + lnr = logf(r); + z_r = expf(lnr * b.real - theta * b.imag); + z_theta = theta * b.real + lnr * b.imag; + z.real = z_r * cosf(z_theta); + z.imag = z_r * sinf(z_theta); + return z; + } + #endif +#endif + +/* Declarations */ + #if CYTHON_CCOMPLEX + #ifdef __cplusplus + static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { + return ::std::complex< double >(x, y); + } + #else + static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { + return x + y*(__pyx_t_double_complex)_Complex_I; + } + #endif +#else + static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { + __pyx_t_double_complex z; + z.real = x; + z.imag = y; + return z; + } +#endif + +/* Arithmetic */ + #if CYTHON_CCOMPLEX +#else + static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + return (a.real == b.real) && (a.imag == b.imag); + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + __pyx_t_double_complex z; + z.real = a.real + b.real; + z.imag = a.imag + b.imag; + return z; + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + __pyx_t_double_complex z; + z.real = a.real - b.real; + z.imag = a.imag - b.imag; + return z; + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + __pyx_t_double_complex z; + z.real = a.real * b.real - a.imag * b.imag; + z.imag = a.real * b.imag + a.imag * b.real; + return z; + 
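/* product above is the textbook expansion (a+bi)(c+di) = (ac-bd) + (ad+bc)i; the quotient below (the "#if 1" branch) is Smith's scaled division, which compares fabs(b.real) with fabs(b.imag) to avoid overflow when forming |b|^2 */ +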
} + #if 1 + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + if (b.imag == 0) { + return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real); + } else if (fabs(b.real) >= fabs(b.imag)) { + if (b.real == 0 && b.imag == 0) { + return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.imag); + } else { + double r = b.imag / b.real; + double s = 1.0 / (b.real + b.imag * r); + return __pyx_t_double_complex_from_parts( + (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); + } + } else { + double r = b.real / b.imag; + double s = 1.0 / (b.imag + b.real * r); + return __pyx_t_double_complex_from_parts( + (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); + } + } + #else + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + if (b.imag == 0) { + return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real); + } else { + double denom = b.real * b.real + b.imag * b.imag; + return __pyx_t_double_complex_from_parts( + (a.real * b.real + a.imag * b.imag) / denom, + (a.imag * b.real - a.real * b.imag) / denom); + } + } + #endif + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex a) { + __pyx_t_double_complex z; + z.real = -a.real; + z.imag = -a.imag; + return z; + } + static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex a) { + return (a.real == 0) && (a.imag == 0); + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex a) { + __pyx_t_double_complex z; + z.real = a.real; + z.imag = -a.imag; + return z; + } + #if 1 + static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex z) { + #if !defined(HAVE_HYPOT) || defined(_MSC_VER) + return sqrt(z.real*z.real + z.imag*z.imag); + #else + return hypot(z.real, z.imag); + #endif + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + __pyx_t_double_complex z; + double r, lnr, theta, z_r, z_theta; + if (b.imag == 0 && b.real == (int)b.real) { + if (b.real < 0) { + double denom = a.real * a.real + a.imag * a.imag; + a.real = a.real / denom; + a.imag = -a.imag / denom; + b.real = -b.real; + } + switch ((int)b.real) { + case 0: + z.real = 1; + z.imag = 0; + return z; + case 1: + return a; + case 2: + z = __Pyx_c_prod_double(a, a); + return __Pyx_c_prod_double(a, a); + case 3: + z = __Pyx_c_prod_double(a, a); + return __Pyx_c_prod_double(z, a); + case 4: + z = __Pyx_c_prod_double(a, a); + return __Pyx_c_prod_double(z, z); + } + } + if (a.imag == 0) { + if (a.real == 0) { + return a; + } else if (b.imag == 0) { + z.real = pow(a.real, b.real); + z.imag = 0; + return z; + } else if (a.real > 0) { + r = a.real; + theta = 0; + } else { + r = -a.real; + theta = atan2(0, -1); + } + } else { + r = __Pyx_c_abs_double(a); + theta = atan2(a.imag, a.real); + } + lnr = log(r); + z_r = exp(lnr * b.real - theta * b.imag); + z_theta = theta * b.real + lnr * b.imag; + z.real = z_r * cos(z_theta); + z.imag = z_r * sin(z_theta); + return z; + } + #endif +#endif + +/* CIntToPy */ + static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value) { + const enum NPY_TYPES neg_one = (enum NPY_TYPES) -1, const_zero = (enum NPY_TYPES) 0; + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(enum NPY_TYPES) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(enum 
NPY_TYPES) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(enum NPY_TYPES) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(enum NPY_TYPES) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(enum NPY_TYPES), + little, !is_unsigned); + } +} + +/* CIntFromPy */ + static CYTHON_INLINE unsigned int __Pyx_PyInt_As_unsigned_int(PyObject *x) { + const unsigned int neg_one = (unsigned int) -1, const_zero = (unsigned int) 0; + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(unsigned int) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(unsigned int, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (unsigned int) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (unsigned int) 0; + case 1: __PYX_VERIFY_RETURN_INT(unsigned int, digit, digits[0]) + case 2: + if (8 * sizeof(unsigned int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) >= 2 * PyLong_SHIFT) { + return (unsigned int) (((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(unsigned int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) >= 3 * PyLong_SHIFT) { + return (unsigned int) (((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(unsigned int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) >= 4 * PyLong_SHIFT) { + return (unsigned int) (((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (unsigned int) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(unsigned int) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(unsigned int, unsigned long, 
PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(unsigned int) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(unsigned int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (unsigned int) 0; + case -1: __PYX_VERIFY_RETURN_INT(unsigned int, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(unsigned int, digit, +digits[0]) + case -2: + if (8 * sizeof(unsigned int) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT) { + return (unsigned int) (((unsigned int)-1)*(((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(unsigned int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT) { + return (unsigned int) ((((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT) { + return (unsigned int) (((unsigned int)-1)*(((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(unsigned int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT) { + return (unsigned int) ((((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 4 * PyLong_SHIFT) { + return (unsigned int) (((unsigned int)-1)*(((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(unsigned int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 4 * PyLong_SHIFT) { + return 
(unsigned int) ((((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); + } + } + break; + } +#endif + if (sizeof(unsigned int) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(unsigned int, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(unsigned int) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(unsigned int, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + unsigned int val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (unsigned int) -1; + } + } else { + unsigned int val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (unsigned int) -1; + val = __Pyx_PyInt_As_unsigned_int(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to unsigned int"); + return (unsigned int) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to unsigned int"); + return (unsigned int) -1; +} + +/* CIntFromPy */ + static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { + const int neg_one = (int) -1, const_zero = (int) 0; + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(int) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (int) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (int) 0; + case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) + case 2: + if (8 * sizeof(int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { + return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { + return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << 
PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { + return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (int) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(int) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (int) 0; + case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) + case -2: + if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, 
unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { + return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + } +#endif + if (sizeof(int) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + int val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (int) -1; + } + } else { + int val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (int) -1; + val = __Pyx_PyInt_As_int(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to int"); + return (int) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to int"); + return (int) -1; +} + +/* CIntToPy */ + static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { + const long neg_one = (long) -1, const_zero = (long) 0; + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(long) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(long) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(long) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(long), + little, !is_unsigned); + } +} + +/* CIntFromPy */ + static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { + const long neg_one = (long) -1, const_zero = (long) 0; + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(long) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (long) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (long) 0; + case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) + case 2: + if (8 * 
sizeof(long) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { + return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(long) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { + return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(long) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { + return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (long) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(long) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (long) 0; + case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) + case -2: + if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(long) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 3: + if (8 * 
sizeof(long) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(long) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + } +#endif + if (sizeof(long) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + long val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (long) -1; + } + } else { + long val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (long) -1; + val = __Pyx_PyInt_As_long(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to long"); + return (long) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to long"); + return (long) -1; +} + +/* FastTypeChecks */ + #if CYTHON_COMPILING_IN_CPYTHON +static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { + while (a) { + a = a->tp_base; + if (a == b) + return 1; + } + return b == &PyBaseObject_Type; +} +static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { + PyObject *mro; + if (a == b) return 1; + mro = a->tp_mro; + if (likely(mro)) { + Py_ssize_t i, n; + n = PyTuple_GET_SIZE(mro); + for (i = 0; i < n; i++) { + if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) + return 1; + } + return 0; + } + return __Pyx_InBases(a, b); +} +#if PY_MAJOR_VERSION 
== 2 +static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { + PyObject *exception, *value, *tb; + int res; + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ErrFetch(&exception, &value, &tb); + res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; + if (unlikely(res == -1)) { + PyErr_WriteUnraisable(err); + res = 0; + } + if (!res) { + res = PyObject_IsSubclass(err, exc_type2); + if (unlikely(res == -1)) { + PyErr_WriteUnraisable(err); + res = 0; + } + } + __Pyx_ErrRestore(exception, value, tb); + return res; +} +#else +static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { + int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; + if (!res) { + res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); + } + return res; +} +#endif +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { + if (likely(err == exc_type)) return 1; + if (likely(PyExceptionClass_Check(err))) { + return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); + } + return PyErr_GivenExceptionMatches(err, exc_type); +} +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { + if (likely(err == exc_type1 || err == exc_type2)) return 1; + if (likely(PyExceptionClass_Check(err))) { + return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); + } + return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); +} +#endif + +/* CheckBinaryVersion */ + static int __Pyx_check_binary_version(void) { + char ctversion[4], rtversion[4]; + PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); + PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); + if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { + char message[200]; + PyOS_snprintf(message, sizeof(message), + "compiletime version %s of module '%.100s' " + "does not match runtime version %s", + ctversion, __Pyx_MODULE_NAME, rtversion); + return PyErr_WarnEx(NULL, message, 1); + } + return 0; +} + +/* ModuleImport */ + #ifndef __PYX_HAVE_RT_ImportModule +#define __PYX_HAVE_RT_ImportModule +static PyObject *__Pyx_ImportModule(const char *name) { + PyObject *py_name = 0; + PyObject *py_module = 0; + py_name = __Pyx_PyIdentifier_FromString(name); + if (!py_name) + goto bad; + py_module = PyImport_Import(py_name); + Py_DECREF(py_name); + return py_module; +bad: + Py_XDECREF(py_name); + return 0; +} +#endif + +/* TypeImport */ + #ifndef __PYX_HAVE_RT_ImportType +#define __PYX_HAVE_RT_ImportType +static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, + size_t size, int strict) +{ + PyObject *py_module = 0; + PyObject *result = 0; + PyObject *py_name = 0; + char warning[200]; + Py_ssize_t basicsize; +#ifdef Py_LIMITED_API + PyObject *py_basicsize; +#endif + py_module = __Pyx_ImportModule(module_name); + if (!py_module) + goto bad; + py_name = __Pyx_PyIdentifier_FromString(class_name); + if (!py_name) + goto bad; + result = PyObject_GetAttr(py_module, py_name); + Py_DECREF(py_name); + py_name = 0; + Py_DECREF(py_module); + py_module = 0; + if (!result) + goto bad; + if (!PyType_Check(result)) { + PyErr_Format(PyExc_TypeError, + "%.200s.%.200s is not a type object", + module_name, class_name); + goto bad; + } +#ifndef Py_LIMITED_API + basicsize = ((PyTypeObject 
*)result)->tp_basicsize; +#else + py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); + if (!py_basicsize) + goto bad; + basicsize = PyLong_AsSsize_t(py_basicsize); + Py_DECREF(py_basicsize); + py_basicsize = 0; + if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) + goto bad; +#endif + if (!strict && (size_t)basicsize > size) { + PyOS_snprintf(warning, sizeof(warning), + "%s.%s size changed, may indicate binary incompatibility. Expected %zd, got %zd", + module_name, class_name, basicsize, size); + if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; + } + else if ((size_t)basicsize != size) { + PyErr_Format(PyExc_ValueError, + "%.200s.%.200s has the wrong size, try recompiling. Expected %zd, got %zd", + module_name, class_name, basicsize, size); + goto bad; + } + return (PyTypeObject *)result; +bad: + Py_XDECREF(py_module); + Py_XDECREF(result); + return NULL; +} +#endif + +/* InitStrings */ + static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { + while (t->p) { + #if PY_MAJOR_VERSION < 3 + if (t->is_unicode) { + *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); + } else if (t->intern) { + *t->p = PyString_InternFromString(t->s); + } else { + *t->p = PyString_FromStringAndSize(t->s, t->n - 1); + } + #else + if (t->is_unicode | t->is_str) { + if (t->intern) { + *t->p = PyUnicode_InternFromString(t->s); + } else if (t->encoding) { + *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); + } else { + *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); + } + } else { + *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); + } + #endif + if (!*t->p) + return -1; + if (PyObject_Hash(*t->p) == -1) + PyErr_Clear(); + ++t; + } + return 0; +} + +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { + return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); +} +static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { + Py_ssize_t ignore; + return __Pyx_PyObject_AsStringAndSize(o, &ignore); +} +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT +#if !CYTHON_PEP393_ENABLED +static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { + char* defenc_c; + PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); + if (!defenc) return NULL; + defenc_c = PyBytes_AS_STRING(defenc); +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + { + char* end = defenc_c + PyBytes_GET_SIZE(defenc); + char* c; + for (c = defenc_c; c < end; c++) { + if ((unsigned char) (*c) >= 128) { + PyUnicode_AsASCIIString(o); + return NULL; + } + } + } +#endif + *length = PyBytes_GET_SIZE(defenc); + return defenc_c; +} +#else +static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { + if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + if (likely(PyUnicode_IS_ASCII(o))) { + *length = PyUnicode_GET_LENGTH(o); + return PyUnicode_AsUTF8(o); + } else { + PyUnicode_AsASCIIString(o); + return NULL; + } +#else + return PyUnicode_AsUTF8AndSize(o, length); +#endif +} +#endif +#endif +static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT + if ( +#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + __Pyx_sys_getdefaultencoding_not_ascii && +#endif + PyUnicode_Check(o)) { + return __Pyx_PyUnicode_AsStringAndSize(o, length); + } else +#endif +#if 
(!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) + if (PyByteArray_Check(o)) { + *length = PyByteArray_GET_SIZE(o); + return PyByteArray_AS_STRING(o); + } else +#endif + { + char* result; + int r = PyBytes_AsStringAndSize(o, &result, length); + if (unlikely(r < 0)) { + return NULL; + } else { + return result; + } + } +} +static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { + int is_true = x == Py_True; + if (is_true | (x == Py_False) | (x == Py_None)) return is_true; + else return PyObject_IsTrue(x); +} +static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { +#if PY_MAJOR_VERSION >= 3 + if (PyLong_Check(result)) { + if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, + "__int__ returned non-int (type %.200s). " + "The ability to return an instance of a strict subclass of int " + "is deprecated, and may be removed in a future version of Python.", + Py_TYPE(result)->tp_name)) { + Py_DECREF(result); + return NULL; + } + return result; + } +#endif + PyErr_Format(PyExc_TypeError, + "__%.4s__ returned non-%.4s (type %.200s)", + type_name, type_name, Py_TYPE(result)->tp_name); + Py_DECREF(result); + return NULL; +} +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { +#if CYTHON_USE_TYPE_SLOTS + PyNumberMethods *m; +#endif + const char *name = NULL; + PyObject *res = NULL; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x) || PyLong_Check(x))) +#else + if (likely(PyLong_Check(x))) +#endif + return __Pyx_NewRef(x); +#if CYTHON_USE_TYPE_SLOTS + m = Py_TYPE(x)->tp_as_number; + #if PY_MAJOR_VERSION < 3 + if (m && m->nb_int) { + name = "int"; + res = m->nb_int(x); + } + else if (m && m->nb_long) { + name = "long"; + res = m->nb_long(x); + } + #else + if (likely(m && m->nb_int)) { + name = "int"; + res = m->nb_int(x); + } + #endif +#else + if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { + res = PyNumber_Int(x); + } +#endif + if (likely(res)) { +#if PY_MAJOR_VERSION < 3 + if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { +#else + if (unlikely(!PyLong_CheckExact(res))) { +#endif + return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); + } + } + else if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_TypeError, + "an integer is required"); + } + return res; +} +static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { + Py_ssize_t ival; + PyObject *x; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_CheckExact(b))) { + if (sizeof(Py_ssize_t) >= sizeof(long)) + return PyInt_AS_LONG(b); + else + return PyInt_AsSsize_t(x); + } +#endif + if (likely(PyLong_CheckExact(b))) { + #if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)b)->ob_digit; + const Py_ssize_t size = Py_SIZE(b); + if (likely(__Pyx_sst_abs(size) <= 1)) { + ival = likely(size) ? 
digits[0] : 0; + if (size == -1) ival = -ival; + return ival; + } else { + switch (size) { + case 2: + if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { + return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -2: + if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case 3: + if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { + return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -3: + if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case 4: + if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { + return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -4: + if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + } + } + #endif + return PyLong_AsSsize_t(b); + } + x = PyNumber_Index(b); + if (!x) return -1; + ival = PyInt_AsSsize_t(x); + Py_DECREF(x); + return ival; +} +static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { + return PyInt_FromSize_t(ival); +} + + +#endif /* Py_PYTHON_H */ diff --git a/astropy/convolution/boundary_wrap.pyx b/astropy/convolution/boundary_wrap.pyx new file mode 100644 index 0000000..d5a1b9a --- /dev/null +++ b/astropy/convolution/boundary_wrap.pyx @@ -0,0 +1,183 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import division +import numpy as np +cimport numpy as np + +DTYPE = np.float +ctypedef np.float_t DTYPE_t + +cdef extern from "numpy/npy_math.h" nogil: + bint npy_isnan(double x) + +cimport cython + + +@cython.boundscheck(False) # turn off bounds-checking for entire function +def convolve1d_boundary_wrap(np.ndarray[DTYPE_t, ndim=1] f, + np.ndarray[DTYPE_t, ndim=1] g, + bint normalize_by_kernel): + + if g.shape[0] % 2 != 1: + raise ValueError("Convolution kernel must have odd dimensions") + + assert f.dtype == DTYPE and g.dtype == DTYPE + + cdef int nx = f.shape[0] + cdef int nkx = g.shape[0] + cdef int wkx = nkx // 2 + cdef np.ndarray[DTYPE_t, ndim=1] conv = np.empty([nx], dtype=DTYPE) + cdef unsigned int i, iii + cdef int ii + + cdef int iimin, iimax + + cdef DTYPE_t top, bot, ker, val + + # release the GIL + with nogil: + + # Now run the proper convolution + for i in range(nx): + top = 0. + bot = 0. 
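+            # Note on the index arithmetic below (added comment; the logic is
+            # unchanged): ii runs over the kernel footprint in image
+            # coordinates, iii = ii % nx wraps out-of-range indices back into
+            # the array (Python modulo semantics, so negative ii wraps
+            # correctly), which implements the periodic boundary, and the
+            # reversed kernel index g[nkx - 1 - (wkx + ii - i)] flips the
+            # kernel so this is a true convolution rather than a correlation.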
+            iimin = i - wkx
+            iimax = i + wkx + 1
+            for ii in range(iimin, iimax):
+                iii = ii % nx
+                val = f[iii]
+                ker = g[(nkx - 1 - (wkx + ii - i))]
+                if not npy_isnan(val):
+                    top += val * ker
+                    bot += ker
+            if normalize_by_kernel:
+                if bot == 0:
+                    conv[i] = f[i]
+                else:
+                    conv[i] = top / bot
+            else:
+                conv[i] = top
+    # GIL acquired again here
+    return conv
+
+
+@cython.boundscheck(False)  # turn off bounds-checking for entire function
+def convolve2d_boundary_wrap(np.ndarray[DTYPE_t, ndim=2] f,
+                             np.ndarray[DTYPE_t, ndim=2] g,
+                             bint normalize_by_kernel):
+
+    if g.shape[0] % 2 != 1 or g.shape[1] % 2 != 1:
+        raise ValueError("Convolution kernel must have odd dimensions")
+
+    assert f.dtype == DTYPE and g.dtype == DTYPE
+
+    cdef int nx = f.shape[0]
+    cdef int ny = f.shape[1]
+    cdef int nkx = g.shape[0]
+    cdef int nky = g.shape[1]
+    cdef int wkx = nkx // 2
+    cdef int wky = nky // 2
+    cdef np.ndarray[DTYPE_t, ndim=2] conv = np.empty([nx, ny], dtype=DTYPE)
+    cdef unsigned int i, j, iii, jjj
+    cdef int ii, jj
+
+    cdef int iimin, iimax, jjmin, jjmax
+
+    cdef DTYPE_t top, bot, ker, val
+
+    # release the GIL
+    with nogil:
+
+        # Now run the proper convolution
+        for i in range(nx):
+            for j in range(ny):
+                top = 0.
+                bot = 0.
+                iimin = i - wkx
+                iimax = i + wkx + 1
+                jjmin = j - wky
+                jjmax = j + wky + 1
+                for ii in range(iimin, iimax):
+                    for jj in range(jjmin, jjmax):
+                        iii = ii % nx
+                        jjj = jj % ny
+                        val = f[iii, jjj]
+                        ker = g[(nkx - 1 - (wkx + ii - i)),
+                                (nky - 1 - (wky + jj - j))]
+                        if not npy_isnan(val):
+                            top += val * ker
+                            bot += ker
+                if normalize_by_kernel:
+                    if bot == 0:
+                        conv[i, j] = f[i, j]
+                    else:
+                        conv[i, j] = top / bot
+                else:
+                    conv[i, j] = top
+    # GIL acquired again here
+    return conv
+
+
+@cython.boundscheck(False)  # turn off bounds-checking for entire function
+def convolve3d_boundary_wrap(np.ndarray[DTYPE_t, ndim=3] f,
+                             np.ndarray[DTYPE_t, ndim=3] g,
+                             bint normalize_by_kernel):
+
+    if g.shape[0] % 2 != 1 or g.shape[1] % 2 != 1 or g.shape[2] % 2 != 1:
+        raise ValueError("Convolution kernel must have odd dimensions")
+
+    assert f.dtype == DTYPE and g.dtype == DTYPE
+
+    cdef int nx = f.shape[0]
+    cdef int ny = f.shape[1]
+    cdef int nz = f.shape[2]
+    cdef int nkx = g.shape[0]
+    cdef int nky = g.shape[1]
+    cdef int nkz = g.shape[2]
+    cdef int wkx = nkx // 2
+    cdef int wky = nky // 2
+    cdef int wkz = nkz // 2
+    cdef np.ndarray[DTYPE_t, ndim=3] conv = np.empty([nx, ny, nz], dtype=DTYPE)
+    cdef unsigned int i, j, k, iii, jjj, kkk
+    cdef int ii, jj, kk
+
+    cdef int iimin, iimax, jjmin, jjmax, kkmin, kkmax
+
+    cdef DTYPE_t top, bot, ker, val
+
+    # release the GIL
+    with nogil:
+
+        # Now run the proper convolution
+        for i in range(nx):
+            for j in range(ny):
+                for k in range(nz):
+                    top = 0.
+                    bot = 0.
+                    iimin = i - wkx
+                    iimax = i + wkx + 1
+                    jjmin = j - wky
+                    jjmax = j + wky + 1
+                    kkmin = k - wkz
+                    kkmax = k + wkz + 1
+                    for ii in range(iimin, iimax):
+                        for jj in range(jjmin, jjmax):
+                            for kk in range(kkmin, kkmax):
+                                iii = ii % nx
+                                jjj = jj % ny
+                                kkk = kk % nz
+                                val = f[iii, jjj, kkk]
+                                ker = g[(nkx - 1 - (wkx + ii - i)),
+                                        (nky - 1 - (wky + jj - j)),
+                                        (nkz - 1 - (wkz + kk - k))]
+                                if not npy_isnan(val):
+                                    top += val * ker
+                                    bot += ker
+                    if normalize_by_kernel:
+                        if bot == 0:
+                            conv[i, j, k] = f[i, j, k]
+                        else:
+                            conv[i, j, k] = top / bot
+                    else:
+                        conv[i, j, k] = top
+    # GIL acquired again here
+    return conv
diff --git a/astropy/convolution/convolve.py b/astropy/convolution/convolve.py
new file mode 100644
index 0000000..ec2796e
--- /dev/null
+++ b/astropy/convolution/convolve.py
@@ -0,0 +1,818 @@
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
+
+import warnings
+
+import numpy as np
+from functools import partial
+
+from .core import Kernel, Kernel1D, Kernel2D, MAX_NORMALIZATION
+from ..utils.exceptions import AstropyUserWarning
+from ..utils.console import human_file_size
+from ..utils.decorators import deprecated_renamed_argument
+from .. import units as u
+from ..nddata import support_nddata
+from ..modeling.core import _make_arithmetic_operator, BINARY_OPERATORS
+from ..modeling.core import _CompoundModelMeta
+
+from ..extern.six.moves import range, zip
+
+
+# Disabling all doctests in this module until a better way of handling warnings
+# in doctests can be determined
+__doctest_skip__ = ['*']
+
+BOUNDARY_OPTIONS = [None, 'fill', 'wrap', 'extend']
+
+
+@support_nddata(data='array')
+def convolve(array, kernel, boundary='fill', fill_value=0.,
+             nan_treatment='interpolate', normalize_kernel=True, mask=None,
+             preserve_nan=False, normalization_zero_tol=1e-8):
+    '''
+    Convolve an array with a kernel.
+
+    This routine differs from `scipy.ndimage.convolve` because
+    it includes a special treatment for ``NaN`` values. Rather than
+    including ``NaN`` values in the array in the convolution calculation, which
+    causes large ``NaN`` holes in the convolved array, ``NaN`` values are
+    replaced with interpolated values using the kernel as an interpolation
+    function.
+
+    Parameters
+    ----------
+    array : `numpy.ndarray` or `~astropy.nddata.NDData`
+        The array to convolve. This should be a 1, 2, or 3-dimensional array
+        or a list or a set of nested lists representing a 1, 2, or
+        3-dimensional array. If an `~astropy.nddata.NDData`, the ``mask`` of
+        the `~astropy.nddata.NDData` will be used as the ``mask`` argument.
+    kernel : `numpy.ndarray` or `~astropy.convolution.Kernel`
+        The convolution kernel. The number of dimensions should match those for
+        the array, and the dimensions should be odd in all directions. If a
+        masked array, the masked values will be replaced by ``fill_value``.
+    boundary : str, optional
+        A flag indicating how to handle boundaries:
+            * `None`
+                Set the ``result`` values to zero where the kernel
+                extends beyond the edge of the array.
+            * 'fill'
+                Set values outside the array boundary to ``fill_value``
+                (default).
+            * 'wrap'
+                Periodic boundary that wraps to the other side of ``array``.
+            * 'extend'
+                Set values outside the array to the nearest ``array``
+                value.
+    fill_value : float, optional
+        The value to use outside the array when using ``boundary='fill'``.
+    normalize_kernel : bool, optional
+        Whether to normalize the kernel to have a sum of one prior to
+        convolving.
+    nan_treatment : 'interpolate', 'fill'
+        'interpolate' will result in renormalization of the kernel at each
+        position, ignoring pixels that are NaN in both the image and the
+        kernel.
+        'fill' will replace the NaN pixels with a fixed numerical value
+        (default zero, see ``fill_value``) prior to convolution.
+        Note that if the kernel has a sum equal to zero, NaN interpolation
+        is not possible and will raise an exception.
+    preserve_nan : bool
+        After performing convolution, should pixels that were originally NaN
+        again become NaN?
+    mask : `None` or `numpy.ndarray`
+        A "mask" array. Shape must match ``array``, and anything that is masked
+        (i.e., not 0/`False`) will be set to NaN for the convolution. If
+        `None`, no masking will be performed unless ``array`` is a masked array.
+        If ``mask`` is not `None` *and* ``array`` is a masked array, a pixel is
+        masked if it is masked in either ``mask`` *or* ``array.mask``.
+    normalization_zero_tol : float, optional
+        The absolute tolerance on whether the kernel is different from zero.
+        If the kernel sums to zero to within this precision, it cannot be
+        normalized. Default is 1e-8.
+
+    Returns
+    -------
+    result : `numpy.ndarray`
+        An array with the same dimensions as the input array,
+        convolved with the kernel. The data type depends on the input
+        array type. If array is a floating point type, then the
+        return array keeps the same data type, otherwise the type
+        is ``numpy.float``.
+
+    Notes
+    -----
+    For masked arrays, masked values are treated as NaNs. The convolution
+    is always done at ``numpy.float`` precision.
+    '''
+    from .boundary_none import (convolve1d_boundary_none,
+                                convolve2d_boundary_none,
+                                convolve3d_boundary_none)
+
+    from .boundary_extend import (convolve1d_boundary_extend,
+                                  convolve2d_boundary_extend,
+                                  convolve3d_boundary_extend)
+
+    from .boundary_fill import (convolve1d_boundary_fill,
+                                convolve2d_boundary_fill,
+                                convolve3d_boundary_fill)
+
+    from .boundary_wrap import (convolve1d_boundary_wrap,
+                                convolve2d_boundary_wrap,
+                                convolve3d_boundary_wrap)
+
+    if boundary not in BOUNDARY_OPTIONS:
+        raise ValueError("Invalid boundary option: must be one of {0}"
+                         .format(BOUNDARY_OPTIONS))
+
+    if nan_treatment not in ('interpolate', 'fill'):
+        raise ValueError("nan_treatment must be one of 'interpolate','fill'")
+
+    # The cython routines all need float type inputs (so, a particular
+    # bit size, endianness, etc.). So we have to convert, which also
+    # has the effect of making copies so we don't modify the inputs.
+    # After this, the variables we work with will be array_internal, and
+    # kernel_internal. However -- we do want to keep track of what type
+    # the input array was so we can cast the result to that at the end
+    # if it's a floating point type. Don't bother with this for lists --
+    # just always push those as np.float.
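+    # For example (illustrative comment only, following the dtype contract
+    # described above):
+    #
+    #     >>> convolve(np.array([1, 2, 3], dtype=np.float32), [1, 1, 1])
+    #     # -> float32 output (floating point dtype is preserved)
+    #     >>> convolve([1, 2, 3], [1, 1, 1])
+    #     # -> np.float (float64) output, since the input was a plain list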
+ # It is always necessary to make a copy of kernel (since it is modified), + # but, if we just so happen to be lucky enough to have the input array + # have exactly the desired type, we just alias to array_internal + + # Check if kernel is kernel instance + if isinstance(kernel, Kernel): + # Check if array is also kernel instance, if so convolve and + # return new kernel instance + if isinstance(array, Kernel): + if isinstance(array, Kernel1D) and isinstance(kernel, Kernel1D): + new_array = convolve1d_boundary_fill(array.array, kernel.array, + 0, True) + new_kernel = Kernel1D(array=new_array) + elif isinstance(array, Kernel2D) and isinstance(kernel, Kernel2D): + new_array = convolve2d_boundary_fill(array.array, kernel.array, + 0, True) + new_kernel = Kernel2D(array=new_array) + else: + raise Exception("Can't convolve 1D and 2D kernel.") + new_kernel._separable = kernel._separable and array._separable + new_kernel._is_bool = False + return new_kernel + kernel = kernel.array + + # Check that the arguments are lists or Numpy arrays + + if isinstance(array, list): + array_internal = np.array(array, dtype=np.float) + array_dtype = array_internal.dtype + elif isinstance(array, np.ndarray): + # Note this won't copy if it doesn't have to -- which is okay + # because none of what follows modifies array_internal. + array_dtype = array.dtype + array_internal = array.astype(float, copy=False) + else: + raise TypeError("array should be a list or a Numpy array") + + if isinstance(kernel, list): + kernel_internal = np.array(kernel, dtype=float) + elif isinstance(kernel, np.ndarray): + # Note this always makes a copy, since we will be modifying it + kernel_internal = kernel.astype(float) + else: + raise TypeError("kernel should be a list or a Numpy array") + + # Check that the number of dimensions is compatible + if array_internal.ndim != kernel_internal.ndim: + raise Exception('array and kernel have differing number of ' + 'dimensions.') + + # anything that's masked must be turned into NaNs for the interpolation. + # This requires copying the array_internal + array_internal_copied = False + if np.ma.is_masked(array): + array_internal = array_internal.filled(np.nan) + array_internal_copied = True + if mask is not None: + if not array_internal_copied: + array_internal = array_internal.copy() + array_internal_copied = True + # mask != 0 yields a bool mask for all ints/floats/bool + array_internal[mask != 0] = np.nan + if np.ma.is_masked(kernel): + # *kernel* doesn't support NaN interpolation, so instead we just fill it + kernel_internal = kernel.filled(fill_value) + + # Mark the NaN values so we can replace them later if interpolate_nan is + # not set + if preserve_nan: + badvals = np.isnan(array_internal) + + if nan_treatment == 'fill': + initially_nan = np.isnan(array_internal) + array_internal[initially_nan] = fill_value + + # Because the Cython routines have to normalize the kernel on the fly, we + # explicitly normalize the kernel here, and then scale the image at the + # end if normalization was not requested. + kernel_sum = kernel_internal.sum() + kernel_sums_to_zero = np.isclose(kernel_sum, 0, atol=normalization_zero_tol) + + if (kernel_sum < 1. / MAX_NORMALIZATION or kernel_sums_to_zero) and normalize_kernel: + raise Exception("The kernel can't be normalized, because its sum is " + "close to zero. The sum of the given kernel is < {0}" + .format(1. 
/ MAX_NORMALIZATION)) + + if not kernel_sums_to_zero: + kernel_internal /= kernel_sum + else: + kernel_internal = kernel + + renormalize_by_kernel = not kernel_sums_to_zero + + if array_internal.ndim == 0: + raise Exception("cannot convolve 0-dimensional arrays") + elif array_internal.ndim == 1: + if boundary == 'extend': + result = convolve1d_boundary_extend(array_internal, + kernel_internal, + renormalize_by_kernel) + elif boundary == 'fill': + result = convolve1d_boundary_fill(array_internal, + kernel_internal, + float(fill_value), + renormalize_by_kernel) + elif boundary == 'wrap': + result = convolve1d_boundary_wrap(array_internal, + kernel_internal, + renormalize_by_kernel) + elif boundary is None: + result = convolve1d_boundary_none(array_internal, + kernel_internal, + renormalize_by_kernel) + elif array_internal.ndim == 2: + if boundary == 'extend': + result = convolve2d_boundary_extend(array_internal, + kernel_internal, + renormalize_by_kernel, + ) + elif boundary == 'fill': + result = convolve2d_boundary_fill(array_internal, + kernel_internal, + float(fill_value), + renormalize_by_kernel, + ) + elif boundary == 'wrap': + result = convolve2d_boundary_wrap(array_internal, + kernel_internal, + renormalize_by_kernel, + ) + elif boundary is None: + result = convolve2d_boundary_none(array_internal, + kernel_internal, + renormalize_by_kernel, + ) + elif array_internal.ndim == 3: + if boundary == 'extend': + result = convolve3d_boundary_extend(array_internal, + kernel_internal, + renormalize_by_kernel) + elif boundary == 'fill': + result = convolve3d_boundary_fill(array_internal, + kernel_internal, + float(fill_value), + renormalize_by_kernel) + elif boundary == 'wrap': + result = convolve3d_boundary_wrap(array_internal, + kernel_internal, + renormalize_by_kernel) + elif boundary is None: + result = convolve3d_boundary_none(array_internal, + kernel_internal, + renormalize_by_kernel) + else: + raise NotImplementedError('convolve only supports 1, 2, and 3-dimensional ' + 'arrays at this time') + + # If normalization was not requested, we need to scale the array (since + # the kernel is effectively normalized within the cython functions) + if not normalize_kernel and not kernel_sums_to_zero: + result *= kernel_sum + + if preserve_nan: + result[badvals] = np.nan + + if nan_treatment == 'fill': + array_internal[initially_nan] = np.nan + + # Try to preserve the input type if it's a floating point type + if array_dtype.kind == 'f': + # Avoid making another copy if possible + try: + return result.astype(array_dtype, copy=False) + except TypeError: + return result.astype(array_dtype) + else: + return result + + +@deprecated_renamed_argument('interpolate_nan', 'nan_treatment', 'v2.0.0') +@support_nddata(data='array') +def convolve_fft(array, kernel, boundary='fill', fill_value=0., + nan_treatment='interpolate', normalize_kernel=True, + normalization_zero_tol=1e-8, + preserve_nan=False, mask=None, crop=True, return_fft=False, + fft_pad=None, psf_pad=None, quiet=False, + min_wt=0.0, allow_huge=False, + fftn=np.fft.fftn, ifftn=np.fft.ifftn, + complex_dtype=np.complex): + """ + Convolve an ndarray with an nd-kernel. Returns a convolved image with + ``shape = array.shape``. Assumes kernel is centered. + + `convolve_fft` is very similar to `convolve` in that it replaces ``NaN`` + values in the original image with interpolated values using the kernel as + an interpolation function. However, it also includes many additional + options specific to the implementation. 
+
+    `convolve_fft` differs from `scipy.signal.fftconvolve` in a few ways:
+
+    * It can treat ``NaN`` values as zeros or interpolate over them.
+    * ``inf`` values are treated as ``NaN``.
+    * (optionally) It pads to the nearest 2^n size to improve FFT speed.
+    * Its only valid ``mode`` is 'same' (i.e., the same shape array is
+      returned).
+    * It lets you use your own fft, e.g.,
+      `pyFFTW `_ or
+      `pyFFTW3 `_ , which can lead to
+      performance improvements, depending on your system configuration. pyFFTW3
+      is threaded, and therefore may yield significant performance benefits on
+      multi-core machines at the cost of greater memory requirements. Specify
+      the ``fftn`` and ``ifftn`` keywords to override the defaults, which are
+      `numpy.fft.fftn` and `numpy.fft.ifftn`.
+
+    Parameters
+    ----------
+    array : `numpy.ndarray`
+        Array to be convolved with ``kernel``. It can be of any
+        dimensionality, though only 1, 2, and 3d arrays have been tested.
+    kernel : `numpy.ndarray` or `astropy.convolution.Kernel`
+        The convolution kernel. The number of dimensions should match those
+        for the array. The dimensions *do not* have to be odd in all directions,
+        unlike in the non-fft `convolve` function. The kernel will be
+        normalized if ``normalize_kernel`` is set. It is assumed to be centered
+        (i.e., shifts may result if your kernel is asymmetric).
+    boundary : {'fill', 'wrap'}, optional
+        A flag indicating how to handle boundaries:
+
+        * 'fill': set values outside the array boundary to fill_value
+          (default)
+        * 'wrap': periodic boundary
+
+        The `None` and 'extend' parameters are not supported for FFT-based
+        convolution.
+    fill_value : float, optional
+        The value to use outside the array when using boundary='fill'.
+    nan_treatment : 'interpolate', 'fill'
+        ``interpolate`` will result in renormalization of the kernel at each
+        position, ignoring pixels that are NaN in both the image and the
+        kernel. ``fill`` will replace the NaN pixels with a fixed
+        numerical value (default zero, see ``fill_value``) prior to
+        convolution. Note that if the kernel has a sum equal to zero, NaN
+        interpolation is not possible and will raise an exception.
+    normalize_kernel : function or boolean, optional
+        If specified, this is the function to divide kernel by to normalize it.
+        e.g., ``normalize_kernel=np.sum`` means that kernel will be modified to be:
+        ``kernel = kernel / np.sum(kernel)``. If True, defaults to
+        ``normalize_kernel = np.sum``.
+    normalization_zero_tol : float, optional
+        The absolute tolerance on whether the kernel is different from zero.
+        If the kernel sums to zero to within this precision, it cannot be
+        normalized. Default is 1e-8.
+    preserve_nan : bool
+        After performing convolution, should pixels that were originally NaN
+        again become NaN?
+    mask : `None` or `numpy.ndarray`
+        A "mask" array. Shape must match ``array``, and anything that is masked
+        (i.e., not 0/`False`) will be set to NaN for the convolution. If
+        `None`, no masking will be performed unless ``array`` is a masked array.
+        If ``mask`` is not `None` *and* ``array`` is a masked array, a pixel is
+        masked if it is masked in either ``mask`` *or* ``array.mask``.
+
+
+    Other Parameters
+    ----------------
+    min_wt : float, optional
+        If ignoring ``NaN`` / zeros, force all grid points with a weight less than
+        this value to ``NaN`` (the weight of a grid point with *no* ignored
+        neighbors is 1.0).
+        If ``min_wt`` is zero, then all zero-weight points will be set to zero
+        instead of ``NaN`` (which they would be otherwise, because 1/0 = nan).
+ See the examples below + fft_pad : bool, optional + Default on. Zero-pad image to the nearest 2^n. With + ``boundary='wrap'``, this will be disabled. + psf_pad : bool, optional + Zero-pad image to be at least the sum of the image sizes to avoid + edge-wrapping when smoothing. This is enabled by default with + ``boundary='fill'``, but it can be overridden with a boolean option. + ``boundary='wrap'`` and ``psf_pad=True`` are not compatible. + crop : bool, optional + Default on. Return an image of the size of the larger of the input + image and the kernel. + If the image and kernel are asymmetric in opposite directions, will + return the largest image in both directions. + For example, if an input image has shape [100,3] but a kernel with shape + [6,6] is used, the output will be [100,6]. + return_fft : bool, optional + Return the ``fft(image)*fft(kernel)`` instead of the convolution (which is + ``ifft(fft(image)*fft(kernel))``). Useful for making PSDs. + fftn, ifftn : functions, optional + The fft and inverse fft functions. Can be overridden to use your own + ffts, e.g. an fftw3 wrapper or scipy's fftn, + ``fft=scipy.fftpack.fftn`` + complex_dtype : numpy.complex, optional + Which complex dtype to use. `numpy` has a range of options, from 64 to + 256. + quiet : bool, optional + Silence warning message about NaN interpolation + allow_huge : bool, optional + Allow huge arrays in the FFT? If False, will raise an exception if the + array or kernel size is >1 GB + + Raises + ------ + ValueError: + If the array is bigger than 1 GB after padding, will raise this exception + unless ``allow_huge`` is True + + See Also + -------- + convolve: + Convolve is a non-fft version of this code. It is more memory + efficient and for small kernels can be faster. + + Returns + ------- + default : ndarray + ``array`` convolved with ``kernel``. If ``return_fft`` is set, returns + ``fft(array) * fft(kernel)``. If crop is not set, returns the + image, but with the fft-padded size instead of the input size + + Notes + ----- + With ``psf_pad=True`` and a large PSF, the resulting data can become + very large and consume a lot of memory. See Issue + https://github.com/astropy/astropy/pull/4366 for further detail. + + Examples + -------- + >>> convolve_fft([1, 0, 3], [1, 1, 1]) + array([ 1., 4., 3.]) + + >>> convolve_fft([1, np.nan, 3], [1, 1, 1]) + array([ 1., 4., 3.]) + + >>> convolve_fft([1, 0, 3], [0, 1, 0]) + array([ 1., 0., 3.]) + + >>> convolve_fft([1, 2, 3], [1]) + array([ 1., 2., 3.]) + + >>> convolve_fft([1, np.nan, 3], [0, 1, 0], nan_treatment='interpolate') + ... + array([ 1., 0., 3.]) + + >>> convolve_fft([1, np.nan, 3], [0, 1, 0], nan_treatment='interpolate', + ... min_wt=1e-8) + array([ 1., nan, 3.]) + + >>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate') + array([ 1., 4., 3.]) + + >>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate', + ... normalize_kernel=True) + array([ 1., 2., 3.]) + + >>> import scipy.fftpack # optional - requires scipy + >>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate', + ... normalize_kernel=True, + ... fftn=scipy.fftpack.fft, ifftn=scipy.fftpack.ifft) + array([ 1., 2., 3.]) + + """ + # Checking copied from convolve.py - however, since FFTs have real & + # complex components, we change the types. Only the real part will be + # returned! Note that this always makes a copy. 
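+    # Illustrative sketch of the cast performed below (added comment, not an
+    # exhaustive description): integer and float inputs are promoted to
+    # complex before the FFTs, e.g.
+    #
+    #     >>> np.asarray([1, 2, 3], dtype=np.complex)
+    #     array([ 1.+0.j,  2.+0.j,  3.+0.j])
+    #
+    # and only the real part of the inverse transform is handed back to the
+    # caller unless ``return_fft`` is set.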
+ + # Check kernel is kernel instance + if isinstance(kernel, Kernel): + kernel = kernel.array + if isinstance(array, Kernel): + raise TypeError("Can't convolve two kernels with convolve_fft. " + "Use convolve instead.") + + if nan_treatment not in ('interpolate', 'fill'): + raise ValueError("nan_treatment must be one of 'interpolate','fill'") + + # Convert array dtype to complex + # and ensure that list inputs become arrays + array = np.asarray(array, dtype=np.complex) + kernel = np.asarray(kernel, dtype=np.complex) + + # Check that the number of dimensions is compatible + if array.ndim != kernel.ndim: + raise ValueError("Image and kernel must have same number of " + "dimensions") + + arrayshape = array.shape + kernshape = kernel.shape + + array_size_B = (np.product(arrayshape, dtype=np.int64) * + np.dtype(complex_dtype).itemsize)*u.byte + if array_size_B > 1*u.GB and not allow_huge: + raise ValueError("Size Error: Arrays will be {}. Use " + "allow_huge=True to override this exception." + .format(human_file_size(array_size_B.to_value(u.byte)))) + + # mask catching - masks must be turned into NaNs for use later in the image + if np.ma.is_masked(array): + mamask = array.mask + array = np.array(array) + array[mamask] = np.nan + elif mask is not None: + # copying here because we have to mask it below. But no need to copy + # if mask is None because we won't modify it. + array = np.array(array) + if mask is not None: + # mask != 0 yields a bool mask for all ints/floats/bool + array[mask != 0] = np.nan + # the *kernel* doesn't support NaN interpolation, so instead we just fill it + if np.ma.is_masked(kernel): + kernel = kernel.filled(0) + + # NaN and inf catching + nanmaskarray = np.isnan(array) | np.isinf(array) + array[nanmaskarray] = 0 + nanmaskkernel = np.isnan(kernel) | np.isinf(kernel) + kernel[nanmaskkernel] = 0 + + if normalize_kernel is True: + if kernel.sum() < 1. / MAX_NORMALIZATION: + raise Exception("The kernel can't be normalized, because its sum is " + "close to zero. The sum of the given kernel is < {0}" + .format(1. / MAX_NORMALIZATION)) + kernel_scale = kernel.sum() + normalized_kernel = kernel / kernel_scale + kernel_scale = 1 # if we want to normalize it, leave it normed! + elif normalize_kernel: + # try this. If a function is not passed, the code will just crash... I + # think type checking would be better but PEPs say otherwise... + kernel_scale = normalize_kernel(kernel) + normalized_kernel = kernel / kernel_scale + else: + kernel_scale = kernel.sum() + if np.abs(kernel_scale) < normalization_zero_tol: + if nan_treatment == 'interpolate': + raise ValueError('Cannot interpolate NaNs with an unnormalizable kernel') + else: + # the kernel's sum is near-zero, so it can't be scaled + kernel_scale = 1 + normalized_kernel = kernel + else: + # the kernel is normalizable; we'll temporarily normalize it + # now and undo the normalization later. + normalized_kernel = kernel / kernel_scale + + if boundary is None: + warnings.warn("The convolve_fft version of boundary=None is " + "equivalent to the convolve boundary='fill'. 
There is " + "no FFT equivalent to convolve's " + "zero-if-kernel-leaves-boundary", AstropyUserWarning) + if psf_pad is None: + psf_pad = True + if fft_pad is None: + fft_pad = True + elif boundary == 'fill': + # create a boundary region at least as large as the kernel + if psf_pad is False: + warnings.warn("psf_pad was set to {0}, which overrides the " + "boundary='fill' setting.".format(psf_pad), + AstropyUserWarning) + else: + psf_pad = True + if fft_pad is None: + # default is 'True' according to the docstring + fft_pad = True + elif boundary == 'wrap': + if psf_pad: + raise ValueError("With boundary='wrap', psf_pad cannot be enabled.") + psf_pad = False + if fft_pad: + raise ValueError("With boundary='wrap', fft_pad cannot be enabled.") + fft_pad = False + fill_value = 0 # force zero; it should not be used + elif boundary == 'extend': + raise NotImplementedError("The 'extend' option is not implemented " + "for fft-based convolution") + + # find ideal size (power of 2) for fft. + # Can add shapes because they are tuples + if fft_pad: # default=True + if psf_pad: # default=False + # add the dimensions and then take the max (bigger) + fsize = 2 ** np.ceil(np.log2( + np.max(np.array(arrayshape) + np.array(kernshape)))) + else: + # add the shape lists (max of a list of length 4) (smaller) + # also makes the shapes square + fsize = 2 ** np.ceil(np.log2(np.max(arrayshape + kernshape))) + newshape = np.array([fsize for ii in range(array.ndim)], dtype=int) + else: + if psf_pad: + # just add the biggest dimensions + newshape = np.array(arrayshape) + np.array(kernshape) + else: + newshape = np.array([np.max([imsh, kernsh]) + for imsh, kernsh in zip(arrayshape, kernshape)]) + + # perform a second check after padding + array_size_C = (np.product(newshape, dtype=np.int64) * + np.dtype(complex_dtype).itemsize)*u.byte + if array_size_C > 1*u.GB and not allow_huge: + raise ValueError("Size Error: Arrays will be {}. Use " + "allow_huge=True to override this exception." + .format(human_file_size(array_size_C))) + + # For future reference, this can be used to predict "almost exactly" + # how much *additional* memory will be used. + # size * (array + kernel + kernelfft + arrayfft + + # (kernel*array)fft + + # optional(weight image + weight_fft + weight_ifft) + + # optional(returned_fft)) + # total_memory_used_GB = (np.product(newshape)*np.dtype(complex_dtype).itemsize + # * (5 + 3*((interpolate_nan or ) and kernel_is_normalized)) + # + (1 + (not return_fft)) * + # np.product(arrayshape)*np.dtype(complex_dtype).itemsize + # + np.product(arrayshape)*np.dtype(bool).itemsize + # + np.product(kernshape)*np.dtype(bool).itemsize) + # ) / 1024.**3 + + # separate each dimension by the padding size... 
this is to determine the + # appropriate slice size to get back to the input dimensions + arrayslices = [] + kernslices = [] + for ii, (newdimsize, arraydimsize, kerndimsize) in enumerate(zip(newshape, arrayshape, kernshape)): + center = newdimsize - (newdimsize + 1) // 2 + arrayslices += [slice(center - arraydimsize // 2, + center + (arraydimsize + 1) // 2)] + kernslices += [slice(center - kerndimsize // 2, + center + (kerndimsize + 1) // 2)] + + if not np.all(newshape == arrayshape): + if np.isfinite(fill_value): + bigarray = np.ones(newshape, dtype=complex_dtype) * fill_value + else: + bigarray = np.zeros(newshape, dtype=complex_dtype) + bigarray[arrayslices] = array + else: + bigarray = array + + if not np.all(newshape == kernshape): + bigkernel = np.zeros(newshape, dtype=complex_dtype) + bigkernel[kernslices] = normalized_kernel + else: + bigkernel = normalized_kernel + + arrayfft = fftn(bigarray) + # need to shift the kernel so that, e.g., [0,0,1,0] -> [1,0,0,0] = unity + kernfft = fftn(np.fft.ifftshift(bigkernel)) + fftmult = arrayfft * kernfft + + interpolate_nan = (nan_treatment == 'interpolate') + if interpolate_nan: + if not np.isfinite(fill_value): + bigimwt = np.zeros(newshape, dtype=complex_dtype) + else: + bigimwt = np.ones(newshape, dtype=complex_dtype) + + bigimwt[arrayslices] = 1.0 - nanmaskarray * interpolate_nan + wtfft = fftn(bigimwt) + + # You can only get to this point if kernel_is_normalized + wtfftmult = wtfft * kernfft + wtsm = ifftn(wtfftmult) + # need to re-zero weights outside of the image (if it is padded, we + # still don't weight those regions) + bigimwt[arrayslices] = wtsm.real[arrayslices] + # curiously, at the floating-point limit, can get slightly negative numbers + # they break the min_wt=0 "flag" and must therefore be removed + bigimwt[bigimwt < 0] = 0 + else: + bigimwt = 1 + + if np.isnan(fftmult).any(): + # this check should be unnecessary; call it an insanity check + raise ValueError("Encountered NaNs in convolve. This is disallowed.") + + # restore NaNs in original image (they were modified inplace earlier) + # We don't have to worry about masked arrays - if input was masked, it was + # copied + array[nanmaskarray] = np.nan + kernel[nanmaskkernel] = np.nan + + fftmult *= kernel_scale + + if return_fft: + return fftmult + + if interpolate_nan: + rifft = (ifftn(fftmult)) / bigimwt + if not np.isscalar(bigimwt): + rifft[bigimwt < min_wt] = np.nan + if min_wt == 0.0: + rifft[bigimwt == 0.0] = 0.0 + else: + rifft = (ifftn(fftmult)) + + if preserve_nan: + rifft[nanmaskarray] = np.nan + + if crop: + result = rifft[arrayslices].real + return result + else: + return rifft.real + + +def interpolate_replace_nans(array, kernel, convolve=convolve, **kwargs): + """ + Given a data set containing NaNs, replace the NaNs by interpolating from + neighboring data points with a given kernel. + + Parameters + ---------- + array : `numpy.ndarray` + Array to be convolved with ``kernel``. It can be of any + dimensionality, though only 1, 2, and 3d arrays have been tested. + kernel : `numpy.ndarray` or `astropy.convolution.Kernel` + The convolution kernel. The number of dimensions should match those + for the array. The dimensions *do not* have to be odd in all directions, + unlike in the non-fft `convolve` function. The kernel will be + normalized if ``normalize_kernel`` is set. It is assumed to be centered + (i.e., shifts may result if your kernel is asymmetric). The kernel + *must be normalizable* (i.e., its sum cannot be zero). 
+    convolve : `convolve` or `convolve_fft`
+        One of the two convolution functions defined in this package.
+
+    Returns
+    -------
+    newarray : `numpy.ndarray`
+        A copy of the original array with NaN pixels replaced with their
+        interpolated counterparts.
+    """
+
+    if not np.any(np.isnan(array)):
+        return array.copy()
+
+    newarray = array.copy()
+
+    convolved = convolve(array, kernel, nan_treatment='interpolate',
+                         normalize_kernel=True, **kwargs)
+
+    isnan = np.isnan(array)
+    newarray[isnan] = convolved[isnan]
+
+    return newarray
+
+
+def convolve_models(model, kernel, mode='convolve_fft', **kwargs):
+    """
+    Convolve two models using `~astropy.convolution.convolve_fft` or
+    `~astropy.convolution.convolve`, depending on ``mode``.
+
+    Parameters
+    ----------
+    model : `~astropy.modeling.core.Model`
+        Functional model
+    kernel : `~astropy.modeling.core.Model`
+        Convolution kernel
+    mode : str
+        Keyword representing which function to use for convolution.
+            * 'convolve_fft' : use the `~astropy.convolution.convolve_fft` function.
+            * 'convolve' : use `~astropy.convolution.convolve`.
+    kwargs : dict
+        Keyword arguments to be passed either to `~astropy.convolution.convolve`
+        or `~astropy.convolution.convolve_fft`, depending on ``mode``.
+
+    Returns
+    -------
+    default : CompoundModel
+        Convolved model
+    """
+
+    if mode == 'convolve_fft':
+        BINARY_OPERATORS['convolve_fft'] = _make_arithmetic_operator(partial(convolve_fft, **kwargs))
+    elif mode == 'convolve':
+        BINARY_OPERATORS['convolve'] = _make_arithmetic_operator(partial(convolve, **kwargs))
+    else:
+        raise ValueError('Mode {} is not supported.'.format(mode))
+
+    return _CompoundModelMeta._from_operator(mode, model, kernel)
diff --git a/astropy/convolution/core.py b/astropy/convolution/core.py
new file mode 100644
index 0000000..632e324
--- /dev/null
+++ b/astropy/convolution/core.py
@@ -0,0 +1,372 @@
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+"""
+This module contains the convolution and filter functionalities of astropy.
+
+A few conceptual notes:
+A filter kernel is mainly characterized by its response function.  In the 1D
+case we speak of an "impulse response function"; in the 2D case we call it a
+"point spread function".  This response function is given for every kernel by
+an astropy `FittableModel`, which is evaluated on a grid to obtain a filter
+array, which can then be applied to binned data.
+
+The model is centered on the array and should have an amplitude such that the
+array integrates to one by default.
+
+Currently only symmetric 2D kernels are supported.
+"""
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
+
+import warnings
+import copy
+
+import numpy as np
+from ..utils.exceptions import AstropyUserWarning
+from .utils import (discretize_model, add_kernel_arrays_1D,
+                    add_kernel_arrays_2D)
+
+MAX_NORMALIZATION = 100
+
+__all__ = ['Kernel', 'Kernel1D', 'Kernel2D', 'kernel_arithmetics']
+
+
+class Kernel(object):
+    """
+    Convolution kernel base class.
+
+    Parameters
+    ----------
+    array : `~numpy.ndarray`
+        Kernel array.
+    """
+    _separable = False
+    _is_bool = True
+    _model = None
+
+    def __init__(self, array):
+        self._array = np.asanyarray(array)
+
+    @property
+    def truncation(self):
+        """
+        Deviation from the normalization to one.
+        """
+        return self._truncation
+
+    @property
+    def is_bool(self):
+        """
+        Indicates if the kernel is boolean.
+
+        If the kernel is boolean, the multiplication in the convolution can
+        be omitted to increase performance.
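+
+        For illustration (an editorial addition, not part of the upstream
+        docstring), a kernel whose array holds only zeros and ones counts as
+        boolean:
+
+        >>> import numpy as np
+        >>> from astropy.convolution import CustomKernel
+        >>> CustomKernel(np.array([0., 1., 1., 1., 0.])).is_bool
+        True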
+ """ + return self._is_bool + + @property + def model(self): + """ + Kernel response model. + """ + return self._model + + @property + def dimension(self): + """ + Kernel dimension. + """ + return self.array.ndim + + @property + def center(self): + """ + Index of the kernel center. + """ + return [axes_size // 2 for axes_size in self._array.shape] + + def normalize(self, mode='integral'): + """ + Normalize the filter kernel. + + Parameters + ---------- + mode : {'integral', 'peak'} + One of the following modes: + * 'integral' (default) + Kernel is normalized such that its integral = 1. + * 'peak' + Kernel is normalized such that its peak = 1. + """ + + if mode == 'integral': + normalization = self._array.sum() + elif mode == 'peak': + normalization = self._array.max() + else: + raise ValueError("invalid mode, must be 'integral' or 'peak'") + + # Warn the user for kernels that sum to zero + if normalization == 0: + warnings.warn('The kernel cannot be normalized because it ' + 'sums to zero.', AstropyUserWarning) + else: + np.divide(self._array, normalization, self._array) + + self._kernel_sum = self._array.sum() + + @property + def shape(self): + """ + Shape of the kernel array. + """ + return self._array.shape + + @property + def separable(self): + """ + Indicates if the filter kernel is separable. + + A 2D filter is separable, when its filter array can be written as the + outer product of two 1D arrays. + + If a filter kernel is separable, higher dimension convolutions will be + performed by applying the 1D filter array consecutively on every dimension. + This is significantly faster, than using a filter array with the same + dimension. + """ + return self._separable + + @property + def array(self): + """ + Filter kernel array. + """ + return self._array + + def __add__(self, kernel): + """ + Add two filter kernels. + """ + return kernel_arithmetics(self, kernel, 'add') + + def __sub__(self, kernel): + """ + Subtract two filter kernels. + """ + return kernel_arithmetics(self, kernel, 'sub') + + def __mul__(self, value): + """ + Multiply kernel with number or convolve two kernels. + """ + return kernel_arithmetics(self, value, "mul") + + def __rmul__(self, value): + """ + Multiply kernel with number or convolve two kernels. + """ + return kernel_arithmetics(self, value, "mul") + + def __array__(self): + """ + Array representation of the kernel. + """ + return self._array + + def __array_wrap__(self, array, context=None): + """ + Wrapper for multiplication with numpy arrays. + """ + if type(context[0]) == np.ufunc: + return NotImplemented + else: + return array + + +class Kernel1D(Kernel): + """ + Base class for 1D filter kernels. + + Parameters + ---------- + model : `~astropy.modeling.FittableModel` + Model to be evaluated. + x_size : odd int, optional + Size of the kernel array. Default = 8 * width. + array : `~numpy.ndarray` + Kernel array. + width : number + Width of the filter kernel. + mode : str, optional + One of the following discretization modes: + * 'center' (default) + Discretize model by taking the value + at the center of the bin. + * 'linear_interp' + Discretize model by linearly interpolating + between the values at the corners of the bin. + * 'oversample' + Discretize model by taking the average + on an oversampled grid. + * 'integrate' + Discretize model by integrating the + model over the bin. + factor : number, optional + Factor of oversampling. Default factor = 10. 
+ """ + + def __init__(self, model=None, x_size=None, array=None, **kwargs): + # Initialize from model + if array is None: + if self._model is None: + raise TypeError("Must specify either array or model.") + + if x_size is None: + x_size = self._default_size + elif x_size != int(x_size): + raise TypeError("x_size should be an integer") + + # Set ranges where to evaluate the model + + if x_size % 2 == 0: # even kernel + x_range = (-(int(x_size)) // 2 + 0.5, (int(x_size)) // 2 + 0.5) + else: # odd kernel + x_range = (-(int(x_size) - 1) // 2, (int(x_size) - 1) // 2 + 1) + + array = discretize_model(self._model, x_range, **kwargs) + + # Initialize from array + elif array is not None: + self._model = None + + super(Kernel1D, self).__init__(array) + + +class Kernel2D(Kernel): + """ + Base class for 2D filter kernels. + + Parameters + ---------- + model : `~astropy.modeling.FittableModel` + Model to be evaluated. + x_size : odd int, optional + Size in x direction of the kernel array. Default = 8 * width. + y_size : odd int, optional + Size in y direction of the kernel array. Default = 8 * width. + array : `~numpy.ndarray` + Kernel array. + mode : str, optional + One of the following discretization modes: + * 'center' (default) + Discretize model by taking the value + at the center of the bin. + * 'linear_interp' + Discretize model by performing a bilinear interpolation + between the values at the corners of the bin. + * 'oversample' + Discretize model by taking the average + on an oversampled grid. + * 'integrate' + Discretize model by integrating the + model over the bin. + width : number + Width of the filter kernel. + factor : number, optional + Factor of oversampling. Default factor = 10. + """ + + def __init__(self, model=None, x_size=None, y_size=None, array=None, **kwargs): + + # Initialize from model + if array is None: + if self._model is None: + raise TypeError("Must specify either array or model.") + + if x_size is None: + x_size = self._default_size + elif x_size != int(x_size): + raise TypeError("x_size should be an integer") + + if y_size is None: + y_size = x_size + elif y_size != int(y_size): + raise TypeError("y_size should be an integer") + + # Set ranges where to evaluate the model + + if x_size % 2 == 0: # even kernel + x_range = (-(int(x_size)) // 2 + 0.5, (int(x_size)) // 2 + 0.5) + else: # odd kernel + x_range = (-(int(x_size) - 1) // 2, (int(x_size) - 1) // 2 + 1) + + if y_size % 2 == 0: # even kernel + y_range = (-(int(y_size)) // 2 + 0.5, (int(y_size)) // 2 + 0.5) + else: # odd kernel + y_range = (-(int(y_size) - 1) // 2, (int(y_size) - 1) // 2 + 1) + + array = discretize_model(self._model, x_range, y_range, **kwargs) + + # Initialize from array + elif array is not None: + self._model = None + + super(Kernel2D, self).__init__(array) + + +def kernel_arithmetics(kernel, value, operation): + """ + Add, subtract or multiply two kernels. + + Parameters + ---------- + kernel : `astropy.convolution.Kernel` + Kernel instance + value : kernel, float or int + Value to operate with + operation : {'add', 'sub', 'mul'} + One of the following operations: + * 'add' + Add two kernels + * 'sub' + Subtract two kernels + * 'mul' + Multiply kernel with number or convolve two kernels. 
+ """ + # 1D kernels + if isinstance(kernel, Kernel1D) and isinstance(value, Kernel1D): + if operation == "add": + new_array = add_kernel_arrays_1D(kernel.array, value.array) + if operation == "sub": + new_array = add_kernel_arrays_1D(kernel.array, -value.array) + if operation == "mul": + raise Exception("Kernel operation not supported. Maybe you want " + "to use convolve(kernel1, kernel2) instead.") + new_kernel = Kernel1D(array=new_array) + new_kernel._separable = kernel._separable and value._separable + new_kernel._is_bool = kernel._is_bool or value._is_bool + + # 2D kernels + elif isinstance(kernel, Kernel2D) and isinstance(value, Kernel2D): + if operation == "add": + new_array = add_kernel_arrays_2D(kernel.array, value.array) + if operation == "sub": + new_array = add_kernel_arrays_2D(kernel.array, -value.array) + if operation == "mul": + raise Exception("Kernel operation not supported. Maybe you want " + "to use convolve(kernel1, kernel2) instead.") + new_kernel = Kernel2D(array=new_array) + new_kernel._separable = kernel._separable and value._separable + new_kernel._is_bool = kernel._is_bool or value._is_bool + + # kernel and number + elif ((isinstance(kernel, Kernel1D) or isinstance(kernel, Kernel2D)) + and np.isscalar(value)): + if operation == "mul": + new_kernel = copy.copy(kernel) + new_kernel._array *= value + else: + raise Exception("Kernel operation not supported.") + else: + raise Exception("Kernel operation not supported.") + return new_kernel diff --git a/astropy/convolution/kernels.py b/astropy/convolution/kernels.py new file mode 100644 index 0000000..d9c9a18 --- /dev/null +++ b/astropy/convolution/kernels.py @@ -0,0 +1,1018 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import math + +import numpy as np + +from .core import Kernel1D, Kernel2D, Kernel +from .utils import KernelSizeError +from ..modeling import models +from ..modeling.core import Fittable1DModel, Fittable2DModel + + +__all__ = ['Gaussian1DKernel', 'Gaussian2DKernel', 'CustomKernel', + 'Box1DKernel', 'Box2DKernel', 'Tophat2DKernel', + 'Trapezoid1DKernel', 'MexicanHat1DKernel', 'MexicanHat2DKernel', + 'AiryDisk2DKernel', 'Moffat2DKernel', 'Model1DKernel', + 'Model2DKernel', 'TrapezoidDisk2DKernel', 'Ring2DKernel'] + + +def _round_up_to_odd_integer(value): + i = int(math.ceil(value)) # TODO: int() call is only needed for six.PY2 + if i % 2 == 0: + return i + 1 + else: + return i + + +class Gaussian1DKernel(Kernel1D): + """ + 1D Gaussian filter kernel. + + The Gaussian filter is a filter with great smoothing properties. It is + isotropic and does not produce artifacts. + + Parameters + ---------- + stddev : number + Standard deviation of the Gaussian kernel. + x_size : odd int, optional + Size of the kernel array. Default = 8 * stddev + mode : str, optional + One of the following discretization modes: + * 'center' (default) + Discretize model by taking the value + at the center of the bin. + * 'linear_interp' + Discretize model by linearly interpolating + between the values at the corners of the bin. + * 'oversample' + Discretize model by taking the average + on an oversampled grid. + * 'integrate' + Discretize model by integrating the + model over the bin. Very slow. + factor : number, optional + Factor of oversampling. Default factor = 10. If the factor + is too large, evaluation can be very slow. 
+
+
+    See Also
+    --------
+    Box1DKernel, Trapezoid1DKernel, MexicanHat1DKernel
+
+
+    Examples
+    --------
+    Kernel response:
+
+    .. plot::
+        :include-source:
+
+        import matplotlib.pyplot as plt
+        from astropy.convolution import Gaussian1DKernel
+        gauss_1D_kernel = Gaussian1DKernel(10)
+        plt.plot(gauss_1D_kernel, drawstyle='steps')
+        plt.xlabel('x [pixels]')
+        plt.ylabel('value')
+        plt.show()
+    """
+    _separable = True
+    _is_bool = False
+
+    def __init__(self, stddev, **kwargs):
+        self._model = models.Gaussian1D(1. / (np.sqrt(2 * np.pi) * stddev),
+                                        0, stddev)
+        self._default_size = _round_up_to_odd_integer(8 * stddev)
+        super(Gaussian1DKernel, self).__init__(**kwargs)
+        self._truncation = np.abs(1. - self._array.sum())
+
+
+class Gaussian2DKernel(Kernel2D):
+    """
+    2D Gaussian filter kernel.
+
+    The Gaussian filter is a filter with great smoothing properties.  It is
+    isotropic and does not produce artifacts.
+
+    Parameters
+    ----------
+    stddev : number
+        Standard deviation of the Gaussian kernel.
+    x_size : odd int, optional
+        Size in x direction of the kernel array.  Default = 8 * stddev.
+    y_size : odd int, optional
+        Size in y direction of the kernel array.  Default = 8 * stddev.
+    mode : str, optional
+        One of the following discretization modes:
+            * 'center' (default)
+                Discretize model by taking the value
+                at the center of the bin.
+            * 'linear_interp'
+                Discretize model by performing a bilinear interpolation
+                between the values at the corners of the bin.
+            * 'oversample'
+                Discretize model by taking the average
+                on an oversampled grid.
+            * 'integrate'
+                Discretize model by integrating the
+                model over the bin.
+    factor : number, optional
+        Factor of oversampling.  Default factor = 10.
+
+
+    See Also
+    --------
+    Box2DKernel, Tophat2DKernel, MexicanHat2DKernel, Ring2DKernel,
+    TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel
+
+    Examples
+    --------
+    Kernel response:
+
+    .. plot::
+        :include-source:
+
+        import matplotlib.pyplot as plt
+        from astropy.convolution import Gaussian2DKernel
+        gaussian_2D_kernel = Gaussian2DKernel(10)
+        plt.imshow(gaussian_2D_kernel, interpolation='none', origin='lower')
+        plt.xlabel('x [pixels]')
+        plt.ylabel('y [pixels]')
+        plt.colorbar()
+        plt.show()
+
+    """
+    _separable = True
+    _is_bool = False
+
+    def __init__(self, stddev, **kwargs):
+        self._model = models.Gaussian2D(1. / (2 * np.pi * stddev ** 2), 0,
+                                        0, stddev, stddev)
+        self._default_size = _round_up_to_odd_integer(8 * stddev)
+        super(Gaussian2DKernel, self).__init__(**kwargs)
+        self._truncation = np.abs(1. - self._array.sum())
+
+
+class Box1DKernel(Kernel1D):
+    """
+    1D Box filter kernel.
+
+    The Box filter or running mean is a smoothing filter.  It is not
+    isotropic and can produce artifacts when applied repeatedly to the same
+    data.
+
+    By default the Box kernel uses the ``linear_interp`` discretization mode,
+    which allows non-shifting, even-sized kernels.  This is achieved by
+    weighting the edge pixels with 1/2.  E.g., a Box kernel with an effective
+    smoothing of 4 pixels would have the following array: [0.5, 1, 1, 1, 0.5].
+
+
+    Parameters
+    ----------
+    width : number
+        Width of the filter kernel.
+    mode : str, optional
+        One of the following discretization modes:
+            * 'center'
+                Discretize model by taking the value
+                at the center of the bin.
+            * 'linear_interp' (default)
+                Discretize model by linearly interpolating
+                between the values at the corners of the bin.
+            * 'oversample'
+                Discretize model by taking the average
+                on an oversampled grid.
+            * 'integrate'
+                Discretize model by integrating the
+                model over the bin.
+    factor : number, optional
+        Factor of oversampling.  Default factor = 10.
+
+    See Also
+    --------
+    Gaussian1DKernel, Trapezoid1DKernel, MexicanHat1DKernel
+
+
+    Examples
+    --------
+    Kernel response function:
+
+    .. plot::
+        :include-source:
+
+        import matplotlib.pyplot as plt
+        from astropy.convolution import Box1DKernel
+        box_1D_kernel = Box1DKernel(9)
+        plt.plot(box_1D_kernel, drawstyle='steps')
+        plt.xlim(-1, 9)
+        plt.xlabel('x [pixels]')
+        plt.ylabel('value')
+        plt.show()
+
+    """
+    _separable = True
+    _is_bool = True
+
+    def __init__(self, width, **kwargs):
+        self._model = models.Box1D(1. / width, 0, width)
+        self._default_size = _round_up_to_odd_integer(width)
+        kwargs['mode'] = 'linear_interp'
+        super(Box1DKernel, self).__init__(**kwargs)
+        self._truncation = 0
+        self.normalize()
+
+
+class Box2DKernel(Kernel2D):
+    """
+    2D Box filter kernel.
+
+    The Box filter or running mean is a smoothing filter.  It is not
+    isotropic and can produce artifacts when applied repeatedly to the same
+    data.
+
+    By default the Box kernel uses the ``linear_interp`` discretization mode,
+    which allows non-shifting, even-sized kernels.  This is achieved by
+    weighting the edge pixels with 1/2.
+
+
+    Parameters
+    ----------
+    width : number
+        Width of the filter kernel.
+    mode : str, optional
+        One of the following discretization modes:
+            * 'center'
+                Discretize model by taking the value
+                at the center of the bin.
+            * 'linear_interp' (default)
+                Discretize model by performing a bilinear interpolation
+                between the values at the corners of the bin.
+            * 'oversample'
+                Discretize model by taking the average
+                on an oversampled grid.
+            * 'integrate'
+                Discretize model by integrating the
+                model over the bin.
+    factor : number, optional
+        Factor of oversampling.  Default factor = 10.
+
+
+    See Also
+    --------
+    Gaussian2DKernel, Tophat2DKernel, MexicanHat2DKernel, Ring2DKernel,
+    TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel
+
+    Examples
+    --------
+    Kernel response:
+
+    .. plot::
+        :include-source:
+
+        import matplotlib.pyplot as plt
+        from astropy.convolution import Box2DKernel
+        box_2D_kernel = Box2DKernel(9)
+        plt.imshow(box_2D_kernel, interpolation='none', origin='lower',
+                   vmin=0.0, vmax=0.015)
+        plt.xlim(-1, 9)
+        plt.ylim(-1, 9)
+        plt.xlabel('x [pixels]')
+        plt.ylabel('y [pixels]')
+        plt.colorbar()
+        plt.show()
+    """
+    _separable = True
+    _is_bool = True
+
+    def __init__(self, width, **kwargs):
+        self._model = models.Box2D(1. / width ** 2, 0, 0, width, width)
+        self._default_size = _round_up_to_odd_integer(width)
+        kwargs['mode'] = 'linear_interp'
+        super(Box2DKernel, self).__init__(**kwargs)
+        self._truncation = 0
+        self.normalize()
+
+
+class Tophat2DKernel(Kernel2D):
+    """
+    2D Tophat filter kernel.
+
+    The Tophat filter is an isotropic smoothing filter.  It can produce
+    artifacts when applied repeatedly to the same data.
+
+    Parameters
+    ----------
+    radius : int
+        Radius of the filter kernel.
+    mode : str, optional
+        One of the following discretization modes:
+            * 'center' (default)
+                Discretize model by taking the value
+                at the center of the bin.
+            * 'linear_interp'
+                Discretize model by performing a bilinear interpolation
+                between the values at the corners of the bin.
+            * 'oversample'
+                Discretize model by taking the average
+                on an oversampled grid.
+            * 'integrate'
+                Discretize model by integrating the
+                model over the bin.
+    factor : number, optional
+        Factor of oversampling.  Default factor = 10.
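+
+    An editorial sketch (not part of the upstream docstring): with the
+    default 'center' mode, the discretized tophat is perfectly flat inside
+    its radius:
+
+    >>> import numpy as np
+    >>> from astropy.convolution import Tophat2DKernel
+    >>> t = Tophat2DKernel(3)
+    >>> inside = t.array[t.array > 0]   # pixels inside the radius
+    >>> bool(np.allclose(inside, inside[0]))
+    True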
+ + + See Also + -------- + Gaussian2DKernel, Box2DKernel, MexicanHat2DKernel, Ring2DKernel, + TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel + + Examples + -------- + Kernel response: + + .. plot:: + :include-source: + + import matplotlib.pyplot as plt + from astropy.convolution import Tophat2DKernel + tophat_2D_kernel = Tophat2DKernel(40) + plt.imshow(tophat_2D_kernel, interpolation='none', origin='lower') + plt.xlabel('x [pixels]') + plt.ylabel('y [pixels]') + plt.colorbar() + plt.show() + + """ + def __init__(self, radius, **kwargs): + self._model = models.Disk2D(1. / (np.pi * radius ** 2), 0, 0, radius) + self._default_size = _round_up_to_odd_integer(2 * radius) + super(Tophat2DKernel, self).__init__(**kwargs) + self._truncation = 0 + + +class Ring2DKernel(Kernel2D): + """ + 2D Ring filter kernel. + + The Ring filter kernel is the difference between two Tophat kernels of + different width. This kernel is useful for, e.g., background estimation. + + Parameters + ---------- + radius_in : number + Inner radius of the ring kernel. + width : number + Width of the ring kernel. + mode : str, optional + One of the following discretization modes: + * 'center' (default) + Discretize model by taking the value + at the center of the bin. + * 'linear_interp' + Discretize model by performing a bilinear interpolation + between the values at the corners of the bin. + * 'oversample' + Discretize model by taking the average + on an oversampled grid. + * 'integrate' + Discretize model by integrating the + model over the bin. + factor : number, optional + Factor of oversampling. Default factor = 10. + + See Also + -------- + Gaussian2DKernel, Box2DKernel, Tophat2DKernel, MexicanHat2DKernel, + Ring2DKernel, AiryDisk2DKernel, Moffat2DKernel + + Examples + -------- + Kernel response: + + .. plot:: + :include-source: + + import matplotlib.pyplot as plt + from astropy.convolution import Ring2DKernel + ring_2D_kernel = Ring2DKernel(9, 8) + plt.imshow(ring_2D_kernel, interpolation='none', origin='lower') + plt.xlabel('x [pixels]') + plt.ylabel('y [pixels]') + plt.colorbar() + plt.show() + """ + def __init__(self, radius_in, width, **kwargs): + radius_out = radius_in + width + self._model = models.Ring2D(1. / (np.pi * (radius_out ** 2 - radius_in ** 2)), + 0, 0, radius_in, width) + self._default_size = _round_up_to_odd_integer(2 * radius_out) + super(Ring2DKernel, self).__init__(**kwargs) + self._truncation = 0 + + +class Trapezoid1DKernel(Kernel1D): + """ + 1D trapezoid kernel. + + Parameters + ---------- + width : number + Width of the filter kernel, defined as the width of the constant part, + before it begins to slope down. + slope : number + Slope of the filter kernel's tails + mode : str, optional + One of the following discretization modes: + * 'center' (default) + Discretize model by taking the value + at the center of the bin. + * 'linear_interp' + Discretize model by linearly interpolating + between the values at the corners of the bin. + * 'oversample' + Discretize model by taking the average + on an oversampled grid. + * 'integrate' + Discretize model by integrating the + model over the bin. + factor : number, optional + Factor of oversampling. Default factor = 10. + + See Also + -------- + Box1DKernel, Gaussian1DKernel, MexicanHat1DKernel + + Examples + -------- + Kernel response: + + .. 
plot:: + :include-source: + + import matplotlib.pyplot as plt + from astropy.convolution import Trapezoid1DKernel + trapezoid_1D_kernel = Trapezoid1DKernel(17, slope=0.2) + plt.plot(trapezoid_1D_kernel, drawstyle='steps') + plt.xlabel('x [pixels]') + plt.ylabel('amplitude') + plt.xlim(-1, 28) + plt.show() + """ + _is_bool = False + + def __init__(self, width, slope=1., **kwargs): + self._model = models.Trapezoid1D(1, 0, width, slope) + self._default_size = _round_up_to_odd_integer(width + 2. / slope) + super(Trapezoid1DKernel, self).__init__(**kwargs) + self._truncation = 0 + self.normalize() + + +class TrapezoidDisk2DKernel(Kernel2D): + """ + 2D trapezoid kernel. + + Parameters + ---------- + radius : number + Width of the filter kernel, defined as the width of the constant part, + before it begins to slope down. + slope : number + Slope of the filter kernel's tails + mode : str, optional + One of the following discretization modes: + * 'center' (default) + Discretize model by taking the value + at the center of the bin. + * 'linear_interp' + Discretize model by performing a bilinear interpolation + between the values at the corners of the bin. + * 'oversample' + Discretize model by taking the average + on an oversampled grid. + * 'integrate' + Discretize model by integrating the + model over the bin. + factor : number, optional + Factor of oversampling. Default factor = 10. + + See Also + -------- + Gaussian2DKernel, Box2DKernel, Tophat2DKernel, MexicanHat2DKernel, + Ring2DKernel, AiryDisk2DKernel, Moffat2DKernel + + Examples + -------- + Kernel response: + + .. plot:: + :include-source: + + import matplotlib.pyplot as plt + from astropy.convolution import TrapezoidDisk2DKernel + trapezoid_2D_kernel = TrapezoidDisk2DKernel(20, slope=0.2) + plt.imshow(trapezoid_2D_kernel, interpolation='none', origin='lower') + plt.xlabel('x [pixels]') + plt.ylabel('y [pixels]') + plt.colorbar() + plt.show() + + """ + _is_bool = False + + def __init__(self, radius, slope=1., **kwargs): + self._model = models.TrapezoidDisk2D(1, 0, 0, radius, slope) + self._default_size = _round_up_to_odd_integer(2 * radius + 2. / slope) + super(TrapezoidDisk2DKernel, self).__init__(**kwargs) + self._truncation = 0 + self.normalize() + + +class MexicanHat1DKernel(Kernel1D): + """ + 1D Mexican hat filter kernel. + + The Mexican Hat, or inverted Gaussian-Laplace filter, is a + bandpass filter. It smoothes the data and removes slowly varying + or constant structures (e.g. Background). It is useful for peak or + multi-scale detection. + + This kernel is derived from a normalized Gaussian function, by + computing the second derivative. This results in an amplitude + at the kernels center of 1. / (sqrt(2 * pi) * width ** 3). The + normalization is the same as for `scipy.ndimage.gaussian_laplace`, + except for a minus sign. + + Parameters + ---------- + width : number + Width of the filter kernel, defined as the standard deviation + of the Gaussian function from which it is derived. + x_size : odd int, optional + Size in x direction of the kernel array. Default = 8 * width. + mode : str, optional + One of the following discretization modes: + * 'center' (default) + Discretize model by taking the value + at the center of the bin. + * 'linear_interp' + Discretize model by linearly interpolating + between the values at the corners of the bin. + * 'oversample' + Discretize model by taking the average + on an oversampled grid. + * 'integrate' + Discretize model by integrating the + model over the bin. 
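+
+        An editorial aside (not part of the upstream docstring): because this
+        is a bandpass kernel, its array sums to nearly zero, which is why it
+        is deliberately not renormalized:
+
+        >>> from astropy.convolution import MexicanHat1DKernel
+        >>> bool(abs(MexicanHat1DKernel(4).array.sum()) < 1e-3)
+        True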
+ factor : number, optional + Factor of oversampling. Default factor = 10. + + + See Also + -------- + Box1DKernel, Gaussian1DKernel, Trapezoid1DKernel + + Examples + -------- + Kernel response: + + .. plot:: + :include-source: + + import matplotlib.pyplot as plt + from astropy.convolution import MexicanHat1DKernel + mexicanhat_1D_kernel = MexicanHat1DKernel(10) + plt.plot(mexicanhat_1D_kernel, drawstyle='steps') + plt.xlabel('x [pixels]') + plt.ylabel('value') + plt.show() + + """ + _is_bool = True + + def __init__(self, width, **kwargs): + amplitude = 1.0 / (np.sqrt(2 * np.pi) * width ** 3) + self._model = models.MexicanHat1D(amplitude, 0, width) + self._default_size = _round_up_to_odd_integer(8 * width) + super(MexicanHat1DKernel, self).__init__(**kwargs) + self._truncation = np.abs(self._array.sum() / self._array.size) + + +class MexicanHat2DKernel(Kernel2D): + """ + 2D Mexican hat filter kernel. + + The Mexican Hat, or inverted Gaussian-Laplace filter, is a + bandpass filter. It smoothes the data and removes slowly varying + or constant structures (e.g. Background). It is useful for peak or + multi-scale detection. + + This kernel is derived from a normalized Gaussian function, by + computing the second derivative. This results in an amplitude + at the kernels center of 1. / (pi * width ** 4). The normalization + is the same as for `scipy.ndimage.gaussian_laplace`, except + for a minus sign. + + Parameters + ---------- + width : number + Width of the filter kernel, defined as the standard deviation + of the Gaussian function from which it is derived. + x_size : odd int, optional + Size in x direction of the kernel array. Default = 8 * width. + y_size : odd int, optional + Size in y direction of the kernel array. Default = 8 * width. + mode : str, optional + One of the following discretization modes: + * 'center' (default) + Discretize model by taking the value + at the center of the bin. + * 'linear_interp' + Discretize model by performing a bilinear interpolation + between the values at the corners of the bin. + * 'oversample' + Discretize model by taking the average + on an oversampled grid. + * 'integrate' + Discretize model by integrating the + model over the bin. + factor : number, optional + Factor of oversampling. Default factor = 10. + + + See Also + -------- + Gaussian2DKernel, Box2DKernel, Tophat2DKernel, Ring2DKernel, + TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel + + Examples + -------- + Kernel response: + + .. plot:: + :include-source: + + import matplotlib.pyplot as plt + from astropy.convolution import MexicanHat2DKernel + mexicanhat_2D_kernel = MexicanHat2DKernel(10) + plt.imshow(mexicanhat_2D_kernel, interpolation='none', origin='lower') + plt.xlabel('x [pixels]') + plt.ylabel('y [pixels]') + plt.colorbar() + plt.show() + """ + _is_bool = False + + def __init__(self, width, **kwargs): + amplitude = 1.0 / (np.pi * width ** 4) + self._model = models.MexicanHat2D(amplitude, 0, 0, width) + self._default_size = _round_up_to_odd_integer(8 * width) + super(MexicanHat2DKernel, self).__init__(**kwargs) + self._truncation = np.abs(self._array.sum() / self._array.size) + + +class AiryDisk2DKernel(Kernel2D): + """ + 2D Airy disk kernel. + + This kernel models the diffraction pattern of a circular aperture. This + kernel is normalized to a peak value of 1. + + Parameters + ---------- + radius : float + The radius of the Airy disk kernel (radius of the first zero). + x_size : odd int, optional + Size in x direction of the kernel array. Default = 8 * radius. 
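+
+        An editorial aside (not part of the upstream docstring): once the
+        kernel is normalized, convolution with it conserves total flux for
+        sources away from the image edges:
+
+        >>> import numpy as np
+        >>> from astropy.convolution import AiryDisk2DKernel, convolve_fft
+        >>> stars = np.zeros((128, 128))   # illustrative point-source image
+        >>> stars[64, 64] = stars[70, 90] = 1.0
+        >>> observed = convolve_fft(stars, AiryDisk2DKernel(3))
+        >>> bool(np.isclose(observed.sum(), stars.sum()))
+        True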
+ y_size : odd int, optional + Size in y direction of the kernel array. Default = 8 * radius. + mode : str, optional + One of the following discretization modes: + * 'center' (default) + Discretize model by taking the value + at the center of the bin. + * 'linear_interp' + Discretize model by performing a bilinear interpolation + between the values at the corners of the bin. + * 'oversample' + Discretize model by taking the average + on an oversampled grid. + * 'integrate' + Discretize model by integrating the + model over the bin. + factor : number, optional + Factor of oversampling. Default factor = 10. + + See Also + -------- + Gaussian2DKernel, Box2DKernel, Tophat2DKernel, MexicanHat2DKernel, + Ring2DKernel, TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel + + Examples + -------- + Kernel response: + + .. plot:: + :include-source: + + import matplotlib.pyplot as plt + from astropy.convolution import AiryDisk2DKernel + airydisk_2D_kernel = AiryDisk2DKernel(10) + plt.imshow(airydisk_2D_kernel, interpolation='none', origin='lower') + plt.xlabel('x [pixels]') + plt.ylabel('y [pixels]') + plt.colorbar() + plt.show() + """ + _is_bool = False + + def __init__(self, radius, **kwargs): + self._model = models.AiryDisk2D(1, 0, 0, radius) + self._default_size = _round_up_to_odd_integer(8 * radius) + super(AiryDisk2DKernel, self).__init__(**kwargs) + self.normalize() + self._truncation = None + + +class Moffat2DKernel(Kernel2D): + """ + 2D Moffat kernel. + + This kernel is a typical model for a seeing limited PSF. + + Parameters + ---------- + gamma : float + Core width of the Moffat model. + alpha : float + Power index of the Moffat model. + x_size : odd int, optional + Size in x direction of the kernel array. Default = 8 * radius. + y_size : odd int, optional + Size in y direction of the kernel array. Default = 8 * radius. + mode : str, optional + One of the following discretization modes: + * 'center' (default) + Discretize model by taking the value + at the center of the bin. + * 'linear_interp' + Discretize model by performing a bilinear interpolation + between the values at the corners of the bin. + * 'oversample' + Discretize model by taking the average + on an oversampled grid. + * 'integrate' + Discretize model by integrating the + model over the bin. + factor : number, optional + Factor of oversampling. Default factor = 10. + + See Also + -------- + Gaussian2DKernel, Box2DKernel, Tophat2DKernel, MexicanHat2DKernel, + Ring2DKernel, TrapezoidDisk2DKernel, AiryDisk2DKernel + + Examples + -------- + Kernel response: + + .. plot:: + :include-source: + + import matplotlib.pyplot as plt + from astropy.convolution import Moffat2DKernel + moffat_2D_kernel = Moffat2DKernel(3, 2) + plt.imshow(moffat_2D_kernel, interpolation='none', origin='lower') + plt.xlabel('x [pixels]') + plt.ylabel('y [pixels]') + plt.colorbar() + plt.show() + """ + _is_bool = False + + def __init__(self, gamma, alpha, **kwargs): + self._model = models.Moffat2D((gamma - 1.0) / (np.pi * alpha * alpha), + 0, 0, gamma, alpha) + fwhm = 2.0 * alpha * (2.0 ** (1.0 / gamma) - 1.0) ** 0.5 + self._default_size = _round_up_to_odd_integer(4.0 * fwhm) + super(Moffat2DKernel, self).__init__(**kwargs) + self.normalize() + self._truncation = None + + +class Model1DKernel(Kernel1D): + """ + Create kernel from 1D model. + + The model has to be centered on x = 0. + + Parameters + ---------- + model : `~astropy.modeling.Fittable1DModel` + Kernel response function model + x_size : odd int, optional + Size in x direction of the kernel array. 
Default = 8 * width. + mode : str, optional + One of the following discretization modes: + * 'center' (default) + Discretize model by taking the value + at the center of the bin. + * 'linear_interp' + Discretize model by linearly interpolating + between the values at the corners of the bin. + * 'oversample' + Discretize model by taking the average + on an oversampled grid. + * 'integrate' + Discretize model by integrating the + model over the bin. + factor : number, optional + Factor of oversampling. Default factor = 10. + + Raises + ------ + TypeError + If model is not an instance of `~astropy.modeling.Fittable1DModel` + + See also + -------- + Model2DKernel : Create kernel from `~astropy.modeling.Fittable2DModel` + CustomKernel : Create kernel from list or array + + Examples + -------- + Define a Gaussian1D model: + + >>> from astropy.modeling.models import Gaussian1D + >>> from astropy.convolution.kernels import Model1DKernel + >>> gauss = Gaussian1D(1, 0, 2) + + And create a custom one dimensional kernel from it: + + >>> gauss_kernel = Model1DKernel(gauss, x_size=9) + + This kernel can now be used like a usual Astropy kernel. + """ + _separable = False + _is_bool = False + + def __init__(self, model, **kwargs): + if isinstance(model, Fittable1DModel): + self._model = model + else: + raise TypeError("Must be Fittable1DModel") + super(Model1DKernel, self).__init__(**kwargs) + + +class Model2DKernel(Kernel2D): + """ + Create kernel from 2D model. + + The model has to be centered on x = 0 and y = 0. + + Parameters + ---------- + model : `~astropy.modeling.Fittable2DModel` + Kernel response function model + x_size : odd int, optional + Size in x direction of the kernel array. Default = 8 * width. + y_size : odd int, optional + Size in y direction of the kernel array. Default = 8 * width. + mode : str, optional + One of the following discretization modes: + * 'center' (default) + Discretize model by taking the value + at the center of the bin. + * 'linear_interp' + Discretize model by performing a bilinear interpolation + between the values at the corners of the bin. + * 'oversample' + Discretize model by taking the average + on an oversampled grid. + * 'integrate' + Discretize model by integrating the + model over the bin. + factor : number, optional + Factor of oversampling. Default factor = 10. + + Raises + ------ + TypeError + If model is not an instance of `~astropy.modeling.Fittable2DModel` + + See also + -------- + Model1DKernel : Create kernel from `~astropy.modeling.Fittable1DModel` + CustomKernel : Create kernel from list or array + + Examples + -------- + Define a Gaussian2D model: + + >>> from astropy.modeling.models import Gaussian2D + >>> from astropy.convolution.kernels import Model2DKernel + >>> gauss = Gaussian2D(1, 0, 0, 2, 2) + + And create a custom two dimensional kernel from it: + + >>> gauss_kernel = Model2DKernel(gauss, x_size=9) + + This kernel can now be used like a usual astropy kernel. + + """ + _is_bool = False + _separable = False + + def __init__(self, model, **kwargs): + self._separable = False + if isinstance(model, Fittable2DModel): + self._model = model + else: + raise TypeError("Must be Fittable2DModel") + super(Model2DKernel, self).__init__(**kwargs) + + +class PSFKernel(Kernel2D): + """ + Initialize filter kernel from astropy PSF instance. + """ + _separable = False + + def __init__(self): + raise NotImplementedError('Not yet implemented') + + +class CustomKernel(Kernel): + """ + Create filter kernel from list or array. 
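+
+    An editorial note (not part of the upstream docstring): even-sized
+    arrays are rejected, because the kernel center would be ambiguous:
+
+    >>> import numpy as np
+    >>> from astropy.convolution import CustomKernel
+    >>> try:
+    ...     CustomKernel(np.array([1., 1.]))
+    ... except Exception as err:
+    ...     print(type(err).__name__)
+    KernelSizeError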
+ + Parameters + ---------- + array : list or array + Filter kernel array. Size must be odd. + + Raises + ------ + TypeError + If array is not a list or array. + KernelSizeError + If array size is even. + + See also + -------- + Model2DKernel, Model1DKernel + + Examples + -------- + Define one dimensional array: + + >>> from astropy.convolution.kernels import CustomKernel + >>> import numpy as np + >>> array = np.array([1, 2, 3, 2, 1]) + >>> kernel = CustomKernel(array) + >>> kernel.dimension + 1 + + Define two dimensional array: + + >>> array = np.array([[1, 1, 1], [1, 2, 1], [1, 1, 1]]) + >>> kernel = CustomKernel(array) + >>> kernel.dimension + 2 + """ + def __init__(self, array): + self.array = array + super(CustomKernel, self).__init__(self._array) + + @property + def array(self): + """ + Filter kernel array. + """ + return self._array + + @array.setter + def array(self, array): + """ + Filter kernel array setter + """ + if isinstance(array, np.ndarray): + self._array = array.astype(np.float64) + elif isinstance(array, list): + self._array = np.array(array, dtype=np.float64) + else: + raise TypeError("Must be list or array.") + + # Check if array is odd in all axes + odd = all(axes_size % 2 != 0 for axes_size in self.shape) + if not odd: + raise KernelSizeError("Kernel size must be odd in all axes.") + + # Check if array is bool + ones = self._array == 1. + zeros = self._array == 0 + self._is_bool = bool(np.all(np.logical_or(ones, zeros))) + + self._truncation = 0.0 diff --git a/astropy/convolution/setup_package.py b/astropy/convolution/setup_package.py new file mode 100644 index 0000000..3cd9f7c --- /dev/null +++ b/astropy/convolution/setup_package.py @@ -0,0 +1,5 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + + +def requires_2to3(): + return False diff --git a/astropy/convolution/tests/__init__.py b/astropy/convolution/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/astropy/convolution/tests/test_convolve.py b/astropy/convolution/tests/test_convolve.py new file mode 100644 index 0000000..cc236fc --- /dev/null +++ b/astropy/convolution/tests/test_convolve.py @@ -0,0 +1,755 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import pytest +import numpy as np + +from ..convolve import convolve, convolve_fft + +from numpy.testing import assert_array_almost_equal_nulp, assert_array_almost_equal + +import itertools + +VALID_DTYPES = [] +for dtype_array in ['>f4', 'f8', 'f4', 'f8', 'f8'), [3, 3, 3]), 10) + elif boundary == 'extend': + assert_array_almost_equal_nulp(z, np.array([[[62., 51., 40.], + [72., 63., 54.], + [82., 75., 68.]], + [[93., 68., 43.], + [105., 78., 51.], + [117., 88., 59.]], + [[124., 85., 46.], + [138., 93., 48.], + [152., 101., 50.]]], + dtype='>f8')/kernsum, 10) + else: + raise ValueError("Invalid Boundary Option") + + +@pytest.mark.parametrize(('convfunc', 'boundary'), BOUNDARIES_AND_CONVOLUTIONS) +def test_asymmetric_kernel(boundary, convfunc): + ''' + Regression test for #6264: make sure that asymmetric convolution + functions go the right direction + ''' + + x = np.array([3., 0., 1.], dtype='>f8') + + y = np.array([1, 2, 3], dtype='>f8') + + z = convolve(x, y, boundary=boundary, normalize_kernel=False) + + if boundary == 'fill': + assert_array_almost_equal_nulp(z, np.array([6., 10., 2.], dtype='float'), 10) + elif boundary is None: + assert_array_almost_equal_nulp(z, np.array([0., 10., 0.], dtype='float'), 10) 
+ elif boundary == 'extend': + assert_array_almost_equal_nulp(z, np.array([15., 10., 3.], dtype='float'), 10) + elif boundary == 'wrap': + assert_array_almost_equal_nulp(z, np.array([9., 10., 5.], dtype='float'), 10) + + +@pytest.mark.parametrize('ndims', (1, 2, 3)) +def test_convolution_consistency(ndims): + + np.random.seed(0) + array = np.random.randn(*([3]*ndims)) + np.random.seed(0) + kernel = np.random.rand(*([3]*ndims)) + + conv_f = convolve_fft(array, kernel, boundary='fill') + conv_d = convolve(array, kernel, boundary='fill') + + assert_array_almost_equal_nulp(conv_f, conv_d, 30) + + +def test_astropy_convolution_against_numpy(): + x = np.array([1, 2, 3]) + y = np.array([5, 4, 3, 2, 1]) + + assert_array_almost_equal(np.convolve(y, x, 'same'), + convolve(y, x, normalize_kernel=False)) + assert_array_almost_equal(np.convolve(y, x, 'same'), + convolve_fft(y, x, normalize_kernel=False)) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_astropy_convolution_against_scipy(): + from scipy.signal import fftconvolve + x = np.array([1, 2, 3]) + y = np.array([5, 4, 3, 2, 1]) + + assert_array_almost_equal(fftconvolve(y, x, 'same'), + convolve(y, x, normalize_kernel=False)) + assert_array_almost_equal(fftconvolve(y, x, 'same'), + convolve_fft(y, x, normalize_kernel=False)) diff --git a/astropy/convolution/tests/test_convolve_fft.py b/astropy/convolution/tests/test_convolve_fft.py new file mode 100644 index 0000000..5bb6526 --- /dev/null +++ b/astropy/convolution/tests/test_convolve_fft.py @@ -0,0 +1,580 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import itertools + +import pytest +import numpy as np +from numpy.testing import assert_array_almost_equal_nulp, assert_allclose + +from ..convolve import convolve_fft +from ...tests.helper import catch_warnings +from ...utils.exceptions import AstropyUserWarning + + +VALID_DTYPES = [] +for dtype_array in ['>f4', 'f8', 'f4', 'f8', ' a, z, a)) * 10) + + @pytest.mark.parametrize(option_names, options) + def test_unity_3x3_withnan(self, boundary, nan_treatment, + normalize_kernel): + ''' + Test that a 3x3 unit kernel returns the same array (except when + boundary is None). This version includes a NaN value in the original + array. + ''' + + x = np.array([[1., 2., 3.], + [4., np.nan, 6.], + [7., 8., 9.]], dtype='float64') + + y = np.array([[0., 0., 0.], + [0., 1., 0.], + [0., 0., 0.]], dtype='float64') + + z = convolve_fft(x, y, boundary=boundary, + nan_treatment=nan_treatment, + normalize_kernel=normalize_kernel, + ) + + a = x + a[1, 1] = 0 + + # for whatever reason, numpy's fft has very limited precision, and + # the comparison fails unless you cast the float64 to a float16 + if hasattr(np, 'float16'): + assert_array_almost_equal_nulp(np.asarray(z, dtype=np.float16), + np.asarray(a, dtype=np.float16), 10) + assert_allclose(z, a, atol=1e-14) + + @pytest.mark.parametrize(option_names, options) + def test_uniform_3x3_withnan(self, boundary, nan_treatment, + normalize_kernel): + ''' + Test that the different modes are producing the correct results using + a 3x3 uniform kernel. This version includes a NaN value in the + original array. 
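+
+        Editorial aside, not part of the upstream test suite: the same
+        direct/FFT agreement can be checked on random data, e.g.::
+
+            import numpy as np
+            from astropy.convolution import convolve, convolve_fft
+            rng = np.random.RandomState(42)     # illustrative seed
+            img, kern = rng.randn(16, 16), rng.rand(5, 5)
+            assert np.allclose(convolve(img, kern),
+                               convolve_fft(img, kern), atol=1e-10)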
+ ''' + + x = np.array([[0., 0., 3.], + [1., np.nan, 0.], + [0., 2., 0.]], dtype='float64') + + y = np.array([[1., 1., 1.], + [1., 1., 1.], + [1., 1., 1.]], dtype='float64') + + # commented out: allow unnormalized nan-ignoring convolution + # # kernel is not normalized, so this situation -> exception + # if nan_treatment and not normalize_kernel: + # with pytest.raises(ValueError): + # z = convolve_fft(x, y, boundary=boundary, + # nan_treatment=nan_treatment, + # normalize_kernel=normalize_kernel, + # ignore_edge_zeros=ignore_edge_zeros, + # ) + # return + + z = convolve_fft(x, y, boundary=boundary, + nan_treatment=nan_treatment, + fill_value=np.nan if normalize_kernel else 0, + normalize_kernel=normalize_kernel) + + # weights + w_n = np.array([[3., 5., 3.], + [5., 8., 5.], + [3., 5., 3.]], dtype='float64') + w_z = np.array([[4., 6., 4.], + [6., 9., 6.], + [4., 6., 4.]], dtype='float64') + answer_dict = { + 'sum': np.array([[1., 4., 3.], + [3., 6., 5.], + [3., 3., 2.]], dtype='float64'), + 'sum_wrap': np.array([[6., 6., 6.], + [6., 6., 6.], + [6., 6., 6.]], dtype='float64'), + } + answer_dict['average'] = answer_dict['sum'] / w_z + answer_dict['average_interpnan'] = answer_dict['sum'] / w_n + answer_dict['average_wrap_interpnan'] = answer_dict['sum_wrap'] / 8. + answer_dict['average_wrap'] = answer_dict['sum_wrap'] / 9. + answer_dict['average_withzeros'] = answer_dict['sum'] / 9. + answer_dict['average_withzeros_interpnan'] = answer_dict['sum'] / 8. + answer_dict['sum_withzeros'] = answer_dict['sum'] + answer_dict['sum_interpnan'] = answer_dict['sum'] * 9/8. + answer_dict['sum_withzeros_interpnan'] = answer_dict['sum'] + answer_dict['sum_wrap_interpnan'] = answer_dict['sum_wrap'] * 9/8. + + if normalize_kernel: + answer_key = 'average' + else: + answer_key = 'sum' + + if boundary == 'wrap': + answer_key += '_wrap' + elif nan_treatment == 'fill': + answer_key += '_withzeros' + + if nan_treatment == 'interpolate': + answer_key += '_interpnan' + + a = answer_dict[answer_key] + # for reasons unknown, the Windows FFT returns an answer for the [0, 0] + # component that is EXACTLY 10*np.spacing + assert np.all(np.abs(z - a) <= np.spacing(np.where(z > a, z, a)) * 10) + + def test_big_fail(self): + """ Test that convolve_fft raises an exception if a too-large array is passed in """ + + with pytest.raises((ValueError, MemoryError)): + # while a good idea, this approach did not work; it actually writes to disk + # arr = np.memmap('file.np', mode='w+', shape=(512, 512, 512), dtype=np.complex) + # this just allocates the memory but never touches it; it's better: + arr = np.empty([512, 512, 512], dtype=np.complex) + # note 512**3 * 16 bytes = 2.0 GB + convolve_fft(arr, arr) + + @pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS) + def test_non_normalized_kernel(self, boundary): + + x = np.array([[0., 0., 4.], + [1., 2., 0.], + [0., 3., 0.]], dtype='float') + + y = np.array([[1., -1., 1.], + [-1., 0., -1.], + [1., -1., 1.]], dtype='float') + + z = convolve_fft(x, y, boundary=boundary, nan_treatment='fill', + normalize_kernel=False) + + if boundary in (None, 'fill'): + assert_allclose(z, np.array([[1., -5., 2.], + [1., 0., -3.], + [-2., -1., -1.]], dtype='float'), atol=1e-14) + elif boundary == 'wrap': + assert_allclose(z, np.array([[0., -8., 6.], + [5., 0., -4.], + [2., 3., -4.]], dtype='float'), atol=1e-14) + else: + raise ValueError("Invalid boundary specification") diff --git a/astropy/convolution/tests/test_convolve_kernels.py b/astropy/convolution/tests/test_convolve_kernels.py new file mode 
100644 index 0000000..fc23794 --- /dev/null +++ b/astropy/convolution/tests/test_convolve_kernels.py @@ -0,0 +1,130 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import itertools + +import pytest +import numpy as np +from numpy.testing import assert_almost_equal + +from ..convolve import convolve, convolve_fft +from ..kernels import Gaussian2DKernel, Box2DKernel, Tophat2DKernel +from ..kernels import Moffat2DKernel + + +SHAPES_ODD = [[15, 15], [31, 31]] +SHAPES_EVEN = [[8, 8], [16, 16], [32, 32]] +WIDTHS = [2, 3, 4, 5] + +KERNELS = [] + +for shape in SHAPES_ODD: + for width in WIDTHS: + + KERNELS.append(Gaussian2DKernel(width, + x_size=shape[0], + y_size=shape[1], + mode='oversample', + factor=10)) + + KERNELS.append(Box2DKernel(width, + x_size=shape[0], + y_size=shape[1], + mode='oversample', + factor=10)) + + KERNELS.append(Tophat2DKernel(width, + x_size=shape[0], + y_size=shape[1], + mode='oversample', + factor=10)) + KERNELS.append(Moffat2DKernel(width, 2, + x_size=shape[0], + y_size=shape[1], + mode='oversample', + factor=10)) + + +class Test2DConvolutions(object): + + @pytest.mark.parametrize('kernel', KERNELS) + def test_centered_makekernel(self, kernel): + """ + Test smoothing of an image with a single positive pixel + """ + + shape = kernel.array.shape + + x = np.zeros(shape) + xslice = [slice(sh // 2, sh // 2 + 1) for sh in shape] + x[xslice] = 1.0 + + c2 = convolve_fft(x, kernel, boundary='fill') + c1 = convolve(x, kernel, boundary='fill') + + assert_almost_equal(c1, c2, decimal=12) + + @pytest.mark.parametrize('kernel', KERNELS) + def test_random_makekernel(self, kernel): + """ + Test smoothing of an image made of random noise + """ + + shape = kernel.array.shape + + x = np.random.randn(*shape) + + c2 = convolve_fft(x, kernel, boundary='fill') + c1 = convolve(x, kernel, boundary='fill') + + # not clear why, but these differ by a couple ulps... 
+ assert_almost_equal(c1, c2, decimal=12) + + @pytest.mark.parametrize(('shape', 'width'), list(itertools.product(SHAPES_ODD, WIDTHS))) + def test_uniform_smallkernel(self, shape, width): + """ + Test smoothing of an image with a single positive pixel + + Uses a simple, small kernel + """ + + if width % 2 == 0: + # convolve does not accept odd-shape kernels + return + + kernel = np.ones([width, width]) + + x = np.zeros(shape) + xslice = [slice(sh // 2, sh // 2 + 1) for sh in shape] + x[xslice] = 1.0 + + c2 = convolve_fft(x, kernel, boundary='fill') + c1 = convolve(x, kernel, boundary='fill') + + assert_almost_equal(c1, c2, decimal=12) + + @pytest.mark.parametrize(('shape', 'width'), list(itertools.product(SHAPES_ODD, [1, 3, 5]))) + def test_smallkernel_Box2DKernel(self, shape, width): + """ + Test smoothing of an image with a single positive pixel + + Compares a small uniform kernel to the Box2DKernel + """ + + kernel1 = np.ones([width, width]) / np.float(width) ** 2 + kernel2 = Box2DKernel(width, mode='oversample', factor=10) + + x = np.zeros(shape) + xslice = [slice(sh // 2, sh // 2 + 1) for sh in shape] + x[xslice] = 1.0 + + c2 = convolve_fft(x, kernel2, boundary='fill') + c1 = convolve_fft(x, kernel1, boundary='fill') + + assert_almost_equal(c1, c2, decimal=12) + + c2 = convolve(x, kernel2, boundary='fill') + c1 = convolve(x, kernel1, boundary='fill') + + assert_almost_equal(c1, c2, decimal=12) diff --git a/astropy/convolution/tests/test_convolve_models.py b/astropy/convolution/tests/test_convolve_models.py new file mode 100644 index 0000000..2b54d30 --- /dev/null +++ b/astropy/convolution/tests/test_convolve_models.py @@ -0,0 +1,107 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import math +import numpy as np +import pytest + +from ..convolve import convolve, convolve_fft, convolve_models +from ...modeling import models, fitting +from ...utils.misc import NumpyRNGContext +from numpy.testing import assert_allclose, assert_almost_equal + +try: + import scipy +except ImportError: + HAS_SCIPY = False +else: + HAS_SCIPY = True + + +class TestConvolve1DModels(object): + @pytest.mark.parametrize('mode', ['convolve_fft', 'convolve']) + @pytest.mark.skipif('not HAS_SCIPY') + def test_is_consistency_with_astropy_convolution(self, mode): + kernel = models.Gaussian1D(1, 0, 1) + model = models.Gaussian1D(1, 0, 1) + model_conv = convolve_models(model, kernel, mode=mode) + x = np.arange(-5, 6) + ans = eval("{}(model(x), kernel(x))".format(mode)) + + assert_allclose(ans, model_conv(x), atol=1e-5) + + @pytest.mark.parametrize('mode', ['convolve_fft', 'convolve']) + @pytest.mark.skipif('not HAS_SCIPY') + def test_against_scipy(self, mode): + from scipy.signal import fftconvolve + + kernel = models.Gaussian1D(1, 0, 1) + model = models.Gaussian1D(1, 0, 1) + model_conv = convolve_models(model, kernel, mode=mode) + x = np.arange(-5, 6) + ans = fftconvolve(kernel(x), model(x), mode='same') + + assert_allclose(ans, model_conv(x) * kernel(x).sum(), atol=1e-5) + + @pytest.mark.parametrize('mode', ['convolve_fft', 'convolve']) + @pytest.mark.skipif('not HAS_SCIPY') + def test_against_scipy_with_additional_keywords(self, mode): + from scipy.signal import fftconvolve + + kernel = models.Gaussian1D(1, 0, 1) + model = models.Gaussian1D(1, 0, 1) + model_conv = convolve_models(model, kernel, mode=mode, + normalize_kernel=False) + x = np.arange(-5, 6) + ans = fftconvolve(kernel(x), model(x), mode='same') + 
+ assert_allclose(ans, model_conv(x), atol=1e-5) + + @pytest.mark.parametrize('mode', ['convolve_fft', 'convolve']) + def test_sum_of_gaussians(self, mode): + """ + Test that convolving N(a, b) with N(c, d) gives N(a + c, b + d), + where N(., .) stands for Gaussian probability density function, + in which a and c are their means and b and d are their variances. + """ + + kernel = models.Gaussian1D(1 / math.sqrt(2 * np.pi), 1, 1) + model = models.Gaussian1D(1 / math.sqrt(2 * np.pi), 3, 1) + model_conv = convolve_models(model, kernel, mode=mode, + normalize_kernel=False) + ans = models.Gaussian1D(1 / (2 * math.sqrt(np.pi)), 4, np.sqrt(2)) + x = np.arange(-5, 6) + + assert_allclose(ans(x), model_conv(x), atol=1e-3) + + @pytest.mark.parametrize('mode', ['convolve_fft', 'convolve']) + def test_convolve_box_models(self, mode): + kernel = models.Box1D() + model = models.Box1D() + model_conv = convolve_models(model, kernel, mode=mode) + x = np.linspace(-1, 1, 99) + ans = (x + 1) * (x < 0) + (-x + 1) * (x >= 0) + + assert_allclose(ans, model_conv(x), atol=1e-3) + + @pytest.mark.parametrize('mode', ['convolve_fft', 'convolve']) + @pytest.mark.skipif('not HAS_SCIPY') + def test_fitting_convolve_models(self, mode): + """ + test that a convolve model can be fitted + """ + b1 = models.Box1D() + g1 = models.Gaussian1D() + + x = np.linspace(-5, 5, 99) + fake_model = models.Gaussian1D(amplitude=10) + with NumpyRNGContext(123): + fake_data = fake_model(x) + np.random.normal(size=len(x)) + + init_model = convolve_models(b1, g1, mode=mode, normalize_kernel=False) + fitter = fitting.LevMarLSQFitter() + fitted_model = fitter(init_model, x, fake_data) + + me = np.mean(fitted_model(x) - fake_data) + assert_almost_equal(me, 0.0, decimal=2) diff --git a/astropy/convolution/tests/test_convolve_nddata.py b/astropy/convolution/tests/test_convolve_nddata.py new file mode 100644 index 0000000..93e801b --- /dev/null +++ b/astropy/convolution/tests/test_convolve_nddata.py @@ -0,0 +1,58 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import pytest +import numpy as np + +from ..convolve import convolve, convolve_fft +from ..kernels import Gaussian2DKernel +from ...nddata import NDData + + +def test_basic_nddata(): + arr = np.zeros((11, 11)) + arr[5, 5] = 1 + ndd = NDData(arr) + test_kernel = Gaussian2DKernel(1) + + result = convolve(ndd, test_kernel) + + x, y = np.mgrid[:11, :11] + expected = result[5, 5] * np.exp(-0.5 * ((x - 5)**2 + (y - 5)**2)) + + np.testing.assert_allclose(result, expected, atol=1e-6) + + resultf = convolve_fft(ndd, test_kernel) + np.testing.assert_allclose(resultf, expected, atol=1e-6) + + +@pytest.mark.parametrize('convfunc', + [lambda *args: convolve(*args, nan_treatment='interpolate', normalize_kernel=True), + lambda *args: convolve_fft(*args, nan_treatment='interpolate', normalize_kernel=True)]) +def test_masked_nddata(convfunc): + arr = np.zeros((11, 11)) + arr[4, 5] = arr[6, 5] = arr[5, 4] = arr[5, 6] = 0.2 + arr[5, 5] = 1.5 + ndd_base = NDData(arr) + + mask = arr < 0 # this is all False + mask[5, 5] = True + ndd_mask = NDData(arr, mask=mask) + + arrnan = arr.copy() + arrnan[5, 5] = np.nan + ndd_nan = NDData(arrnan) + + test_kernel = Gaussian2DKernel(1) + + result_base = convfunc(ndd_base, test_kernel) + result_nan = convfunc(ndd_nan, test_kernel) + result_mask = convfunc(ndd_mask, test_kernel) + + assert np.allclose(result_nan, result_mask) + assert not np.allclose(result_base, 
result_mask) + assert not np.allclose(result_base, result_nan) + + # check to make sure the mask run doesn't talk back to the initial array + assert np.sum(np.isnan(ndd_base.data)) != np.sum(np.isnan(ndd_nan.data)) diff --git a/astropy/convolution/tests/test_convolve_speeds.py b/astropy/convolution/tests/test_convolve_speeds.py new file mode 100644 index 0000000..8ae3974 --- /dev/null +++ b/astropy/convolution/tests/test_convolve_speeds.py @@ -0,0 +1,187 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import timeit + +import numpy as np # pylint: disable=W0611 + +from ...extern.six.moves import range, zip + +# largest image size to use for "linear" and fft convolutions +max_exponents_linear = {1: 15, 2: 7, 3: 5} +max_exponents_fft = {1: 15, 2: 10, 3: 7} + +if __name__ == "__main__": + for ndims in [1, 2, 3]: + print("\n{}-dimensional arrays ('n' is the size of the image AND " + "the kernel)".format(ndims)) + print(" ".join(["%17s" % n for n in ("n", "convolve", "convolve_fft")])) + + for ii in range(3, max_exponents_fft[ndims]): + # array = np.random.random([2**ii]*ndims) + # test ODD sizes too + if ii < max_exponents_fft[ndims]: + setup = (""" +import numpy as np +from astropy.convolution.convolve import convolve +from astropy.convolution.convolve import convolve_fft +array = np.random.random([%i]*%i) +kernel = np.random.random([%i]*%i)""") % (2 ** ii - 1, ndims, 2 ** ii - 1, ndims) + + print("%16i:" % (int(2 ** ii - 1)), end=' ') + + if ii <= max_exponents_linear[ndims]: + for ffttype, extra in zip(("", "_fft"), + ("", "fft_pad=False")): + statement = "convolve{}(array, kernel, boundary='fill', {})".format(ffttype, extra) + besttime = min(timeit.Timer(stmt=statement, setup=setup).repeat(3, 10)) + print("%17f" % (besttime), end=' ') + else: + print("%17s" % "skipped", end=' ') + statement = "convolve_fft(array, kernel, boundary='fill')" + besttime = min(timeit.Timer(stmt=statement, setup=setup).repeat(3, 10)) + print("%17f" % (besttime), end=' ') + + print() + + setup = (""" +import numpy as np +from astropy.convolution.convolve import convolve +from astropy.convolution.convolve import convolve_fft +array = np.random.random([%i]*%i) +kernel = np.random.random([%i]*%i)""") % (2 ** ii, ndims, 2 ** ii, ndims) + + print("%16i:" % (int(2 ** ii)), end=' ') + + if ii <= max_exponents_linear[ndims]: + for ffttype in ("", "_fft"): + statement = "convolve{}(array, kernel, boundary='fill')".format(ffttype) + besttime = min(timeit.Timer(stmt=statement, setup=setup).repeat(3, 10)) + print("%17f" % (besttime), end=' ') + else: + print("%17s" % "skipped", end=' ') + statement = "convolve_fft(array, kernel, boundary='fill')" + besttime = min(timeit.Timer(stmt=statement, setup=setup).repeat(3, 10)) + print("%17f" % (besttime), end=' ') + + print() + +""" +Unfortunately, these tests are pretty strongly inconclusive + +RESULTS on a 2011 Mac Air: +1-dimensional arrays ('n' is the size of the image AND the kernel) + n convolve convolve_fftnp convolve_fftw convolve_fftsp + 7: 0.000408 0.002334 0.005571 0.002677 + 8: 0.000399 0.002818 0.006505 0.003094 + 15: 0.000361 0.002491 0.005648 0.002678 + 16: 0.000371 0.002997 0.005983 0.003036 + 31: 0.000535 0.002450 0.005988 0.002880 + 32: 0.000452 0.002618 0.007102 0.004366 + 63: 0.000509 0.002876 0.008003 0.002981 + 64: 0.000453 0.002706 0.005520 0.003049 + 127: 0.000801 0.004080 0.008513 0.003932 + 128: 0.000749 0.003332 0.006236 0.003159 + 255: 
0.002453 0.003111 0.007518 0.003564 + 256: 0.002478 0.003341 0.006325 0.004290 + 511: 0.008394 0.006224 0.010247 0.005991 + 512: 0.007934 0.003764 0.006840 0.004106 + 1023: 0.028741 0.007538 0.009591 0.007696 + 1024: 0.027900 0.004871 0.009628 0.005118 + 2047: 0.106323 0.021575 0.022041 0.020682 + 2048: 0.108916 0.008107 0.011049 0.007596 + 4095: 0.411936 0.021675 0.019761 0.020939 + 4096: 0.408992 0.018870 0.016663 0.012890 + 8191: 1.664517 8.278320 0.073001 7.803563 + 8192: 1.657573 0.037967 0.034227 0.028390 + 16383: 6.654678 0.251661 0.202271 0.222171 + 16384: 6.611977 0.073630 0.067616 0.055591 + +2-dimensional arrays ('n' is the size of the image AND the kernel) + n convolve convolve_fftnp convolve_fftw convolve_fftsp + 7: 0.000552 0.003524 0.006667 0.004318 + 8: 0.000646 0.004443 0.007354 0.003958 + 15: 0.002986 0.005093 0.012941 0.005951 + 16: 0.003549 0.005688 0.008818 0.006300 + 31: 0.074360 0.033973 0.031800 0.036937 + 32: 0.077338 0.017708 0.025637 0.011883 + 63: 0.848471 0.057407 0.052192 0.053213 + 64: 0.773061 0.029657 0.033409 0.028230 + 127: 14.656414 1.005329 0.402113 0.955279 + 128: 15.867796 0.266233 0.268551 0.237930 + 255: skipped 1.715546 1.566876 1.745338 + 256: skipped 1.515616 1.268220 1.036881 + 511: skipped 4.066155 4.303350 3.930661 + 512: skipped 3.976139 4.337525 3.968935 + +3-dimensional arrays ('n' is the size of the image AND the kernel) + n convolve convolve_fftnp convolve_fftw convolve_fftsp + 7: 0.009239 0.012957 0.011957 0.015997 + 8: 0.012405 0.011328 0.011677 0.012283 + 15: 0.772434 0.075621 0.056711 0.079508 + 16: 0.964635 0.105846 0.072811 0.104611 + 31: 62.824051 2.295193 1.189505 2.351136 + 32: 79.507060 1.169182 0.821779 1.275770 + 63: skipped 11.250225 10.982726 10.585744 + 64: skipped 10.013558 11.507645 12.665557 + + + +On a 2009 Mac Pro: +1-dimensional arrays ('n' is the size of the image AND the kernel) + n convolve convolve_fftnp convolve_fftw convolve_fftsp + 7: 0.000360 0.002269 0.004986 0.002476 + 8: 0.000361 0.002468 0.005242 0.002696 + 15: 0.000364 0.002255 0.005244 0.002471 + 16: 0.000365 0.002506 0.005286 0.002727 + 31: 0.000385 0.002380 0.005422 0.002588 + 32: 0.000385 0.002531 0.005543 0.002737 + 63: 0.000474 0.002407 0.005392 0.002637 + 64: 0.000484 0.002602 0.005631 0.002823 + 127: 0.000752 0.004122 0.007827 0.003966 + 128: 0.000757 0.002763 0.005844 0.002958 + 255: 0.004316 0.003258 0.006566 0.003324 + 256: 0.004354 0.003180 0.006120 0.003245 + 511: 0.011517 0.007158 0.009898 0.006238 + 512: 0.011482 0.003873 0.006777 0.003820 + 1023: 0.034105 0.009211 0.009468 0.008260 + 1024: 0.034609 0.005504 0.008399 0.005080 + 2047: 0.113620 0.028097 0.020662 0.021603 + 2048: 0.112828 0.008403 0.010939 0.007331 + 4095: 0.403373 0.023211 0.018767 0.020065 + 4096: 0.403316 0.017550 0.017853 0.013651 + 8191: 1.519329 8.454573 0.211436 7.212381 + 8192: 1.519082 0.033148 0.030370 0.025905 + 16383: 5.887481 0.317428 0.153344 0.237119 + 16384: 5.888222 0.069379 0.065264 0.052847 + +2-dimensional arrays ('n' is the size of the image AND the kernel) + n convolve convolve_fftnp convolve_fftw convolve_fftsp + 7: 0.000474 0.003470 0.006131 0.003503 + 8: 0.000503 0.003565 0.006400 0.003586 + 15: 0.002011 0.004481 0.007825 0.004496 + 16: 0.002236 0.004744 0.007078 0.004680 + 31: 0.027291 0.019433 0.014841 0.018034 + 32: 0.029283 0.009244 0.010161 0.008964 + 63: 0.445680 0.038171 0.026753 0.037404 + 64: 0.460616 0.028128 0.029487 0.029149 + 127: 7.003774 0.925921 0.282591 0.762671 + 128: 7.063657 0.110838 0.104402 0.133523 + 255: skipped 0.804682 
0.708849 0.869368 + 256: skipped 0.797800 0.721042 0.880848 + 511: skipped 3.643626 3.687562 4.584770 + 512: skipped 3.715215 4.893539 5.538462 + +3-dimensional arrays ('n' is the size of the image AND the kernel) + n convolve convolve_fftnp convolve_fftw convolve_fftsp + 7: 0.004520 0.011519 0.009464 0.012335 + 8: 0.006422 0.010294 0.010220 0.011711 + 15: 0.329566 0.060978 0.045495 0.073692 + 16: 0.405275 0.069999 0.040659 0.086114 + 31: 24.935228 1.654920 0.710509 1.773879 + 32: 27.524226 0.724053 0.543507 1.027568 + 63: skipped 8.982771 12.407683 16.900078 + 64: skipped 8.956070 11.934627 17.296447 + +""" diff --git a/astropy/convolution/tests/test_discretize.py b/astropy/convolution/tests/test_discretize.py new file mode 100644 index 0000000..388c088 --- /dev/null +++ b/astropy/convolution/tests/test_discretize.py @@ -0,0 +1,198 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import itertools + +import pytest +import numpy as np +from numpy.testing import assert_allclose + +from ..utils import discretize_model +from ...modeling.functional_models import ( + Gaussian1D, Box1D, MexicanHat1D, Gaussian2D, Box2D, MexicanHat2D) +from ...modeling.tests.example_models import models_1D, models_2D +from ...modeling.tests.test_models import create_model + +try: + import scipy # pylint: disable=W0611 + HAS_SCIPY = True +except ImportError: + HAS_SCIPY = False + + +modes = ['center', 'linear_interp', 'oversample'] +test_models_1D = [Gaussian1D, Box1D, MexicanHat1D] +test_models_2D = [Gaussian2D, Box2D, MexicanHat2D] + + +@pytest.mark.parametrize(('model_class', 'mode'), list(itertools.product(test_models_1D, modes))) +def test_pixel_sum_1D(model_class, mode): + """ + Test if the sum of all pixels corresponds nearly to the integral. + """ + if model_class == Box1D and mode == "center": + pytest.skip("Non integrating mode. Skip integral test.") + parameters = models_1D[model_class] + model = create_model(model_class, parameters) + + values = discretize_model(model, models_1D[model_class]['x_lim'], mode=mode) + assert_allclose(values.sum(), models_1D[model_class]['integral'], atol=0.0001) + + +@pytest.mark.parametrize('mode', modes) +def test_gaussian_eval_1D(mode): + """ + Discretize Gaussian with different modes and check + if result is at least similar to Gaussian1D.eval(). + """ + model = Gaussian1D(1, 0, 20) + x = np.arange(-100, 101) + values = model(x) + disc_values = discretize_model(model, (-100, 101), mode=mode) + assert_allclose(values, disc_values, atol=0.001) + + +@pytest.mark.parametrize(('model_class', 'mode'), list(itertools.product(test_models_2D, modes))) +def test_pixel_sum_2D(model_class, mode): + """ + Test if the sum of all pixels corresponds nearly to the integral. + """ + if model_class == Box2D and mode == "center": + pytest.skip("Non integrating mode. 
Skip integral test.") + + parameters = models_2D[model_class] + model = create_model(model_class, parameters) + + values = discretize_model(model, models_2D[model_class]['x_lim'], + models_2D[model_class]['y_lim'], mode=mode) + assert_allclose(values.sum(), models_2D[model_class]['integral'], atol=0.0001) + + +@pytest.mark.parametrize('mode', modes) +def test_gaussian_eval_2D(mode): + """ + Discretize Gaussian with different modes and check + if result is at least similar to Gaussian2D.eval() + """ + model = Gaussian2D(0.01, 0, 0, 1, 1) + + x = np.arange(-2, 3) + y = np.arange(-2, 3) + + x, y = np.meshgrid(x, y) + + values = model(x, y) + disc_values = discretize_model(model, (-2, 3), (-2, 3), mode=mode) + assert_allclose(values, disc_values, atol=1e-2) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_gaussian_eval_2D_integrate_mode(): + """ + Discretize Gaussian with integrate mode + """ + model_list = [Gaussian2D(.01, 0, 0, 2, 2), + Gaussian2D(.01, 0, 0, 1, 2), + Gaussian2D(.01, 0, 0, 2, 1)] + + x = np.arange(-2, 3) + y = np.arange(-2, 3) + + x, y = np.meshgrid(x, y) + + for model in model_list: + values = model(x, y) + disc_values = discretize_model(model, (-2, 3), (-2, 3), mode='integrate') + assert_allclose(values, disc_values, atol=1e-2) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_subpixel_gauss_1D(): + """ + Test subpixel accuracy of the integrate mode with gaussian 1D model. + """ + gauss_1D = Gaussian1D(1, 0, 0.1) + values = discretize_model(gauss_1D, (-1, 2), mode='integrate', factor=100) + assert_allclose(values.sum(), np.sqrt(2 * np.pi) * 0.1, atol=0.00001) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_subpixel_gauss_2D(): + """ + Test subpixel accuracy of the integrate mode with gaussian 2D model. + """ + gauss_2D = Gaussian2D(1, 0, 0, 0.1, 0.1) + values = discretize_model(gauss_2D, (-1, 2), (-1, 2), mode='integrate', factor=100) + assert_allclose(values.sum(), 2 * np.pi * 0.01, atol=0.00001) + + +def test_discretize_callable_1d(): + """ + Test discretize when a 1d function is passed. + """ + def f(x): + return x ** 2 + y = discretize_model(f, (-5, 6)) + assert_allclose(y, np.arange(-5, 6) ** 2) + + +def test_discretize_callable_2d(): + """ + Test discretize when a 2d function is passed. + """ + def f(x, y): + return x ** 2 + y ** 2 + actual = discretize_model(f, (-5, 6), (-5, 6)) + y, x = (np.indices((11, 11)) - 5) + desired = x ** 2 + y ** 2 + assert_allclose(actual, desired) + + +def test_type_exception(): + """ + Test type exception. + """ + with pytest.raises(TypeError) as exc: + discretize_model(float(0), (-10, 11)) + assert exc.value.args[0] == 'Model must be callable.' + + +def test_dim_exception_1d(): + """ + Test dimension exception 1d. + """ + def f(x): + return x ** 2 + with pytest.raises(ValueError) as exc: + discretize_model(f, (-10, 11), (-10, 11)) + assert exc.value.args[0] == "y range specified, but model is only 1-d." + + +def test_dim_exception_2d(): + """ + Test dimension exception 2d. 
+ """ + def f(x, y): + return x ** 2 + y ** 2 + with pytest.raises(ValueError) as exc: + discretize_model(f, (-10, 11)) + assert exc.value.args[0] == "y range not specified, but model is 2-d" + + +def test_float_x_range_exception(): + def f(x, y): + return x ** 2 + y ** 2 + with pytest.raises(ValueError) as exc: + discretize_model(f, (-10.002, 11.23)) + assert exc.value.args[0] == ("The difference between the upper an lower" + " limit of 'x_range' must be a whole number.") + + +def test_float_y_range_exception(): + def f(x, y): + return x ** 2 + y ** 2 + with pytest.raises(ValueError) as exc: + discretize_model(f, (-10, 11), (-10.002, 11.23)) + assert exc.value.args[0] == ("The difference between the upper an lower" + " limit of 'y_range' must be a whole number.") diff --git a/astropy/convolution/tests/test_kernel_class.py b/astropy/convolution/tests/test_kernel_class.py new file mode 100644 index 0000000..592923e --- /dev/null +++ b/astropy/convolution/tests/test_kernel_class.py @@ -0,0 +1,522 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import itertools + +import pytest +import numpy as np +from numpy.testing import assert_almost_equal, assert_allclose + +from ..convolve import convolve, convolve_fft +from ..kernels import ( + Gaussian1DKernel, Gaussian2DKernel, Box1DKernel, Box2DKernel, + Trapezoid1DKernel, TrapezoidDisk2DKernel, MexicanHat1DKernel, + Tophat2DKernel, MexicanHat2DKernel, AiryDisk2DKernel, Ring2DKernel, + CustomKernel, Model1DKernel, Model2DKernel, Kernel1D, Kernel2D) + +from ..utils import KernelSizeError +from ...modeling.models import Box2D, Gaussian1D, Gaussian2D +from ...utils.exceptions import AstropyWarning, AstropyUserWarning + +try: + from scipy.ndimage import filters + HAS_SCIPY = True +except ImportError: + HAS_SCIPY = False + +WIDTHS_ODD = [3, 5, 7, 9] +WIDTHS_EVEN = [2, 4, 8, 16] +MODES = ['center', 'linear_interp', 'oversample', 'integrate'] +KERNEL_TYPES = [Gaussian1DKernel, Gaussian2DKernel, + Box1DKernel, Box2DKernel, + Trapezoid1DKernel, TrapezoidDisk2DKernel, + MexicanHat1DKernel, Tophat2DKernel, AiryDisk2DKernel, Ring2DKernel] + + +NUMS = [1, 1., np.float(1.), np.float32(1.), np.float64(1.)] + + +# Test data +delta_pulse_1D = np.zeros(81) +delta_pulse_1D[40] = 1 + +delta_pulse_2D = np.zeros((81, 81)) +delta_pulse_2D[40, 40] = 1 + +random_data_1D = np.random.rand(61) +random_data_2D = np.random.rand(61, 61) + + +class TestKernels(object): + """ + Test class for the built-in convolution kernels. + """ + + @pytest.mark.skipif('not HAS_SCIPY') + @pytest.mark.parametrize(('width'), WIDTHS_ODD) + def test_scipy_filter_gaussian(self, width): + """ + Test GaussianKernel against SciPy ndimage gaussian filter. 
+ """ + gauss_kernel_1D = Gaussian1DKernel(width) + gauss_kernel_1D.normalize() + gauss_kernel_2D = Gaussian2DKernel(width) + gauss_kernel_2D.normalize() + + astropy_1D = convolve(delta_pulse_1D, gauss_kernel_1D, boundary='fill') + astropy_2D = convolve(delta_pulse_2D, gauss_kernel_2D, boundary='fill') + + scipy_1D = filters.gaussian_filter(delta_pulse_1D, width) + scipy_2D = filters.gaussian_filter(delta_pulse_2D, width) + + assert_almost_equal(astropy_1D, scipy_1D, decimal=12) + assert_almost_equal(astropy_2D, scipy_2D, decimal=12) + + @pytest.mark.skipif('not HAS_SCIPY') + @pytest.mark.parametrize(('width'), WIDTHS_ODD) + def test_scipy_filter_gaussian_laplace(self, width): + """ + Test MexicanHat kernels against SciPy ndimage gaussian laplace filters. + """ + mexican_kernel_1D = MexicanHat1DKernel(width) + mexican_kernel_2D = MexicanHat2DKernel(width) + + astropy_1D = convolve(delta_pulse_1D, mexican_kernel_1D, boundary='fill', normalize_kernel=False) + astropy_2D = convolve(delta_pulse_2D, mexican_kernel_2D, boundary='fill', normalize_kernel=False) + + with pytest.raises(Exception) as exc: + astropy_1D = convolve(delta_pulse_1D, mexican_kernel_1D, boundary='fill', normalize_kernel=True) + assert 'sum is close to zero' in exc.value.args[0] + + with pytest.raises(Exception) as exc: + astropy_2D = convolve(delta_pulse_2D, mexican_kernel_2D, boundary='fill', normalize_kernel=True) + assert 'sum is close to zero' in exc.value.args[0] + + # The Laplace of Gaussian filter is an inverted Mexican Hat + # filter. + scipy_1D = -filters.gaussian_laplace(delta_pulse_1D, width) + scipy_2D = -filters.gaussian_laplace(delta_pulse_2D, width) + + # There is a slight deviation in the normalization. They differ by a + # factor of ~1.0000284132604045. The reason is not known. 
+ assert_almost_equal(astropy_1D, scipy_1D, decimal=5) + assert_almost_equal(astropy_2D, scipy_2D, decimal=5) + + @pytest.mark.parametrize(('kernel_type', 'width'), list(itertools.product(KERNEL_TYPES, WIDTHS_ODD))) + def test_delta_data(self, kernel_type, width): + """ + Test smoothing of an image with a single positive pixel + """ + if kernel_type == AiryDisk2DKernel and not HAS_SCIPY: + pytest.skip("Omitting AiryDisk2DKernel, which requires SciPy") + if not kernel_type == Ring2DKernel: + kernel = kernel_type(width) + else: + kernel = kernel_type(width, width * 0.2) + + if kernel.dimension == 1: + c1 = convolve_fft(delta_pulse_1D, kernel, boundary='fill', normalize_kernel=False) + c2 = convolve(delta_pulse_1D, kernel, boundary='fill', normalize_kernel=False) + assert_almost_equal(c1, c2, decimal=12) + else: + c1 = convolve_fft(delta_pulse_2D, kernel, boundary='fill', normalize_kernel=False) + c2 = convolve(delta_pulse_2D, kernel, boundary='fill', normalize_kernel=False) + assert_almost_equal(c1, c2, decimal=12) + + @pytest.mark.parametrize(('kernel_type', 'width'), list(itertools.product(KERNEL_TYPES, WIDTHS_ODD))) + def test_random_data(self, kernel_type, width): + """ + Test smoothing of an image made of random noise + """ + if kernel_type == AiryDisk2DKernel and not HAS_SCIPY: + pytest.skip("Omitting AiryDisk2DKernel, which requires SciPy") + if not kernel_type == Ring2DKernel: + kernel = kernel_type(width) + else: + kernel = kernel_type(width, width * 0.2) + + if kernel.dimension == 1: + c1 = convolve_fft(random_data_1D, kernel, boundary='fill', normalize_kernel=False) + c2 = convolve(random_data_1D, kernel, boundary='fill', normalize_kernel=False) + assert_almost_equal(c1, c2, decimal=12) + else: + c1 = convolve_fft(random_data_2D, kernel, boundary='fill', normalize_kernel=False) + c2 = convolve(random_data_2D, kernel, boundary='fill', normalize_kernel=False) + assert_almost_equal(c1, c2, decimal=12) + + @pytest.mark.parametrize(('width'), WIDTHS_ODD) + def test_uniform_smallkernel(self, width): + """ + Test smoothing of an image with a single positive pixel + + Instead of using kernel class, uses a simple, small kernel + """ + kernel = np.ones([width, width]) + + c2 = convolve_fft(delta_pulse_2D, kernel, boundary='fill') + c1 = convolve(delta_pulse_2D, kernel, boundary='fill') + assert_almost_equal(c1, c2, decimal=12) + + @pytest.mark.parametrize(('width'), WIDTHS_ODD) + def test_smallkernel_vs_Box2DKernel(self, width): + """ + Test smoothing of an image with a single positive pixel + """ + kernel1 = np.ones([width, width]) / width ** 2 + kernel2 = Box2DKernel(width) + + c2 = convolve_fft(delta_pulse_2D, kernel2, boundary='fill') + c1 = convolve_fft(delta_pulse_2D, kernel1, boundary='fill') + + assert_almost_equal(c1, c2, decimal=12) + + def test_convolve_1D_kernels(self): + """ + Check if convolving two kernels with each other works correctly. + """ + gauss_1 = Gaussian1DKernel(3) + gauss_2 = Gaussian1DKernel(4) + test_gauss_3 = Gaussian1DKernel(5) + + gauss_3 = convolve(gauss_1, gauss_2) + assert np.all(np.abs((gauss_3 - test_gauss_3).array) < 0.01) + + def test_convolve_2D_kernels(self): + """ + Check if convolving two kernels with each other works correctly. 
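+ Convolving Gaussians adds their variances, so stddevs 3 and 4 + should combine to stddev 5, as in the 1D case above.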
+ """ + gauss_1 = Gaussian2DKernel(3) + gauss_2 = Gaussian2DKernel(4) + test_gauss_3 = Gaussian2DKernel(5) + + gauss_3 = convolve(gauss_1, gauss_2) + assert np.all(np.abs((gauss_3 - test_gauss_3).array) < 0.01) + + @pytest.mark.parametrize(('number'), NUMS) + def test_multiply_scalar(self, number): + """ + Check if multiplying a kernel with a scalar works correctly. + """ + gauss = Gaussian1DKernel(3) + gauss_new = number * gauss + assert_almost_equal(gauss_new.array, gauss.array * number, decimal=12) + + @pytest.mark.parametrize(('number'), NUMS) + def test_multiply_scalar_type(self, number): + """ + Check if multiplying a kernel with a scalar works correctly. + """ + gauss = Gaussian1DKernel(3) + gauss_new = number * gauss + assert type(gauss_new) is Gaussian1DKernel + + @pytest.mark.parametrize(('number'), NUMS) + def test_rmultiply_scalar_type(self, number): + """ + Check if multiplying a kernel with a scalar works correctly. + """ + gauss = Gaussian1DKernel(3) + gauss_new = gauss * number + assert type(gauss_new) is Gaussian1DKernel + + def test_multiply_kernel1d(self): + """Test that multiplying two 1D kernels raises an exception.""" + gauss = Gaussian1DKernel(3) + with pytest.raises(Exception): + gauss * gauss + + def test_multiply_kernel2d(self): + """Test that multiplying two 2D kernels raises an exception.""" + gauss = Gaussian2DKernel(3) + with pytest.raises(Exception): + gauss * gauss + + def test_multiply_kernel1d_kernel2d(self): + """ + Test that multiplying a 1D kernel with a 2D kernel raises an + exception. + """ + with pytest.raises(Exception): + Gaussian1DKernel(3) * Gaussian2DKernel(3) + + def test_add_kernel_scalar(self): + """Test that adding a scalar to a kernel raises an exception.""" + with pytest.raises(Exception): + Gaussian1DKernel(3) + 1 + + def test_model_1D_kernel(self): + """ + Check Model1DKernel against Gaussian1Dkernel + """ + stddev = 5. + gauss = Gaussian1D(1. / np.sqrt(2 * np.pi * stddev**2), 0, stddev) + model_gauss_kernel = Model1DKernel(gauss, x_size=21) + gauss_kernel = Gaussian1DKernel(stddev, x_size=21) + assert_almost_equal(model_gauss_kernel.array, gauss_kernel.array, + decimal=12) + + def test_model_2D_kernel(self): + """ + Check Model2DKernel against Gaussian2Dkernel + """ + stddev = 5. + gauss = Gaussian2D(1. / (2 * np.pi * stddev**2), 0, 0, stddev, stddev) + model_gauss_kernel = Model2DKernel(gauss, x_size=21) + gauss_kernel = Gaussian2DKernel(stddev, x_size=21) + assert_almost_equal(model_gauss_kernel.array, gauss_kernel.array, + decimal=12) + + def test_custom_1D_kernel(self): + """ + Check CustomKernel against Box1DKernel. + """ + # Define one dimensional array: + array = np.ones(5) + custom = CustomKernel(array) + custom.normalize() + box = Box1DKernel(5) + + c2 = convolve(delta_pulse_1D, custom, boundary='fill') + c1 = convolve(delta_pulse_1D, box, boundary='fill') + assert_almost_equal(c1, c2, decimal=12) + + def test_custom_2D_kernel(self): + """ + Check CustomKernel against Box2DKernel. + """ + # Define one dimensional array: + array = np.ones((5, 5)) + custom = CustomKernel(array) + custom.normalize() + box = Box2DKernel(5) + + c2 = convolve(delta_pulse_2D, custom, boundary='fill') + c1 = convolve(delta_pulse_2D, box, boundary='fill') + assert_almost_equal(c1, c2, decimal=12) + + def test_custom_1D_kernel_list(self): + """ + Check if CustomKernel works with lists. + """ + custom = CustomKernel([1, 1, 1, 1, 1]) + assert custom.is_bool is True + + def test_custom_2D_kernel_list(self): + """ + Check if CustomKernel works with lists. 
+ """ + custom = CustomKernel([[1, 1, 1], + [1, 1, 1], + [1, 1, 1]]) + assert custom.is_bool is True + + def test_custom_1D_kernel_zerosum(self): + """ + Check if CustomKernel works when the input array/list + sums to zero. + """ + array = [-2, -1, 0, 1, 2] + custom = CustomKernel(array) + custom.normalize() + assert custom.truncation == 0. + assert custom._kernel_sum == 0. + + def test_custom_2D_kernel_zerosum(self): + """ + Check if CustomKernel works when the input array/list + sums to zero. + """ + array = [[0, -1, 0], [-1, 4, -1], [0, -1, 0]] + custom = CustomKernel(array) + custom.normalize() + assert custom.truncation == 0. + assert custom._kernel_sum == 0. + + def test_custom_kernel_odd_error(self): + """ + Check if CustomKernel raises if the array size is odd. + """ + with pytest.raises(KernelSizeError): + CustomKernel([1, 1, 1, 1]) + + def test_add_1D_kernels(self): + """ + Check if adding of two 1D kernels works. + """ + box_1 = Box1DKernel(5) + box_2 = Box1DKernel(3) + box_3 = Box1DKernel(1) + box_sum_1 = box_1 + box_2 + box_3 + box_sum_2 = box_2 + box_3 + box_1 + box_sum_3 = box_3 + box_1 + box_2 + ref = [1/5., 1/5. + 1/3., 1 + 1/3. + 1/5., 1/5. + 1/3., 1/5.] + assert_almost_equal(box_sum_1.array, ref, decimal=12) + assert_almost_equal(box_sum_2.array, ref, decimal=12) + assert_almost_equal(box_sum_3.array, ref, decimal=12) + + # Assert that the kernels haven't changed + assert_almost_equal(box_1.array, [0.2, 0.2, 0.2, 0.2, 0.2], decimal=12) + assert_almost_equal(box_2.array, [1/3., 1/3., 1/3.], decimal=12) + assert_almost_equal(box_3.array, [1], decimal=12) + + def test_add_2D_kernels(self): + """ + Check if adding of two 1D kernels works. + """ + box_1 = Box2DKernel(3) + box_2 = Box2DKernel(1) + box_sum_1 = box_1 + box_2 + box_sum_2 = box_2 + box_1 + ref = [[1 / 9., 1 / 9., 1 / 9.], + [1 / 9., 1 + 1 / 9., 1 / 9.], + [1 / 9., 1 / 9., 1 / 9.]] + ref_1 = [[1 / 9., 1 / 9., 1 / 9.], + [1 / 9., 1 / 9., 1 / 9.], + [1 / 9., 1 / 9., 1 / 9.]] + assert_almost_equal(box_2.array, [[1]], decimal=12) + assert_almost_equal(box_1.array, ref_1, decimal=12) + assert_almost_equal(box_sum_1.array, ref, decimal=12) + assert_almost_equal(box_sum_2.array, ref, decimal=12) + + def test_Gaussian1DKernel_even_size(self): + """ + Check if even size for GaussianKernel works. + """ + gauss = Gaussian1DKernel(3, x_size=10) + assert gauss.array.size == 10 + + def test_Gaussian2DKernel_even_size(self): + """ + Check if even size for GaussianKernel works. + """ + gauss = Gaussian2DKernel(3, x_size=10, y_size=10) + assert gauss.array.shape == (10, 10) + + def test_normalize_peak(self): + """ + Check if normalize works with peak mode. + """ + custom = CustomKernel([1, 2, 3, 2, 1]) + custom.normalize(mode='peak') + assert custom.array.max() == 1 + + def test_check_kernel_attributes(self): + """ + Check if kernel attributes are correct. + """ + box = Box2DKernel(5) + + # Check truncation + assert box.truncation == 0 + + # Check model + assert isinstance(box.model, Box2D) + + # Check center + assert box.center == [2, 2] + + # Check normalization + box.normalize() + assert_almost_equal(box._kernel_sum, 1., decimal=12) + + # Check separability + assert box.separable + + @pytest.mark.parametrize(('kernel_type', 'mode'), list(itertools.product(KERNEL_TYPES, MODES))) + def test_discretize_modes(self, kernel_type, mode): + """ + Check if the different modes result in kernels that work with convolve. + Use only small kernel width, to make the test pass quickly. 
+ """ + if kernel_type == AiryDisk2DKernel and not HAS_SCIPY: + pytest.skip("Omitting AiryDisk2DKernel, which requires SciPy") + if not kernel_type == Ring2DKernel: + kernel = kernel_type(3) + else: + kernel = kernel_type(3, 3 * 0.2) + + if kernel.dimension == 1: + c1 = convolve_fft(delta_pulse_1D, kernel, boundary='fill', normalize_kernel=False) + c2 = convolve(delta_pulse_1D, kernel, boundary='fill', normalize_kernel=False) + assert_almost_equal(c1, c2, decimal=12) + else: + c1 = convolve_fft(delta_pulse_2D, kernel, boundary='fill', normalize_kernel=False) + c2 = convolve(delta_pulse_2D, kernel, boundary='fill', normalize_kernel=False) + assert_almost_equal(c1, c2, decimal=12) + + @pytest.mark.parametrize(('width'), WIDTHS_EVEN) + def test_box_kernels_even_size(self, width): + """ + Check if BoxKernel work properly with even sizes. + """ + kernel_1D = Box1DKernel(width) + assert kernel_1D.shape[0] % 2 != 0 + assert kernel_1D.array.sum() == 1. + + kernel_2D = Box2DKernel(width) + assert np.all([_ % 2 != 0 for _ in kernel_2D.shape]) + assert kernel_2D.array.sum() == 1. + + def test_kernel_normalization(self): + """ + Test that repeated normalizations do not change the kernel [#3747]. + """ + + kernel = CustomKernel(np.ones(5)) + kernel.normalize() + data = np.copy(kernel.array) + + kernel.normalize() + assert_allclose(data, kernel.array) + + kernel.normalize() + assert_allclose(data, kernel.array) + + def test_kernel_normalization_mode(self): + """ + Test that an error is raised if mode is invalid. + """ + with pytest.raises(ValueError): + kernel = CustomKernel(np.ones(3)) + kernel.normalize(mode='invalid') + + def test_kernel1d_int_size(self): + """ + Test that an error is raised if ``Kernel1D`` ``x_size`` is not + an integer. + """ + with pytest.raises(TypeError): + Gaussian1DKernel(3, x_size=1.2) + + def test_kernel2d_int_xsize(self): + """ + Test that an error is raised if ``Kernel2D`` ``x_size`` is not + an integer. + """ + with pytest.raises(TypeError): + Gaussian2DKernel(3, x_size=1.2) + + def test_kernel2d_int_ysize(self): + """ + Test that an error is raised if ``Kernel2D`` ``y_size`` is not + an integer. + """ + with pytest.raises(TypeError): + Gaussian2DKernel(3, x_size=5, y_size=1.2) + + def test_kernel1d_initialization(self): + """ + Test that an error is raised if an array or model is not + specified for ``Kernel1D``. + """ + with pytest.raises(TypeError): + Kernel1D() + + def test_kernel2d_initialization(self): + """ + Test that an error is raised if an array or model is not + specified for ``Kernel2D``. + """ + with pytest.raises(TypeError): + Kernel2D() diff --git a/astropy/convolution/tests/test_pickle.py b/astropy/convolution/tests/test_pickle.py new file mode 100644 index 0000000..15a214b --- /dev/null +++ b/astropy/convolution/tests/test_pickle.py @@ -0,0 +1,27 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import absolute_import, division, print_function, unicode_literals + +import pytest +import numpy as np + +from ... 
import convolution as conv +from ...tests.helper import pickle_protocol, check_pickling_recovery # noqa + + +@pytest.mark.parametrize(("name", "args", "kwargs", "xfail"), + [(conv.CustomKernel, [], + {'array': np.random.rand(15)}, + False), + (conv.Gaussian1DKernel, [1.0], + {'x_size': 5}, + True), + (conv.Gaussian2DKernel, [1.0], + {'x_size': 5, 'y_size': 5}, + True), + ]) +def test_simple_object(pickle_protocol, name, args, kwargs, xfail): + # Tests easily instantiated objects + if xfail: + pytest.xfail() + original = name(*args, **kwargs) + check_pickling_recovery(original, pickle_protocol) diff --git a/astropy/convolution/utils.py b/astropy/convolution/utils.py new file mode 100644 index 0000000..38f229c --- /dev/null +++ b/astropy/convolution/utils.py @@ -0,0 +1,301 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import numpy as np + +from ..modeling.core import FittableModel, custom_model +from ..extern.six.moves import range + +__all__ = ['discretize_model'] + + +class DiscretizationError(Exception): + """ + Raised when discretization of models goes wrong. + """ + + +class KernelSizeError(Exception): + """ + Raised when the size of a kernel is even. + """ + + +def add_kernel_arrays_1D(array_1, array_2): + """ + Add two 1D kernel arrays of different size. + + The arrays are added with the centers lying upon each other. + """ + if array_1.size > array_2.size: + new_array = array_1.copy() + center = array_1.size // 2 + slice_ = slice(center - array_2.size // 2, + center + array_2.size // 2 + 1) + new_array[slice_] += array_2 + return new_array + elif array_2.size > array_1.size: + new_array = array_2.copy() + center = array_2.size // 2 + slice_ = slice(center - array_1.size // 2, + center + array_1.size // 2 + 1) + new_array[slice_] += array_1 + return new_array + return array_2 + array_1 + + +def add_kernel_arrays_2D(array_1, array_2): + """ + Add two 2D kernel arrays of different size. + + The arrays are added with the centers lying upon each other. + """ + if array_1.size > array_2.size: + new_array = array_1.copy() + center = [axes_size // 2 for axes_size in array_1.shape] + slice_x = slice(center[1] - array_2.shape[1] // 2, + center[1] + array_2.shape[1] // 2 + 1) + slice_y = slice(center[0] - array_2.shape[0] // 2, + center[0] + array_2.shape[0] // 2 + 1) + new_array[slice_y, slice_x] += array_2 + return new_array + elif array_2.size > array_1.size: + new_array = array_2.copy() + center = [axes_size // 2 for axes_size in array_2.shape] + slice_x = slice(center[1] - array_1.shape[1] // 2, + center[1] + array_1.shape[1] // 2 + 1) + slice_y = slice(center[0] - array_1.shape[0] // 2, + center[0] + array_1.shape[0] // 2 + 1) + new_array[slice_y, slice_x] += array_1 + return new_array + return array_2 + array_1 + + +def discretize_model(model, x_range, y_range=None, mode='center', factor=10): + """ + Function to evaluate analytical model functions on a grid. + + So far the function can only deal with pixel coordinates. + + Parameters + ---------- + model : `~astropy.modeling.FittableModel` or callable. + Analytic model function to be discretized. Callables that are not + instances of `~astropy.modeling.FittableModel` are passed to + `~astropy.modeling.custom_model` and then evaluated. + x_range : tuple + x range in which the model is evaluated. The difference between the + upper and lower limit must be a whole number, so that the output array + size is well defined. 
+ y_range : tuple, optional + y range in which the model is evaluated. The difference between the + upper and lower limit must be a whole number, so that the output array + size is well defined. Necessary only for 2D models. + mode : str, optional + One of the following modes: + * ``'center'`` (default) + Discretize model by taking the value + at the center of the bin. + * ``'linear_interp'`` + Discretize model by linearly interpolating + between the values at the corners of the bin. + For 2D models interpolation is bilinear. + * ``'oversample'`` + Discretize model by taking the average + on an oversampled grid. + * ``'integrate'`` + Discretize model by integrating the model + over the bin using `scipy.integrate.quad`. + Very slow. + factor : float or int + Factor of oversampling. Default = 10. + + Returns + ------- + array : `numpy.ndarray` + Model value array. + + Notes + ----- + The ``oversample`` mode makes it possible to conserve the integral on a + subpixel scale. Here is an example of a normalized Gaussian1D: + + .. plot:: + :include-source: + + import matplotlib.pyplot as plt + import numpy as np + from astropy.modeling.models import Gaussian1D + from astropy.convolution.utils import discretize_model + gauss_1D = Gaussian1D(1 / (0.5 * np.sqrt(2 * np.pi)), 0, 0.5) + y_center = discretize_model(gauss_1D, (-2, 3), mode='center') + y_corner = discretize_model(gauss_1D, (-2, 3), mode='linear_interp') + y_oversample = discretize_model(gauss_1D, (-2, 3), mode='oversample') + plt.plot(y_center, label='center sum = {0:3f}'.format(y_center.sum())) + plt.plot(y_corner, label='linear_interp sum = {0:3f}'.format(y_corner.sum())) + plt.plot(y_oversample, label='oversample sum = {0:3f}'.format(y_oversample.sum())) + plt.xlabel('pixels') + plt.ylabel('value') + plt.legend() + plt.show() + + + """ + if not callable(model): + raise TypeError('Model must be callable.') + if not isinstance(model, FittableModel): + model = custom_model(model)() + ndim = model.n_inputs + if ndim > 2: + raise ValueError('discretize_model only supports 1-d and 2-d models.') + + if not float(np.diff(x_range)).is_integer(): + raise ValueError("The difference between the upper and lower limit of" + " 'x_range' must be a whole number.") + + if y_range: + if not float(np.diff(y_range)).is_integer(): + raise ValueError("The difference between the upper and lower limit of" + " 'y_range' must be a whole number.") + + if ndim == 2 and y_range is None: + raise ValueError("y range not specified, but model is 2-d") + if ndim == 1 and y_range is not None: + raise ValueError("y range specified, but model is only 1-d.") + if mode == "center": + if ndim == 1: + return discretize_center_1D(model, x_range) + elif ndim == 2: + return discretize_center_2D(model, x_range, y_range) + elif mode == "linear_interp": + if ndim == 1: + return discretize_linear_1D(model, x_range) + if ndim == 2: + return discretize_bilinear_2D(model, x_range, y_range) + elif mode == "oversample": + if ndim == 1: + return discretize_oversample_1D(model, x_range, factor) + if ndim == 2: + return discretize_oversample_2D(model, x_range, y_range, factor) + elif mode == "integrate": + if ndim == 1: + return discretize_integrate_1D(model, x_range) + if ndim == 2: + return discretize_integrate_2D(model, x_range, y_range) + else: + raise DiscretizationError('Invalid mode.') + + +def discretize_center_1D(model, x_range): + """ + Discretize model by taking the value at the center of the bin. 
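+ For a 1D model this is simply ``model(x)`` evaluated at the integer + grid positions ``np.arange(*x_range)``.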
+ """ + x = np.arange(*x_range) + return model(x) + + +def discretize_center_2D(model, x_range, y_range): + """ + Discretize model by taking the value at the center of the pixel. + """ + x = np.arange(*x_range) + y = np.arange(*y_range) + x, y = np.meshgrid(x, y) + return model(x, y) + + +def discretize_linear_1D(model, x_range): + """ + Discretize model by performing a linear interpolation. + """ + # Evaluate model 0.5 pixel outside the boundaries + x = np.arange(x_range[0] - 0.5, x_range[1] + 0.5) + values_intermediate_grid = model(x) + return 0.5 * (values_intermediate_grid[1:] + values_intermediate_grid[:-1]) + + +def discretize_bilinear_2D(model, x_range, y_range): + """ + Discretize model by performing a bilinear interpolation. + """ + # Evaluate model 0.5 pixel outside the boundaries + x = np.arange(x_range[0] - 0.5, x_range[1] + 0.5) + y = np.arange(y_range[0] - 0.5, y_range[1] + 0.5) + x, y = np.meshgrid(x, y) + values_intermediate_grid = model(x, y) + + # Mean in y direction + values = 0.5 * (values_intermediate_grid[1:, :] + + values_intermediate_grid[:-1, :]) + # Mean in x direction + values = 0.5 * (values[:, 1:] + + values[:, :-1]) + return values + + +def discretize_oversample_1D(model, x_range, factor=10): + """ + Discretize model by taking the average on an oversampled grid. + """ + # Evaluate model on oversampled grid + x = np.arange(x_range[0] - 0.5 * (1 - 1 / factor), + x_range[1] + 0.5 * (1 + 1 / factor), 1. / factor) + + values = model(x) + + # Reshape and compute mean + values = np.reshape(values, (x.size // factor, factor)) + return values.mean(axis=1)[:-1] + + +def discretize_oversample_2D(model, x_range, y_range, factor=10): + """ + Discretize model by taking the average on an oversampled grid. + """ + # Evaluate model on oversampled grid + x = np.arange(x_range[0] - 0.5 * (1 - 1 / factor), + x_range[1] + 0.5 * (1 + 1 / factor), 1. / factor) + + y = np.arange(y_range[0] - 0.5 * (1 - 1 / factor), + y_range[1] + 0.5 * (1 + 1 / factor), 1. / factor) + x_grid, y_grid = np.meshgrid(x, y) + values = model(x_grid, y_grid) + + # Reshape and compute mean + shape = (y.size // factor, factor, x.size // factor, factor) + values = np.reshape(values, shape) + return values.mean(axis=3).mean(axis=1)[:-1, :-1] + + +def discretize_integrate_1D(model, x_range): + """ + Discretize model by integrating numerically the model over the bin. + """ + from scipy.integrate import quad + # Set up grid + x = np.arange(x_range[0] - 0.5, x_range[1] + 0.5) + values = np.array([]) + + # Integrate over all bins + for i in range(x.size - 1): + values = np.append(values, quad(model, x[i], x[i + 1])[0]) + return values + + +def discretize_integrate_2D(model, x_range, y_range): + """ + Discretize model by integrating the model over the pixel. 
+ """ + from scipy.integrate import dblquad + # Set up grid + x = np.arange(x_range[0] - 0.5, x_range[1] + 0.5) + y = np.arange(y_range[0] - 0.5, y_range[1] + 0.5) + values = np.empty((y.size - 1, x.size - 1)) + + # Integrate over all pixels + for i in range(x.size - 1): + for j in range(y.size - 1): + values[j, i] = dblquad(lambda y, x: model(x, y), x[i], x[i + 1], + lambda x: y[j], lambda x: y[j + 1])[0] + return values diff --git a/astropy/coordinates/__init__.py b/astropy/coordinates/__init__.py new file mode 100644 index 0000000..0b83b10 --- /dev/null +++ b/astropy/coordinates/__init__.py @@ -0,0 +1,43 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +""" +This subpackage contains classes and functions for celestial coordinates +of astronomical objects. It also contains a framework for conversions +between coordinate systems. +""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +from .errors import * +from .angles import * +from .baseframe import * +from .attributes import * +from .distances import * +from .earth import * +from .transformations import * +from .builtin_frames import * +from .name_resolve import * +from .matching import * +from .representation import * +from .sky_coordinate import * +from .funcs import * +from .calculation import * +from .solar_system import * + +# This is for backwards-compatibility -- can be removed in v3.0 when the +# deprecation warnings are removed +from .attributes import (TimeFrameAttribute, QuantityFrameAttribute, + CartesianRepresentationFrameAttribute) + +__doc__ += builtin_frames._transform_graph_docs + """ + +.. note:: + + The ecliptic coordinate systems (added in Astropy v1.1) have not been + extensively tested for accuracy or consistency with other implementations of + ecliptic coordinates. We welcome contributions to add such testing, but in + the meantime, users who depend on consistency with other implementations may + wish to check test inputs against good datasets before using Astropy's + ecliptic coordinates. 
+ +""" diff --git a/astropy/coordinates/angle_lextab.py b/astropy/coordinates/angle_lextab.py new file mode 100644 index 0000000..819c826 --- /dev/null +++ b/astropy/coordinates/angle_lextab.py @@ -0,0 +1,13 @@ +# -*- coding: utf-8 -*- +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, division, print_function, unicode_literals) + +_tabversion = '3.8' +_lextokens = set(('UINT', 'SIMPLE_UNIT', 'DEGREE', 'MINUTE', 'HOUR', 'COLON', 'UFLOAT', 'SIGN', 'SECOND')) +_lexreflags = 0 +_lexliterals = '' +_lexstateinfo = {'INITIAL': 'inclusive'} +_lexstatere = {'INITIAL': [('(?P((\\d+\\.\\d*)|(\\.\\d+))([eE][+-−]?\\d+)?)|(?P\\d+)|(?P[+−-])|(?P(?:karcsec)|(?:uarcsec)|(?:Earcmin)|(?:Zdeg)|(?:crad)|(?:cycle)|(?:hectoradian)|(?:Yarcmin)|(?:kiloarcsecond)|(?:zeptoarcminute)|(?:adeg)|(?:darcmin)|(?:ddeg)|(?:exaradian)|(?:parcsec)|(?:yoctoradian)|(?:arcsecond)|(?:petadegree)|(?:petaarcminute)|(?:microarcsecond)|(?:mas)|(?:parcmin)|(?:hdeg)|(?:narcmin)|(?:attodegree)|(?:kilodegree)|(?:zettaradian)|(?:fdeg)|(?:zeptoradian)|(?:microradian)|(?:Gdeg)|(?:hectodegree)|(?:attoarcsecond)|(?:Marcmin)|(?:exadegree)|(?:femtodegree)|(?:yottaradian)|(?:pdeg)|(?:zarcmin)|(?:kiloarcminute)|(?:urad)|(?:teraarcsecond)|(?:nrad)|(?:carcsec)|(?:Pdeg)|(?:Yrad)|(?:yrad)|(?:picoarcsecond)|(?:aarcsec)|(?:dekaradian)|(?:Zrad)|(?:femtoradian)|(?:yarcsec)|(?:arcmin)|(?:arcsec)|(?:yottadegree)|(?:drad)|(?:dekadegree)|(?:zdeg)|(?:zeptoarcsecond)|(?:farcmin)|(?:Parcmin)|(?:decaarcminute)|(?:nanoarcminute)|(?:nanoarcsecond)|(?:Tdeg)|(?:decaarcsecond)|(?:nanodegree)|(?:farcsec)|(?:femtoarcminute)|(?:microdegree)|(?:deciarcsecond)|(?:deciarcminute)|(?:attoradian)|(?:dadeg)|(?:decidegree)|(?:hectoarcminute)|(?:milliarcsecond)|(?:femtoarcsecond)|(?:megaarcminute)|(?:yoctoarcminute)|(?:zrad)|(?:hectoarcsecond)|(?:frad)|(?:centiarcsecond)|(?:carcmin)|(?:Garcmin)|(?:decadegree)|(?:Grad)|(?:petaarcsecond)|(?:gigaarcsecond)|(?:megaradian)|(?:Tarcsec)|(?:Prad)|(?:zettadegree)|(?:yottaarcminute)|(?:mrad)|(?:yottaarcsecond)|(?:exaarcminute)|(?:harcmin)|(?:dekaarcsecond)|(?:cy)|(?:ndeg)|(?:teraradian)|(?:teradegree)|(?:Zarcsec)|(?:gigadegree)|(?:Mdeg)|(?:Mrad)|(?:centiarcminute)|(?:uarcmin)|(?:picoradian)|(?:radian)|(?:ydeg)|(?:milliarcminute)|(?:deciradian)|(?:narcsec)|(?:Trad)|(?:picodegree)|(?:yoctodegree)|(?:zettaarcminute)|(?:daarcmin)|(?:arcminute)|(?:yarcmin)|(?:kdeg)|(?:Earcsec)|(?:Edeg)|(?:harcsec)|(?:rad)|(?:centidegree)|(?:Garcsec)|(?:marcsec)|(?:megaarcsecond)|(?:attoarcminute)|(?:cdeg)|(?:Erad)|(?:kiloradian)|(?:daarcsec)|(?:Parcsec)|(?:megadegree)|(?:millidegree)|(?:centiradian)|(?:uas)|(?:teraarcminute)|(?:prad)|(?:yoctoarcsecond)|(?:hrad)|(?:picoarcminute)|(?:petaradian)|(?:Marcsec)|(?:marcmin)|(?:Tarcmin)|(?:zeptodegree)|(?:Yarcsec)|(?:gigaarcminute)|(?:Zarcmin)|(?:arad)|(?:karcmin)|(?:darcsec)|(?:exaarcsecond)|(?:nanoradian)|(?:udeg)|(?:zarcsec)|(?:Ydeg)|(?:decaradian)|(?:milliradian)|(?:aarcmin)|(?:zettaarcsecond)|(?:darad)|(?:microarcminute)|(?:mdeg)|(?:dekaarcminute)|(?:krad)|(?:gigaradian))|(?Pm(in(ute(s)?)?)?|′|\\\'|ᵐ)|(?Ps(ec(ond(s)?)?)?|″|\\"|ˢ)|(?Pd(eg(ree(s)?)?)?|°)|(?Phour(s)?|h(r)?|ʰ)|(?P:)', [None, ('t_UFLOAT', 'UFLOAT'), None, None, None, None, ('t_UINT', 'UINT'), ('t_SIGN', 'SIGN'), ('t_SIMPLE_UNIT', 'SIMPLE_UNIT'), (None, 'MINUTE'), None, None, None, (None, 'SECOND'), None, None, None, (None, 'DEGREE'), None, None, None, (None, 'HOUR'), None, None, (None, 'COLON')])]} +_lexstateignore = {'INITIAL': ' '} +_lexstateerrorf = {'INITIAL': 't_error'} +_lexstateeoff = {} 
diff --git a/astropy/coordinates/angle_parsetab.py b/astropy/coordinates/angle_parsetab.py new file mode 100644 index 0000000..2789773 --- /dev/null +++ b/astropy/coordinates/angle_parsetab.py @@ -0,0 +1,66 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, division, print_function, unicode_literals) + + +# This file is automatically generated. Do not edit. +_tabversion = '3.8' + +_lr_method = 'LALR' + +_lr_signature = 'DA395940D76FFEB6A68EA2DB16FC015D' + +_lr_action_items = {'UINT':([0,2,10,12,19,20,22,23,35,36,38,],[-7,12,-6,19,25,27,29,32,25,25,25,]),'MINUTE':([4,6,8,12,13,19,21,24,25,26,27,28,29,31,32,34,40,],[16,-14,-15,-17,-16,-9,-12,-8,-9,-13,-9,-10,36,37,38,39,-11,]),'COLON':([12,27,],[20,35,]),'$end':([1,3,4,5,6,7,8,9,11,12,13,14,15,16,17,18,19,21,22,23,24,25,26,27,28,29,30,31,32,33,34,36,37,38,39,40,41,42,43,44,],[-4,-1,-32,-5,-14,-2,-15,-3,0,-17,-16,-33,-31,-35,-24,-34,-9,-12,-25,-18,-8,-9,-13,-9,-10,-9,-26,-8,-9,-19,-8,-27,-28,-20,-21,-11,-29,-22,-30,-23,]),'SIMPLE_UNIT':([4,6,8,12,13,19,21,24,25,26,27,28,40,],[14,-14,-15,-17,-16,-9,-12,-8,-9,-13,-9,-10,-11,]),'DEGREE':([4,6,8,12,13,19,21,24,25,26,27,28,40,],[15,-14,-15,22,-16,-9,-12,-8,-9,-13,-9,-10,-11,]),'UFLOAT':([0,2,10,12,19,20,22,23,35,36,38,],[-7,13,-6,24,24,24,31,34,24,24,24,]),'HOUR':([4,6,8,12,13,19,21,24,25,26,27,28,40,],[17,-14,-15,23,-16,-9,-12,-8,-9,-13,-9,-10,-11,]),'SECOND':([4,6,8,12,13,19,21,24,25,26,27,28,40,41,42,],[18,-14,-15,-17,-16,-9,-12,-8,-9,-13,-9,-10,-11,43,44,]),'SIGN':([0,],[10,]),} + +_lr_action = {} +for _k, _v in _lr_action_items.items(): + for _x,_y in zip(_v[0],_v[1]): + if not _x in _lr_action: _lr_action[_x] = {} + _lr_action[_x][_k] = _y +del _lr_action_items + +_lr_goto_items = {'ufloat':([12,19,20,22,23,35,36,38,],[21,26,28,30,33,40,41,42,]),'generic':([0,],[4,]),'arcminute':([0,],[1,]),'simple':([0,],[5,]),'sign':([0,],[2,]),'colon':([0,],[6,]),'dms':([0,],[7,]),'hms':([0,],[3,]),'spaced':([0,],[8,]),'angle':([0,],[11,]),'arcsecond':([0,],[9,]),} + +_lr_goto = {} +for _k, _v in _lr_goto_items.items(): + for _x, _y in zip(_v[0], _v[1]): + if not _x in _lr_goto: _lr_goto[_x] = {} + _lr_goto[_x][_k] = _y +del _lr_goto_items +_lr_productions = [ + ("S' -> angle","S'",1,None,None,None), + ('angle -> hms','angle',1,'p_angle','angle_utilities.py',134), + ('angle -> dms','angle',1,'p_angle','angle_utilities.py',135), + ('angle -> arcsecond','angle',1,'p_angle','angle_utilities.py',136), + ('angle -> arcminute','angle',1,'p_angle','angle_utilities.py',137), + ('angle -> simple','angle',1,'p_angle','angle_utilities.py',138), + ('sign -> SIGN','sign',1,'p_sign','angle_utilities.py',144), + ('sign -> ','sign',0,'p_sign','angle_utilities.py',145), + ('ufloat -> UFLOAT','ufloat',1,'p_ufloat','angle_utilities.py',154), + ('ufloat -> UINT','ufloat',1,'p_ufloat','angle_utilities.py',155), + ('colon -> sign UINT COLON ufloat','colon',4,'p_colon','angle_utilities.py',161), + ('colon -> sign UINT COLON UINT COLON ufloat','colon',6,'p_colon','angle_utilities.py',162), + ('spaced -> sign UINT ufloat','spaced',3,'p_spaced','angle_utilities.py',171), + ('spaced -> sign UINT UINT ufloat','spaced',4,'p_spaced','angle_utilities.py',172), + ('generic -> colon','generic',1,'p_generic','angle_utilities.py',181), + ('generic -> spaced','generic',1,'p_generic','angle_utilities.py',182), + ('generic -> sign UFLOAT','generic',2,'p_generic','angle_utilities.py',183), + ('generic -> sign UINT','generic',2,'p_generic','angle_utilities.py',184), + ('hms -> sign UINT 
HOUR','hms',3,'p_hms','angle_utilities.py',193), + ('hms -> sign UINT HOUR ufloat','hms',4,'p_hms','angle_utilities.py',194), + ('hms -> sign UINT HOUR UINT MINUTE','hms',5,'p_hms','angle_utilities.py',195), + ('hms -> sign UINT HOUR UFLOAT MINUTE','hms',5,'p_hms','angle_utilities.py',196), + ('hms -> sign UINT HOUR UINT MINUTE ufloat','hms',6,'p_hms','angle_utilities.py',197), + ('hms -> sign UINT HOUR UINT MINUTE ufloat SECOND','hms',7,'p_hms','angle_utilities.py',198), + ('hms -> generic HOUR','hms',2,'p_hms','angle_utilities.py',199), + ('dms -> sign UINT DEGREE','dms',3,'p_dms','angle_utilities.py',212), + ('dms -> sign UINT DEGREE ufloat','dms',4,'p_dms','angle_utilities.py',213), + ('dms -> sign UINT DEGREE UINT MINUTE','dms',5,'p_dms','angle_utilities.py',214), + ('dms -> sign UINT DEGREE UFLOAT MINUTE','dms',5,'p_dms','angle_utilities.py',215), + ('dms -> sign UINT DEGREE UINT MINUTE ufloat','dms',6,'p_dms','angle_utilities.py',216), + ('dms -> sign UINT DEGREE UINT MINUTE ufloat SECOND','dms',7,'p_dms','angle_utilities.py',217), + ('dms -> generic DEGREE','dms',2,'p_dms','angle_utilities.py',218), + ('simple -> generic','simple',1,'p_simple','angle_utilities.py',231), + ('simple -> generic SIMPLE_UNIT','simple',2,'p_simple','angle_utilities.py',232), + ('arcsecond -> generic SECOND','arcsecond',2,'p_arcsecond','angle_utilities.py',241), + ('arcminute -> generic MINUTE','arcminute',2,'p_arcminute','angle_utilities.py',247), +] diff --git a/astropy/coordinates/angle_utilities.py b/astropy/coordinates/angle_utilities.py new file mode 100644 index 0000000..cfa894e --- /dev/null +++ b/astropy/coordinates/angle_utilities.py @@ -0,0 +1,697 @@ +# -*- coding: utf-8 -*- +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +# Note that files generated by lex/yacc not always fully py 2/3 compatible. +# Hence, the ``clean_parse_tables.py`` tool in the astropy-tools +# (https://github.com/astropy/astropy-tools) repository should be used to fix +# this when/if lextab/parsetab files are re-generated. + +""" +This module contains utility functions that are for internal use in +astropy.coordinates.angles. Mainly they are conversions from one format +of data to another. +""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import os +from warnings import warn + +import numpy as np + +from .errors import (IllegalHourWarning, IllegalHourError, + IllegalMinuteWarning, IllegalMinuteError, + IllegalSecondWarning, IllegalSecondError) +from ..utils import format_exception +from .. import units as u + + +class _AngleParser(object): + """ + Parses the various angle formats including: + + * 01:02:30.43 degrees + * 1 2 0 hours + * 1°2′3″ + * 1d2m3s + * -1h2m3s + + This class should not be used directly. Use `parse_angle` + instead. + """ + def __init__(self): + # TODO: in principle, the parser should be invalidated if we change unit + # system (from CDS to FITS, say). Might want to keep a link to the + # unit_registry used, and regenerate the parser/lexer if it changes. + # Alternatively, perhaps one should not worry at all and just pre- + # generate the parser for each release (as done for unit formats). 
+ # For some discussion of this problem, see + # https://github.com/astropy/astropy/issues/5350#issuecomment-248770151 + if '_parser' not in _AngleParser.__dict__: + _AngleParser._parser, _AngleParser._lexer = self._make_parser() + + @classmethod + def _get_simple_unit_names(cls): + simple_units = set( + u.radian.find_equivalent_units(include_prefix_units=True)) + simple_unit_names = set() + # We filter out degree and hourangle, since those are treated + # separately. + for unit in simple_units: + if unit != u.deg and unit != u.hourangle: + simple_unit_names.update(unit.names) + return list(simple_unit_names) + + @classmethod + def _make_parser(cls): + from ..extern.ply import lex, yacc + + # List of token names. + tokens = ( + 'SIGN', + 'UINT', + 'UFLOAT', + 'COLON', + 'DEGREE', + 'HOUR', + 'MINUTE', + 'SECOND', + 'SIMPLE_UNIT' + ) + + # NOTE THE ORDERING OF THESE RULES IS IMPORTANT!! + # Regular expression rules for simple tokens + def t_UFLOAT(t): + r'((\d+\.\d*)|(\.\d+))([eE][+-−]?\d+)?' + # The above includes Unicode "MINUS SIGN" \u2212. It is + # important to include the hyphen last, or the regex will + # treat this as a range. + t.value = float(t.value.replace('−', '-')) + return t + + def t_UINT(t): + r'\d+' + t.value = int(t.value) + return t + + def t_SIGN(t): + r'[+−-]' + # The above include Unicode "MINUS SIGN" \u2212. It is + # important to include the hyphen last, or the regex will + # treat this as a range. + if t.value == '+': + t.value = 1.0 + else: + t.value = -1.0 + return t + + def t_SIMPLE_UNIT(t): + t.value = u.Unit(t.value) + return t + t_SIMPLE_UNIT.__doc__ = '|'.join( + '(?:{0})'.format(x) for x in cls._get_simple_unit_names()) + + t_COLON = ':' + t_DEGREE = r'd(eg(ree(s)?)?)?|°' + t_HOUR = r'hour(s)?|h(r)?|ʰ' + t_MINUTE = r'm(in(ute(s)?)?)?|′|\'|ᵐ' + t_SECOND = r's(ec(ond(s)?)?)?|″|\"|ˢ' + + # A string containing ignored characters (spaces) + t_ignore = ' ' + + # Error handling rule + def t_error(t): + raise ValueError( + "Invalid character at col {0}".format(t.lexpos)) + + # Build the lexer + # PY2: need str() to ensure we do not pass on a unicode object. 
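+ # The optimized table is written out as angle_lextab.py next to this + # module (see the lextab and outputdir arguments below).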
+ lexer = lex.lex(optimize=True, lextab=str('angle_lextab'), + outputdir=os.path.dirname(__file__)) + + def p_angle(p): + ''' + angle : hms + | dms + | arcsecond + | arcminute + | simple + ''' + p[0] = p[1] + + def p_sign(p): + ''' + sign : SIGN + | + ''' + if len(p) == 2: + p[0] = p[1] + else: + p[0] = 1.0 + + def p_ufloat(p): + ''' + ufloat : UFLOAT + | UINT + ''' + p[0] = float(p[1]) + + def p_colon(p): + ''' + colon : sign UINT COLON ufloat + | sign UINT COLON UINT COLON ufloat + ''' + if len(p) == 5: + p[0] = (p[1] * p[2], p[4]) + elif len(p) == 7: + p[0] = (p[1] * p[2], p[4], p[6]) + + def p_spaced(p): + ''' + spaced : sign UINT ufloat + | sign UINT UINT ufloat + ''' + if len(p) == 4: + p[0] = (p[1] * p[2], p[3]) + elif len(p) == 5: + p[0] = (p[1] * p[2], p[3], p[4]) + + def p_generic(p): + ''' + generic : colon + | spaced + | sign UFLOAT + | sign UINT + ''' + if len(p) == 2: + p[0] = p[1] + else: + p[0] = p[1] * p[2] + + def p_hms(p): + ''' + hms : sign UINT HOUR + | sign UINT HOUR ufloat + | sign UINT HOUR UINT MINUTE + | sign UINT HOUR UFLOAT MINUTE + | sign UINT HOUR UINT MINUTE ufloat + | sign UINT HOUR UINT MINUTE ufloat SECOND + | generic HOUR + ''' + if len(p) == 3: + p[0] = (p[1], u.hourangle) + elif len(p) == 4: + p[0] = (p[1] * p[2], u.hourangle) + elif len(p) in (5, 6): + p[0] = ((p[1] * p[2], p[4]), u.hourangle) + elif len(p) in (7, 8): + p[0] = ((p[1] * p[2], p[4], p[6]), u.hourangle) + + def p_dms(p): + ''' + dms : sign UINT DEGREE + | sign UINT DEGREE ufloat + | sign UINT DEGREE UINT MINUTE + | sign UINT DEGREE UFLOAT MINUTE + | sign UINT DEGREE UINT MINUTE ufloat + | sign UINT DEGREE UINT MINUTE ufloat SECOND + | generic DEGREE + ''' + if len(p) == 3: + p[0] = (p[1], u.degree) + elif len(p) == 4: + p[0] = (p[1] * p[2], u.degree) + elif len(p) in (5, 6): + p[0] = ((p[1] * p[2], p[4]), u.degree) + elif len(p) in (7, 8): + p[0] = ((p[1] * p[2], p[4], p[6]), u.degree) + + def p_simple(p): + ''' + simple : generic + | generic SIMPLE_UNIT + ''' + if len(p) == 2: + p[0] = (p[1], None) + else: + p[0] = (p[1], p[2]) + + def p_arcsecond(p): + ''' + arcsecond : generic SECOND + ''' + p[0] = (p[1], u.arcsecond) + + def p_arcminute(p): + ''' + arcminute : generic MINUTE + ''' + p[0] = (p[1], u.arcminute) + + def p_error(p): + raise ValueError + + # PY2: need str() to ensure we do not pass on a unicode object. + parser = yacc.yacc(debug=False, tabmodule=str('angle_parsetab'), + outputdir=os.path.dirname(__file__), + write_tables=True) + + return parser, lexer + + def parse(self, angle, unit, debug=False): + try: + found_angle, found_unit = self._parser.parse( + angle, lexer=self._lexer, debug=debug) + except ValueError as e: + if str(e): + raise ValueError("{0} in angle {1!r}".format( + str(e), angle)) + else: + raise ValueError( + "Syntax error parsing angle {0!r}".format(angle)) + + if unit is None and found_unit is None: + raise u.UnitsError("No unit specified") + + return found_angle, found_unit + + +def _check_hour_range(hrs): + """ + Checks that the given value is in the range (-24, 24). + """ + if np.any(np.abs(hrs) == 24.): + warn(IllegalHourWarning(hrs, 'Treating as 24 hr')) + elif np.any(hrs < -24.) or np.any(hrs > 24.): + raise IllegalHourError(hrs) + + +def _check_minute_range(m): + """ + Checks that the given value is in the range [0,60]. If the value + is equal to 60, then a warning is raised. + """ + if np.any(m == 60.): + warn(IllegalMinuteWarning(m, 'Treating as 0 min, +1 hr/deg')) + elif np.any(m < -60.) 
or np.any(m > 60.):
+        # "Error: minutes not in range [-60,60) ({0}).".format(min))
+        raise IllegalMinuteError(m)
+
+
+def _check_second_range(sec):
+    """
+    Checks that the given value is in the range [0,60].  If the value
+    is equal to 60, then a warning is raised.
+    """
+    if np.any(sec == 60.):
+        warn(IllegalSecondWarning(sec, 'Treating as 0 sec, +1 min'))
+    elif sec is None:
+        pass
+    elif np.any(sec < -60.) or np.any(sec > 60.):
+        # "Error: seconds not in range [-60,60) ({0}).".format(sec))
+        raise IllegalSecondError(sec)
+
+
+def check_hms_ranges(h, m, s):
+    """
+    Checks that the given hour, minute and second are all within
+    reasonable range.
+    """
+    _check_hour_range(h)
+    _check_minute_range(m)
+    _check_second_range(s)
+    return None
+
+
+def parse_angle(angle, unit=None, debug=False):
+    """
+    Parses an input string value into an angle value.
+
+    Parameters
+    ----------
+    angle : str
+        A string representing the angle.  May be in one of the following forms:
+
+            * 01:02:30.43 degrees
+            * 1 2 0 hours
+            * 1°2′3″
+            * 1d2m3s
+            * -1h2m3s
+
+    unit : `~astropy.units.UnitBase` instance, optional
+        The unit used to interpret the string.  If ``unit`` is not
+        provided, the unit must be explicitly represented in the
+        string, either at the end or as number separators.
+
+    debug : bool, optional
+        If `True`, print debugging information from the parser.
+
+    Returns
+    -------
+    value, unit : tuple
+        ``value`` is the value as a floating point number or three-part
+        tuple, and ``unit`` is a `Unit` instance which is either the
+        unit passed in or the one explicitly mentioned in the input
+        string.
+    """
+    return _AngleParser().parse(angle, unit, debug=debug)
+
+
+def degrees_to_dms(d):
+    """
+    Convert a floating-point degree value into a ``(degree, arcminute,
+    arcsecond)`` tuple.
+    """
+    sign = np.copysign(1.0, d)
+
+    (df, d) = np.modf(np.abs(d))  # (degree fraction, degree)
+    (mf, m) = np.modf(df * 60.)  # (minute fraction, minute)
+    s = mf * 60.
+
+    return np.floor(sign * d), sign * np.floor(m), sign * s
+
+
+def dms_to_degrees(d, m, s=None):
+    """
+    Convert degrees, arcminute, arcsecond to a float degrees value.
+    """
+
+    _check_minute_range(m)
+    _check_second_range(s)
+
+    # determine sign
+    sign = np.copysign(1.0, d)
+
+    try:
+        d = np.floor(np.abs(d))
+        if s is None:
+            m = np.abs(m)
+            s = 0
+        else:
+            m = np.floor(np.abs(m))
+            s = np.abs(s)
+    except ValueError:
+        raise ValueError(format_exception(
+            "{func}: dms values ({1[0]},{2[1]},{3[2]}) could not be "
+            "converted to numbers.", d, m, s))
+
+    return sign * (d + m / 60. + s / 3600.)
+
+
+def hms_to_hours(h, m, s=None):
+    """
+    Convert hour, minute, second to a float hour value.
+    """
+
+    check_hms_ranges(h, m, s)
+
+    # determine sign
+    sign = np.copysign(1.0, h)
+
+    try:
+        h = np.floor(np.abs(h))
+        if s is None:
+            m = np.abs(m)
+            s = 0
+        else:
+            m = np.floor(np.abs(m))
+            s = np.abs(s)
+    except ValueError:
+        raise ValueError(format_exception(
+            "{func}: HMS values ({1[0]},{2[1]},{3[2]}) could not be "
+            "converted to numbers.", h, m, s))
+
+    return sign * (h + m / 60. + s / 3600.)
+
+
+def hms_to_degrees(h, m, s):
+    """
+    Convert hour, minute, second to a float degrees value.
+    """
+
+    return hms_to_hours(h, m, s) * 15.
+
+
+def hms_to_radians(h, m, s):
+    """
+    Convert hour, minute, second to a float radians value.
+    """
+
+    return u.degree.to(u.radian, hms_to_degrees(h, m, s))
+
+
+def hms_to_dms(h, m, s):
+    """
+    Convert hours, minutes, seconds to a ``(degree, arcminute, arcsecond)``
+    tuple.
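+
+    For example, ``hms_to_dms(1, 0, 0)`` gives ``(15.0, 0.0, 0.0)``.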
+ """ + + return degrees_to_dms(hms_to_degrees(h, m, s)) + + +def hours_to_decimal(h): + """ + Convert any parseable hour value into a float value. + """ + from . import angles + return angles.Angle(h, unit=u.hourangle).hour + + +def hours_to_radians(h): + """ + Convert an angle in Hours to Radians. + """ + + return u.hourangle.to(u.radian, h) + + +def hours_to_hms(h): + """ + Convert an floating-point hour value into an ``(hour, minute, + second)`` tuple. + """ + + sign = np.copysign(1.0, h) + + (hf, h) = np.modf(np.abs(h)) # (degree fraction, degree) + (mf, m) = np.modf(hf * 60.0) # (minute fraction, minute) + s = mf * 60.0 + + return (np.floor(sign * h), sign * np.floor(m), sign * s) + + +def radians_to_degrees(r): + """ + Convert an angle in Radians to Degrees. + """ + return u.radian.to(u.degree, r) + + +def radians_to_hours(r): + """ + Convert an angle in Radians to Hours. + """ + return u.radian.to(u.hourangle, r) + + +def radians_to_hms(r): + """ + Convert an angle in Radians to an ``(hour, minute, second)`` tuple. + """ + + hours = radians_to_hours(r) + return hours_to_hms(hours) + + +def radians_to_dms(r): + """ + Convert an angle in Radians to an ``(degree, arcminute, + arcsecond)`` tuple. + """ + + degrees = u.radian.to(u.degree, r) + return degrees_to_dms(degrees) + + +def sexagesimal_to_string(values, precision=None, pad=False, sep=(':',), + fields=3): + """ + Given an already separated tuple of sexagesimal values, returns + a string. + + See `hours_to_string` and `degrees_to_string` for a higher-level + interface to this functionality. + """ + + # If the coordinates are negative, we need to take the absolute value of + # the (arc)minutes and (arc)seconds. We need to use np.abs because abs(-0) + # is -0. + values = (values[0], np.abs(values[1]), np.abs(values[2])) + + if pad: + # Check to see if values[0] is negative, using np.copysign to handle -0 + if np.copysign(1.0, values[0]) == -1: + pad = 3 + else: + pad = 2 + else: + pad = 0 + + if not isinstance(sep, tuple): + sep = tuple(sep) + + if fields < 1 or fields > 3: + raise ValueError( + "fields must be 1, 2, or 3") + + if not sep: # empty string, False, or None, etc. + sep = ('', '', '') + elif len(sep) == 1: + if fields == 3: + sep = sep + (sep[0], '') + elif fields == 2: + sep = sep + ('', '') + else: + sep = ('', '', '') + elif len(sep) == 2: + sep = sep + ('',) + elif len(sep) != 3: + raise ValueError( + "Invalid separator specification for converting angle to string.") + + # Simplify the expression based on the requested precision. For + # example, if the seconds will round up to 60, we should convert + # it to 0 and carry upwards. If the field is hidden (by the + # fields kwarg) we round up around the middle, 30.0. 
+    if precision is None:
+        rounding_thresh = 60.0 - (10.0 ** -4)
+    else:
+        rounding_thresh = 60.0 - (10.0 ** -precision)
+
+    values = list(values)
+    if fields == 3 and values[2] >= rounding_thresh:
+        values[2] = 0.0
+        values[1] += 1.0
+    elif fields < 3 and values[2] >= 30.0:
+        values[1] += 1.0
+
+    if fields >= 2 and int(values[1]) >= 60.0:
+        values[1] = 0.0
+        values[0] += 1.0
+    elif fields < 2 and int(values[1]) >= 30.0:
+        values[0] += 1.0
+
+    literal = []
+    last_value = ''
+    literal.append('{0:0{pad}.0f}{sep[0]}')
+    if fields >= 2:
+        literal.append('{1:02d}{sep[1]}')
+    if fields == 3:
+        if precision is None:
+            last_value = '{0:.4f}'.format(abs(values[2]))
+            last_value = last_value.rstrip('0').rstrip('.')
+        else:
+            last_value = '{0:.{precision}f}'.format(
+                abs(values[2]), precision=precision)
+        if len(last_value) == 1 or last_value[1] == '.':
+            last_value = '0' + last_value
+        literal.append('{last_value}{sep[2]}')
+    literal = ''.join(literal)
+    return literal.format(values[0], int(abs(values[1])), abs(values[2]),
+                          sep=sep, pad=pad,
+                          last_value=last_value)
+
+
+def hours_to_string(h, precision=5, pad=False, sep=('h', 'm', 's'),
+                    fields=3):
+    """
+    Takes a decimal hour value and returns a string formatted as hms with
+    separator specified by the 'sep' parameter.
+
+    ``h`` must be a scalar.
+    """
+    h, m, s = hours_to_hms(h)
+    return sexagesimal_to_string((h, m, s), precision=precision, pad=pad,
+                                 sep=sep, fields=fields)
+
+
+def degrees_to_string(d, precision=5, pad=False, sep=':', fields=3):
+    """
+    Takes a decimal degree value and returns a string formatted as dms with
+    separator specified by the 'sep' parameter.
+
+    ``d`` must be a scalar.
+    """
+    d, m, s = degrees_to_dms(d)
+    return sexagesimal_to_string((d, m, s), precision=precision, pad=pad,
+                                 sep=sep, fields=fields)
+
+
+def angular_separation(lon1, lat1, lon2, lat2):
+    """
+    Angular separation between two points on a sphere.
+
+    Parameters
+    ----------
+    lon1, lat1, lon2, lat2 : `Angle`, `~astropy.units.Quantity` or float
+        Longitude and latitude of the two points.  Quantities should be in
+        angular units; floats in radians.
+
+    Returns
+    -------
+    angular separation : `~astropy.units.Quantity` or float
+        Type depends on input; `Quantity` in angular units, or float in
+        radians.
+
+    Notes
+    -----
+    The angular separation is calculated using the Vincenty formula [1]_,
+    which is slightly more complex and computationally expensive than
+    some alternatives, but is stable at all distances, including the
+    poles and antipodes.
+
+    .. [1] http://en.wikipedia.org/wiki/Great-circle_distance
+    """
+
+    sdlon = np.sin(lon2 - lon1)
+    cdlon = np.cos(lon2 - lon1)
+    slat1 = np.sin(lat1)
+    slat2 = np.sin(lat2)
+    clat1 = np.cos(lat1)
+    clat2 = np.cos(lat2)
+
+    num1 = clat2 * sdlon
+    num2 = clat1 * slat2 - slat1 * clat2 * cdlon
+    denominator = slat1 * slat2 + clat1 * clat2 * cdlon
+
+    return np.arctan2(np.hypot(num1, num2), denominator)
+
+
+def position_angle(lon1, lat1, lon2, lat2):
+    """
+    Position Angle (East of North) between two points on a sphere.
+
+    Parameters
+    ----------
+    lon1, lat1, lon2, lat2 : `Angle`, `~astropy.units.Quantity` or float
+        Longitude and latitude of the two points.  Quantities should be in
+        angular units; floats in radians.
+
+    Returns
+    -------
+    pa : `~astropy.coordinates.Angle`
+        The (positive) position angle of the vector pointing from position 1 to
+        position 2.  If any of the angles are arrays, this will contain an array
+        following the appropriate `numpy` broadcasting rules.
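+        For example, the position angle of a point due East of another
+        point on the equator is 90 degrees.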
+ + """ + from .angles import Angle + + deltalon = lon2 - lon1 + colat = np.cos(lat2) + + x = np.sin(lat2) * np.cos(lat1) - colat * np.sin(lat1) * np.cos(deltalon) + y = np.sin(deltalon) * colat + + return Angle(np.arctan2(y, x), u.radian).wrap_at(360*u.deg) diff --git a/astropy/coordinates/angles.py b/astropy/coordinates/angles.py new file mode 100644 index 0000000..b9c5145 --- /dev/null +++ b/astropy/coordinates/angles.py @@ -0,0 +1,667 @@ +# -*- coding: utf-8 -*- +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +""" +This module contains the fundamental classes used for representing +coordinates in astropy. +""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import math +from collections import namedtuple + +import numpy as np + +from ..extern import six +from . import angle_utilities as util +from .. import units as u +from ..utils import isiterable + +__all__ = ['Angle', 'Latitude', 'Longitude'] + + +# these are used by the `hms` and `dms` attributes +hms_tuple = namedtuple('hms_tuple', ('h', 'm', 's')) +dms_tuple = namedtuple('dms_tuple', ('d', 'm', 's')) +signed_dms_tuple = namedtuple('signed_dms_tuple', ('sign', 'd', 'm', 's')) + + +class Angle(u.SpecificTypeQuantity): + """ + One or more angular value(s) with units equivalent to radians or degrees. + + An angle can be specified either as an array, scalar, tuple (see + below), string, `~astropy.units.Quantity` or another + :class:`~astropy.coordinates.Angle`. + + The input parser is flexible and supports a variety of formats:: + + Angle('10.2345d') + Angle(['10.2345d', '-20d']) + Angle('1:2:30.43 degrees') + Angle('1 2 0 hours') + Angle(np.arange(1, 8), unit=u.deg) + Angle('1°2′3″') + Angle('1d2m3.4s') + Angle('-1h2m3s') + Angle('-1h2.5m') + Angle('-1:2.5', unit=u.deg) + Angle((10, 11, 12), unit='hourangle') # (h, m, s) + Angle((-1, 2, 3), unit=u.deg) # (d, m, s) + Angle(10.2345 * u.deg) + Angle(Angle(10.2345 * u.deg)) + + Parameters + ---------- + angle : `~numpy.array`, scalar, `~astropy.units.Quantity`, :class:`~astropy.coordinates.Angle` + The angle value. If a tuple, will be interpreted as ``(h, m, + s)`` or ``(d, m, s)`` depending on ``unit``. If a string, it + will be interpreted following the rules described above. + + If ``angle`` is a sequence or array of strings, the resulting + values will be in the given ``unit``, or if `None` is provided, + the unit will be taken from the first given value. + + unit : `~astropy.units.UnitBase`, str, optional + The unit of the value specified for the angle. This may be + any string that `~astropy.units.Unit` understands, but it is + better to give an actual unit object. Must be an angular + unit. + + dtype : `~numpy.dtype`, optional + See `~astropy.units.Quantity`. + + copy : bool, optional + See `~astropy.units.Quantity`. + + Raises + ------ + `~astropy.units.UnitsError` + If a unit is not provided or it is not an angular unit. 
+ """ + _equivalent_unit = u.radian + _include_easy_conversion_members = True + + def __new__(cls, angle, unit=None, dtype=None, copy=True): + + if not isinstance(angle, u.Quantity): + if unit is not None: + unit = cls._convert_unit_to_angle_unit(u.Unit(unit)) + + if isinstance(angle, tuple): + angle = cls._tuple_to_float(angle, unit) + + elif isinstance(angle, six.string_types): + angle, angle_unit = util.parse_angle(angle, unit) + if angle_unit is None: + angle_unit = unit + + if isinstance(angle, tuple): + angle = cls._tuple_to_float(angle, angle_unit) + + if angle_unit is not unit: + # Possible conversion to `unit` will be done below. + angle = u.Quantity(angle, angle_unit, copy=False) + + elif (isiterable(angle) and + not (isinstance(angle, np.ndarray) and + angle.dtype.kind not in 'SUVO')): + angle = [Angle(x, unit, copy=False) for x in angle] + + return super(Angle, cls).__new__(cls, angle, unit, dtype=dtype, + copy=copy) + + @staticmethod + def _tuple_to_float(angle, unit): + """ + Converts an angle represented as a 3-tuple or 2-tuple into a floating + point number in the given unit. + """ + # TODO: Numpy array of tuples? + if unit == u.hourangle: + return util.hms_to_hours(*angle) + elif unit == u.degree: + return util.dms_to_degrees(*angle) + else: + raise u.UnitsError("Can not parse '{0}' as unit '{1}'" + .format(angle, unit)) + + @staticmethod + def _convert_unit_to_angle_unit(unit): + return u.hourangle if unit is u.hour else unit + + def _set_unit(self, unit): + super(Angle, self)._set_unit(self._convert_unit_to_angle_unit(unit)) + + @property + def hour(self): + """ + The angle's value in hours (read-only property). + """ + return self.hourangle + + @property + def hms(self): + """ + The angle's value in hours, as a named tuple with ``(h, m, s)`` + members. (This is a read-only property.) + """ + return hms_tuple(*util.hours_to_hms(self.hourangle)) + + @property + def dms(self): + """ + The angle's value in degrees, as a named tuple with ``(d, m, s)`` + members. (This is a read-only property.) + """ + return dms_tuple(*util.degrees_to_dms(self.degree)) + + @property + def signed_dms(self): + """ + The angle's value in degrees, as a named tuple with ``(sign, d, m, s)`` + members. The ``d``, ``m``, ``s`` are thus always positive, and the sign of + the angle is given by ``sign``. (This is a read-only property.) + + This is primarily intended for use with `dms` to generate string + representations of coordinates that are correct for negative angles. + """ + return signed_dms_tuple(np.sign(self.degree), + *util.degrees_to_dms(np.abs(self.degree))) + + def to_string(self, unit=None, decimal=False, sep='fromunit', + precision=None, alwayssign=False, pad=False, + fields=3, format=None): + """ A string representation of the angle. + + Parameters + ---------- + unit : `~astropy.units.UnitBase`, optional + Specifies the unit. Must be an angular unit. If not + provided, the unit used to initialize the angle will be + used. + + decimal : bool, optional + If `True`, a decimal representation will be used, otherwise + the returned string will be in sexagesimal form. + + sep : str, optional + The separator between numbers in a sexagesimal + representation. E.g., if it is ':', the result is + ``'12:41:11.1241'``. Also accepts 2 or 3 separators. E.g., + ``sep='hms'`` would give the result ``'12h41m11.1241s'``, or + sep='-:' would yield ``'11-21:17.124'``. Alternatively, the + special string 'fromunit' means 'dms' if the unit is + degrees, or 'hms' if the unit is hours. 
+ + precision : int, optional + The level of decimal precision. If ``decimal`` is `True`, + this is the raw precision, otherwise it gives the + precision of the last place of the sexagesimal + representation (seconds). If `None`, or not provided, the + number of decimal places is determined by the value, and + will be between 0-8 decimal places as required. + + alwayssign : bool, optional + If `True`, include the sign no matter what. If `False`, + only include the sign if it is negative. + + pad : bool, optional + If `True`, include leading zeros when needed to ensure a + fixed number of characters for sexagesimal representation. + + fields : int, optional + Specifies the number of fields to display when outputting + sexagesimal notation. For example: + + - fields == 1: ``'5d'`` + - fields == 2: ``'5d45m'`` + - fields == 3: ``'5d45m32.5s'`` + + By default, all fields are displayed. + + format : str, optional + The format of the result. If not provided, an unadorned + string is returned. Supported values are: + + - 'latex': Return a LaTeX-formatted string + + - 'unicode': Return a string containing non-ASCII unicode + characters, such as the degree symbol + + Returns + ------- + strrepr : str or array + A string representation of the angle. If the angle is an array, this + will be an array with a unicode dtype. + + + """ + if unit is None: + unit = self.unit + else: + unit = self._convert_unit_to_angle_unit(u.Unit(unit)) + + separators = { + None: { + u.degree: 'dms', + u.hourangle: 'hms'}, + 'latex': { + u.degree: [r'^\circ', r'{}^\prime', r'{}^{\prime\prime}'], + u.hourangle: [r'^\mathrm{h}', r'^\mathrm{m}', r'^\mathrm{s}']}, + 'unicode': { + u.degree: '°′″', + u.hourangle: 'ʰᵐˢ'} + } + + if sep == 'fromunit': + if format not in separators: + raise ValueError("Unknown format '{0}'".format(format)) + seps = separators[format] + if unit in seps: + sep = seps[unit] + + # Create an iterator so we can format each element of what + # might be an array. + if unit is u.degree: + if decimal: + values = self.degree + if precision is not None: + func = ("{0:0." + str(precision) + "f}").format + else: + func = '{0:g}'.format + else: + if sep == 'fromunit': + sep = 'dms' + values = self.degree + func = lambda x: util.degrees_to_string( + x, precision=precision, sep=sep, pad=pad, + fields=fields) + + elif unit is u.hourangle: + if decimal: + values = self.hour + if precision is not None: + func = ("{0:0." + str(precision) + "f}").format + else: + func = '{0:g}'.format + else: + if sep == 'fromunit': + sep = 'hms' + values = self.hour + func = lambda x: util.hours_to_string( + x, precision=precision, sep=sep, pad=pad, + fields=fields) + + elif unit.is_equivalent(u.radian): + if decimal: + values = self.to_value(unit) + if precision is not None: + func = ("{0:1." + str(precision) + "f}").format + else: + func = "{0:g}".format + elif sep == 'fromunit': + values = self.to_value(unit) + unit_string = unit.to_string(format=format) + if format == 'latex': + unit_string = unit_string[1:-1] + + if precision is not None: + def plain_unit_format(val): + return ("{0:0." 
+ str(precision) + "f}{1}").format( + val, unit_string) + func = plain_unit_format + else: + def plain_unit_format(val): + return "{0:g}{1}".format(val, unit_string) + func = plain_unit_format + else: + raise ValueError( + "'{0}' can not be represented in sexagesimal " + "notation".format( + unit.name)) + + else: + raise u.UnitsError( + "The unit value provided is not an angular unit.") + + def do_format(val): + s = func(float(val)) + if alwayssign and not s.startswith('-'): + s = '+' + s + if format == 'latex': + s = '${0}$'.format(s) + return s + + format_ufunc = np.vectorize(do_format, otypes=['U']) + result = format_ufunc(values) + + if result.ndim == 0: + result = result[()] + return result + + def wrap_at(self, wrap_angle, inplace=False): + """ + Wrap the `Angle` object at the given ``wrap_angle``. + + This method forces all the angle values to be within a contiguous + 360 degree range so that ``wrap_angle - 360d <= angle < + wrap_angle``. By default a new Angle object is returned, but if the + ``inplace`` argument is `True` then the `Angle` object is wrapped in + place and nothing is returned. + + For instance:: + + >>> from astropy.coordinates import Angle + >>> import astropy.units as u + >>> a = Angle([-20.0, 150.0, 350.0] * u.deg) + + >>> a.wrap_at(360 * u.deg).degree # Wrap into range 0 to 360 degrees # doctest: +FLOAT_CMP + array([340., 150., 350.]) + + >>> a.wrap_at('180d', inplace=True) # Wrap into range -180 to 180 degrees # doctest: +FLOAT_CMP + >>> a.degree # doctest: +FLOAT_CMP + array([-20., 150., -10.]) + + Parameters + ---------- + wrap_angle : str, `Angle`, angular `~astropy.units.Quantity` + Specifies a single value for the wrap angle. This can be any + object that can initialize an `Angle` object, e.g. ``'180d'``, + ``180 * u.deg``, or ``Angle(180, unit=u.deg)``. + + inplace : bool + If `True` then wrap the object in place instead of returning + a new `Angle` + + Returns + ------- + out : Angle or `None` + If ``inplace is False`` (default), return new `Angle` object + with angles wrapped accordingly. Otherwise wrap in place and + return `None`. + """ + wrap_angle = Angle(wrap_angle) # Convert to an Angle + wrapped = np.mod(self - wrap_angle, 360.0 * u.deg) - (360.0 * u.deg - wrap_angle) + + if inplace: + self[()] = wrapped + else: + return wrapped + + def is_within_bounds(self, lower=None, upper=None): + """ + Check if all angle(s) satisfy ``lower <= angle < upper`` + + If ``lower`` is not specified (or `None`) then no lower bounds check is + performed. Likewise ``upper`` can be left unspecified. For example:: + + >>> from astropy.coordinates import Angle + >>> import astropy.units as u + >>> a = Angle([-20, 150, 350] * u.deg) + >>> a.is_within_bounds('0d', '360d') + False + >>> a.is_within_bounds(None, '360d') + True + >>> a.is_within_bounds(-30 * u.deg, None) + True + + Parameters + ---------- + lower : str, `Angle`, angular `~astropy.units.Quantity`, `None` + Specifies lower bound for checking. This can be any object + that can initialize an `Angle` object, e.g. ``'180d'``, + ``180 * u.deg``, or ``Angle(180, unit=u.deg)``. + upper : str, `Angle`, angular `~astropy.units.Quantity`, `None` + Specifies upper bound for checking. This can be any object + that can initialize an `Angle` object, e.g. ``'180d'``, + ``180 * u.deg``, or ``Angle(180, unit=u.deg)``. 
+
+        Returns
+        -------
+        is_within_bounds : bool
+            `True` if all angles satisfy ``lower <= angle < upper``
+        """
+        ok = True
+        if lower is not None:
+            ok &= np.all(Angle(lower) <= self)
+        if ok and upper is not None:
+            ok &= np.all(self < Angle(upper))
+        return bool(ok)
+
+    def __str__(self):
+        return str(self.to_string())
+
+    def _repr_latex_(self):
+        if self.isscalar:
+            return self.to_string(format='latex')
+        else:
+            # Need to do a magic incantation to convert to str.  Regular str
+            # or array2string causes all backslashes to get doubled.
+            return np.array2string(self.to_string(format='latex'),
+                                   formatter={'str_kind': lambda x: x})
+
+
+def _no_angle_subclass(obj):
+    """Return any Angle subclass objects as Angle objects.
+
+    This is used to ensure that Latitude and Longitude change to Angle
+    objects when they are used in calculations (such as lon/2.)
+    """
+    if isinstance(obj, tuple):
+        return tuple(_no_angle_subclass(_obj) for _obj in obj)
+
+    return obj.view(Angle) if isinstance(obj, Angle) else obj
+
+
+class Latitude(Angle):
+    """
+    Latitude-like angle(s) which must be in the range -90 to +90 deg.
+
+    A Latitude object is distinguished from a pure
+    :class:`~astropy.coordinates.Angle` by virtue of being constrained
+    so that::
+
+        -90.0 * u.deg <= angle(s) <= +90.0 * u.deg
+
+    Any attempt to set a value outside that range will result in a
+    `ValueError`.
+
+    The input angle(s) can be specified either as an array, list,
+    scalar, tuple (see below), string,
+    :class:`~astropy.units.Quantity` or another
+    :class:`~astropy.coordinates.Angle`.
+
+    The input parser is flexible and supports all of the input formats
+    supported by :class:`~astropy.coordinates.Angle`.
+
+    Parameters
+    ----------
+    angle : array, list, scalar, `~astropy.units.Quantity`, `Angle`
+        The angle value(s). If a tuple, will be interpreted as ``(h, m, s)``
+        or ``(d, m, s)`` depending on ``unit``. If a string, it will be
+        interpreted following the rules described for
+        :class:`~astropy.coordinates.Angle`.
+
+        If ``angle`` is a sequence or array of strings, the resulting
+        values will be in the given ``unit``, or if `None` is provided,
+        the unit will be taken from the first given value.
+
+    unit : :class:`~astropy.units.UnitBase`, str, optional
+        The unit of the value specified for the angle.  This may be
+        any string that `~astropy.units.Unit` understands, but it is
+        better to give an actual unit object.  Must be an angular
+        unit.
+
+    Raises
+    ------
+    `~astropy.units.UnitsError`
+        If a unit is not provided or it is not an angular unit.
+    `TypeError`
+        If the angle parameter is an instance of :class:`~astropy.coordinates.Longitude`.
+    """
+    def __new__(cls, angle, unit=None, **kwargs):
+        # Forbid creating a Lat from a Long.
+        if isinstance(angle, Longitude):
+            raise TypeError("A Latitude angle cannot be created from a Longitude angle")
+        self = super(Latitude, cls).__new__(cls, angle, unit=unit, **kwargs)
+        self._validate_angles()
+        return self
+
+    def _validate_angles(self, angles=None):
+        """Check that angles are between -90 and 90 degrees.
+        If not given, the check is done on the object itself"""
+        # Convert the lower and upper bounds to the "native" unit of
+        # this angle.  This limits multiplication to two values,
+        # rather than the N values in `self.value`.  Also, the
+        # comparison is performed on raw arrays, rather than Quantity
+        # objects, for speed.
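+        # For example, if the angles are stored in radians, the bounds
+        # computed below are simply lower = -pi/2 and upper = +pi/2.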
+        if angles is None:
+            angles = self
+        lower = u.degree.to(angles.unit, -90.0)
+        upper = u.degree.to(angles.unit, 90.0)
+        if np.any(angles.value < lower) or np.any(angles.value > upper):
+            raise ValueError('Latitude angle(s) must be within -90 deg <= angle <= 90 deg, '
+                             'got {0}'.format(angles.to(u.degree)))
+
+    def __setitem__(self, item, value):
+        # Forbid assigning a Long to a Lat.
+        if isinstance(value, Longitude):
+            raise TypeError("A Longitude angle cannot be assigned to a Latitude angle")
+        # first check bounds
+        self._validate_angles(value)
+        super(Latitude, self).__setitem__(item, value)
+
+    # Any calculation should drop to Angle
+    def __array_wrap__(self, obj, context=None):
+        obj = super(Angle, self).__array_wrap__(obj, context=context)
+        return _no_angle_subclass(obj)
+
+    def __array_ufunc__(self, *args, **kwargs):
+        results = super(Latitude, self).__array_ufunc__(*args, **kwargs)
+        return _no_angle_subclass(results)
+
+
+class LongitudeInfo(u.QuantityInfo):
+    _represent_as_dict_attrs = u.QuantityInfo._represent_as_dict_attrs + ('wrap_angle',)
+
+
+class Longitude(Angle):
+    """
+    Longitude-like angle(s) which are wrapped within a contiguous 360 degree range.
+
+    A ``Longitude`` object is distinguished from a pure
+    :class:`~astropy.coordinates.Angle` by virtue of a ``wrap_angle``
+    property.  The ``wrap_angle`` specifies that all angle values
+    represented by the object will be in the range::
+
+        wrap_angle - 360 * u.deg <= angle(s) < wrap_angle
+
+    The default ``wrap_angle`` is 360 deg.  Setting ``wrap_angle=180 *
+    u.deg`` would instead result in values between -180 and +180 deg.
+    Setting the ``wrap_angle`` attribute of an existing ``Longitude``
+    object will result in re-wrapping the angle values in-place.
+
+    The input angle(s) can be specified either as an array, list,
+    scalar, tuple, string, :class:`~astropy.units.Quantity`
+    or another :class:`~astropy.coordinates.Angle`.
+
+    The input parser is flexible and supports all of the input formats
+    supported by :class:`~astropy.coordinates.Angle`.
+
+    Parameters
+    ----------
+    angle : array, list, scalar, `~astropy.units.Quantity`, :class:`~astropy.coordinates.Angle`
+        The angle value(s). If a tuple, will be interpreted as ``(h, m, s)``
+        or ``(d, m, s)`` depending on ``unit``. If a string, it will be
+        interpreted following the rules described for
+        :class:`~astropy.coordinates.Angle`.
+
+        If ``angle`` is a sequence or array of strings, the resulting
+        values will be in the given ``unit``, or if `None` is provided,
+        the unit will be taken from the first given value.
+
+    unit : :class:`~astropy.units.UnitBase`, str, optional
+        The unit of the value specified for the angle.  This may be
+        any string that `~astropy.units.Unit` understands, but it is
+        better to give an actual unit object.  Must be an angular
+        unit.
+
+    wrap_angle : :class:`~astropy.coordinates.Angle` or equivalent, or None
+        Angle at which to wrap back to ``wrap_angle - 360 deg``.
+        If ``None`` (default), it will be taken to be 360 deg unless ``angle``
+        has a ``wrap_angle`` attribute already (i.e., is a ``Longitude``),
+        in which case it will be taken from there.
+
+    Raises
+    ------
+    `~astropy.units.UnitsError`
+        If a unit is not provided or it is not an angular unit.
+    `TypeError`
+        If the angle parameter is an instance of :class:`~astropy.coordinates.Latitude`.
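+
+    Examples
+    --------
+    ``Longitude(370 * u.deg)`` is wrapped to ``10 deg``; with
+    ``wrap_angle=180 * u.deg``, an input of ``190 deg`` is wrapped to
+    ``-170 deg``.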
+ """ + + _wrap_angle = None + _default_wrap_angle = Angle(360 * u.deg) + info = LongitudeInfo() + + def __new__(cls, angle, unit=None, wrap_angle=None, **kwargs): + # Forbid creating a Long from a Lat. + if isinstance(angle, Latitude): + raise TypeError("A Longitude angle cannot be created from " + "a Latitude angle.") + self = super(Longitude, cls).__new__(cls, angle, unit=unit, **kwargs) + if wrap_angle is None: + wrap_angle = getattr(angle, 'wrap_angle', self._default_wrap_angle) + self.wrap_angle = wrap_angle + return self + + def __setitem__(self, item, value): + # Forbid assigning a Lat to a Long. + if isinstance(value, Latitude): + raise TypeError("A Latitude angle cannot be assigned to a Longitude angle") + super(Longitude, self).__setitem__(item, value) + self._wrap_internal() + + def _wrap_internal(self): + """ + Wrap the internal values in the Longitude object. Using the + :meth:`~astropy.coordinates.Angle.wrap_at` method causes + recursion. + """ + # Convert the wrap angle and 360 degrees to the native unit of + # this Angle, then do all the math on raw Numpy arrays rather + # than Quantity objects for speed. + a360 = u.degree.to(self.unit, 360.0) + wrap_angle = self.wrap_angle.to_value(self.unit) + wrap_angle_floor = wrap_angle - a360 + self_angle = self.value + # Do the wrapping, but only if any angles need to be wrapped + if np.any(self_angle < wrap_angle_floor) or np.any(self_angle >= wrap_angle): + wrapped = np.mod(self_angle - wrap_angle, a360) + wrap_angle_floor + value = u.Quantity(wrapped, self.unit) + super(Longitude, self).__setitem__((), value) + + @property + def wrap_angle(self): + return self._wrap_angle + + @wrap_angle.setter + def wrap_angle(self, value): + self._wrap_angle = Angle(value) + self._wrap_internal() + + def __array_finalize__(self, obj): + super(Longitude, self).__array_finalize__(obj) + self._wrap_angle = getattr(obj, '_wrap_angle', + self._default_wrap_angle) + + # Any calculation should drop to Angle + def __array_wrap__(self, obj, context=None): + obj = super(Angle, self).__array_wrap__(obj, context=context) + return _no_angle_subclass(obj) + + def __array_ufunc__(self, *args, **kwargs): + results = super(Longitude, self).__array_ufunc__(*args, **kwargs) + return _no_angle_subclass(results) diff --git a/astropy/coordinates/attributes.py b/astropy/coordinates/attributes.py new file mode 100644 index 0000000..f37fed2 --- /dev/null +++ b/astropy/coordinates/attributes.py @@ -0,0 +1,528 @@ +# -*- coding: utf-8 -*- +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +from __future__ import (absolute_import, unicode_literals, division, + print_function) + +# Dependencies +import numpy as np +import warnings + +# Project +from .. import units as u +from ..utils.compat.numpy import broadcast_to as np_broadcast_to +from ..utils.exceptions import AstropyDeprecationWarning +from ..utils import OrderedDescriptor, ShapedLikeNDArray + +__all__ = ['Attribute', 'TimeAttribute', 'QuantityAttribute', + 'EarthLocationAttribute', 'CoordinateAttribute', + 'CartesianRepresentationAttribute', + 'DifferentialAttribute'] + + +class Attribute(OrderedDescriptor): + """A non-mutable data descriptor to hold a frame attribute. + + This class must be used to define frame attributes (e.g. ``equinox`` or + ``obstime``) that are included in a frame class definition. 
+
+    Examples
+    --------
+    The `~astropy.coordinates.FK4` class uses the following class attributes::
+
+      class FK4(BaseCoordinateFrame):
+          equinox = TimeAttribute(default=_EQUINOX_B1950)
+          obstime = TimeAttribute(default=None,
+                                  secondary_attribute='equinox')
+
+    This means that ``equinox`` and ``obstime`` are available to be set as
+    keyword arguments when creating an ``FK4`` class instance and are then
+    accessible as instance attributes.  The instance value for the attribute
+    must be stored in ``'_' + name`` (e.g. ``_equinox``) by the frame
+    ``__init__`` method.
+
+    Note in this example that ``equinox`` and ``obstime`` are time attributes
+    and use the ``TimeAttribute`` class.  This subclass overrides the
+    ``convert_input`` method to validate and convert inputs into a ``Time``
+    object.
+
+    Parameters
+    ----------
+    default : object
+        Default value for the attribute if not provided
+    secondary_attribute : str
+        Name of a secondary instance attribute which supplies the value if
+        ``default is None`` and no value was supplied during initialization.
+    """
+
+    _class_attribute_ = 'frame_attributes'
+    _name_attribute_ = 'name'
+    name = ''
+
+    def __init__(self, default=None, secondary_attribute=''):
+        self.default = default
+        self.secondary_attribute = secondary_attribute
+        super(Attribute, self).__init__()
+
+    def convert_input(self, value):
+        """
+        Validate the input ``value`` and convert to expected attribute class.
+
+        The base method here does nothing, but subclasses can implement this
+        as needed.  The method should catch any internal exceptions and raise
+        ValueError with an informative message.
+
+        The method returns the validated input along with a boolean that
+        indicates whether the input value was actually converted.  If the input
+        value was already the correct type then the ``converted`` return value
+        should be ``False``.
+
+        Parameters
+        ----------
+        value : object
+            Input value to be converted.
+
+        Returns
+        -------
+        output_value
+            The ``value`` converted to the correct type (or just ``value`` if
+            ``converted`` is False)
+        converted : bool
+            True if the conversion was actually performed, False otherwise.
+
+        Raises
+        ------
+        ValueError
+            If the input is not valid for this attribute.
+        """
+        return value, False
+
+    def __get__(self, instance, frame_cls=None):
+        if instance is None:
+            out = self.default
+        else:
+            out = getattr(instance, '_' + self.name, self.default)
+            if out is None:
+                out = getattr(instance, self.secondary_attribute, self.default)
+
+        out, converted = self.convert_input(out)
+        if instance is not None:
+            instance_shape = getattr(instance, 'shape', None)
+            if instance_shape is not None and (getattr(out, 'size', 1) > 1 and
+                                               out.shape != instance_shape):
+                # If the shapes do not match, try broadcasting.
+                try:
+                    if isinstance(out, ShapedLikeNDArray):
+                        out = out._apply(np_broadcast_to, shape=instance_shape,
+                                         subok=True)
+                    else:
+                        out = np_broadcast_to(out, instance_shape, subok=True)
+                except ValueError:
+                    # raise more informative exception.
+                    raise ValueError(
+                        "attribute {0} should be scalar or have shape {1}, "
+                        "but it has shape {2} and could not be broadcast."
+                        .format(self.name, instance_shape, out.shape))
+
+                converted = True
+
+            if converted:
+                setattr(instance, '_' + self.name, out)
+
+        return out
+
+    def __set__(self, instance, val):
+        raise AttributeError('Cannot set frame attribute')
+
+
+class TimeAttribute(Attribute):
+    """
+    Frame attribute descriptor for quantities that are Time objects.
+ See the `~astropy.coordinates.Attribute` API doc for further + information. + + Parameters + ---------- + default : object + Default value for the attribute if not provided + secondary_attribute : str + Name of a secondary instance attribute which supplies the value if + ``default is None`` and no value was supplied during initialization. + """ + + def convert_input(self, value): + """ + Convert input value to a Time object and validate by running through + the Time constructor. Also check that the input was a scalar. + + Parameters + ---------- + value : object + Input value to be converted. + + Returns + ------- + out, converted : correctly-typed object, boolean + Tuple consisting of the correctly-typed object and a boolean which + indicates if conversion was actually performed. + + Raises + ------ + ValueError + If the input is not valid for this attribute. + """ + + from ..time import Time + + if value is None: + return None, False + + if isinstance(value, Time): + out = value + converted = False + else: + try: + out = Time(value) + except Exception as err: + raise ValueError( + 'Invalid time input {0}={1!r}\n{2}'.format(self.name, + value, err)) + converted = True + + return out, converted + + +class CartesianRepresentationAttribute(Attribute): + """ + A frame attribute that is a CartesianRepresentation with specified units. + + Parameters + ---------- + default : object + Default value for the attribute if not provided + secondary_attribute : str + Name of a secondary instance attribute which supplies the value if + ``default is None`` and no value was supplied during initialization. + unit : unit object or None + Name of a unit that the input will be converted into. If None, no + unit-checking or conversion is performed + """ + + def __init__(self, default=None, secondary_attribute='', unit=None): + super(CartesianRepresentationAttribute, self).__init__( + default, secondary_attribute) + self.unit = unit + + def convert_input(self, value): + """ + Checks that the input is a CartesianRepresentation with the correct + unit, or the special value ``[0, 0, 0]``. + + Parameters + ---------- + value : object + Input value to be converted. + + Returns + ------- + out, converted : correctly-typed object, boolean + Tuple consisting of the correctly-typed object and a boolean which + indicates if conversion was actually performed. + + Raises + ------ + ValueError + If the input is not valid for this attribute. + """ + + if (isinstance(value, list) and len(value) == 3 and + all(v == 0 for v in value) and self.unit is not None): + return CartesianRepresentation(np.zeros(3) * self.unit), True + else: + # is it a CartesianRepresentation with correct unit? + if hasattr(value, 'xyz') and value.xyz.unit == self.unit: + return value, False + + converted = True + # if it's a CartesianRepresentation, get the xyz Quantity + value = getattr(value, 'xyz', value) + if not hasattr(value, 'unit'): + raise TypeError('tried to set a {0} with something that does ' + 'not have a unit.' + .format(self.__class__.__name__)) + + value = value.to(self.unit) + + # now try and make a CartesianRepresentation. + cartrep = CartesianRepresentation(value, copy=False) + return cartrep, converted + + +class QuantityAttribute(Attribute): + """ + A frame attribute that is a quantity with specified units and shape + (optionally). 
+
+    Parameters
+    ----------
+    default : object
+        Default value for the attribute if not provided
+    secondary_attribute : str
+        Name of a secondary instance attribute which supplies the value if
+        ``default is None`` and no value was supplied during initialization.
+    unit : unit object or None
+        The unit that the input will be converted into.  If None, no
+        unit-checking or conversion is performed
+    shape : tuple or None
+        If given, specifies the shape the attribute must be
+    """
+
+    def __init__(self, default=None, secondary_attribute='', unit=None, shape=None):
+        super(QuantityAttribute, self).__init__(default, secondary_attribute)
+        self.unit = unit
+        self.shape = shape
+
+    def convert_input(self, value):
+        """
+        Checks that the input is a Quantity with the necessary units (or the
+        special value ``0``).
+
+        Parameters
+        ----------
+        value : object
+            Input value to be converted.
+
+        Returns
+        -------
+        out, converted : correctly-typed object, boolean
+            Tuple consisting of the correctly-typed object and a boolean which
+            indicates if conversion was actually performed.
+
+        Raises
+        ------
+        ValueError
+            If the input is not valid for this attribute.
+        """
+        if np.all(value == 0) and self.unit is not None:
+            return u.Quantity(np.zeros(self.shape), self.unit), True
+        else:
+            if not hasattr(value, 'unit'):
+                raise TypeError('Tried to set a QuantityAttribute with '
+                                'something that does not have a unit.')
+            oldvalue = value
+            value = u.Quantity(oldvalue, self.unit, copy=False)
+            if self.shape is not None and value.shape != self.shape:
+                raise ValueError('The provided value has shape "{0}", but '
+                                 'should have shape "{1}"'.format(value.shape,
+                                                                  self.shape))
+            converted = oldvalue is not value
+            return value, converted
+
+
+class EarthLocationAttribute(Attribute):
+    """
+    A frame attribute that can act as a `~astropy.coordinates.EarthLocation`.
+    It can be created as anything that can be transformed to the
+    `~astropy.coordinates.ITRS` frame, but always presents as an `EarthLocation`
+    when accessed after creation.
+
+    Parameters
+    ----------
+    default : object
+        Default value for the attribute if not provided
+    secondary_attribute : str
+        Name of a secondary instance attribute which supplies the value if
+        ``default is None`` and no value was supplied during initialization.
+    """
+
+    def convert_input(self, value):
+        """
+        Checks that the input can be interpreted as an
+        `~astropy.coordinates.EarthLocation` (or is `None`), transforming
+        through the `~astropy.coordinates.ITRS` frame if necessary.
+
+        Parameters
+        ----------
+        value : object
+            Input value to be converted.
+
+        Returns
+        -------
+        out, converted : correctly-typed object, boolean
+            Tuple consisting of the correctly-typed object and a boolean which
+            indicates if conversion was actually performed.
+
+        Raises
+        ------
+        ValueError
+            If the input is not valid for this attribute.
+        """
+
+        if value is None:
+            return None, False
+        elif isinstance(value, EarthLocation):
+            return value, False
+        else:
+            # we have to do the import here because of some tricky circular deps
+            from .builtin_frames import ITRS
+
+            if not hasattr(value, 'transform_to'):
+                raise ValueError('"{0}" was passed into an '
+                                 'EarthLocationAttribute, but it does not have '
+                                 'a "transform_to" method'.format(value))
+            itrsobj = value.transform_to(ITRS)
+            return itrsobj.earth_location, True
+
+
+class CoordinateAttribute(Attribute):
+    """
+    A frame attribute which is a coordinate object.  It can be given as a
+    low-level frame class *or* a `~astropy.coordinates.SkyCoord`, but will
+    always be converted to the low-level frame class when accessed.
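+
+    For example, an attribute declared as ``CoordinateAttribute(frame=ICRS)``
+    accepts a `~astropy.coordinates.SkyCoord` value but returns it as an
+    ICRS frame instance when accessed.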
+ + Parameters + ---------- + frame : a coordinate frame class + The type of frame this attribute can be + default : object + Default value for the attribute if not provided + secondary_attribute : str + Name of a secondary instance attribute which supplies the value if + ``default is None`` and no value was supplied during initialization. + """ + + def __init__(self, frame, default=None, secondary_attribute=''): + self._frame = frame + super(CoordinateAttribute, self).__init__(default, secondary_attribute) + + def convert_input(self, value): + """ + Checks that the input is a SkyCoord with the necessary units (or the + special value ``None``). + + Parameters + ---------- + value : object + Input value to be converted. + + Returns + ------- + out, converted : correctly-typed object, boolean + Tuple consisting of the correctly-typed object and a boolean which + indicates if conversion was actually performed. + + Raises + ------ + ValueError + If the input is not valid for this attribute. + """ + if value is None: + return None, False + elif isinstance(value, self._frame): + return value, False + else: + if not hasattr(value, 'transform_to'): + raise ValueError('"{0}" was passed into a ' + 'CoordinateAttribute, but it does not have ' + '"transform_to" method'.format(value)) + transformedobj = value.transform_to(self._frame) + if hasattr(transformedobj, 'frame'): + transformedobj = transformedobj.frame + return transformedobj, True + + +class DifferentialAttribute(Attribute): + """A frame attribute which is a differential instance. + + The optional ``allowed_classes`` argument allows specifying a restricted + set of valid differential classes to check the input against. Otherwise, + any `~astropy.coordinates.BaseDifferential` subclass instance is valid. + + Parameters + ---------- + default : object + Default value for the attribute if not provided + allowed_classes : tuple, optional + A list of allowed differential classes for this attribute to have. + secondary_attribute : str + Name of a secondary instance attribute which supplies the value if + ``default is None`` and no value was supplied during initialization. + """ + + def __init__(self, default=None, allowed_classes=None, + secondary_attribute=''): + + if allowed_classes is not None: + self.allowed_classes = tuple(allowed_classes) + else: + self.allowed_classes = BaseDifferential + + super(DifferentialAttribute, self).__init__(default, + secondary_attribute) + + def convert_input(self, value): + """ + Checks that the input is a differential object and is one of the + allowed class types. + + Parameters + ---------- + value : object + Input value. + + Returns + ------- + out, converted : correctly-typed object, boolean + Tuple consisting of the correctly-typed object and a boolean which + indicates if conversion was actually performed. + + Raises + ------ + ValueError + If the input is not valid for this attribute. + """ + + if not isinstance(value, self.allowed_classes): + raise TypeError('Tried to set a DifferentialAttribute with ' + 'an unsupported Differential type {0}. 
Allowed ' + 'classes are: {1}' + .format(value.__class__, + self.allowed_classes)) + + return value, True + + +# Backwards-compatibility: these are the only classes that were previously +# released in v1.3 +class FrameAttribute(Attribute): + + def __init__(self, *args, **kwargs): + warnings.warn("FrameAttribute has been renamed to Attribute.", + AstropyDeprecationWarning) + super(FrameAttribute, self).__init__(*args, **kwargs) + +class TimeFrameAttribute(TimeAttribute): + + def __init__(self, *args, **kwargs): + warnings.warn("TimeFrameAttribute has been renamed to TimeAttribute.", + AstropyDeprecationWarning) + super(TimeFrameAttribute, self).__init__(*args, **kwargs) + +class QuantityFrameAttribute(QuantityAttribute): + + def __init__(self, *args, **kwargs): + warnings.warn("QuantityFrameAttribute has been renamed to " + "QuantityAttribute.", AstropyDeprecationWarning) + super(QuantityFrameAttribute, self).__init__(*args, **kwargs) + +class CartesianRepresentationFrameAttribute(CartesianRepresentationAttribute): + + def __init__(self, *args, **kwargs): + warnings.warn("CartesianRepresentationFrameAttribute has been renamed " + "to CartesianRepresentationAttribute.", + AstropyDeprecationWarning) + super(CartesianRepresentationFrameAttribute, self).__init__( + *args, **kwargs) + + +# do this here to prevent a series of complicated circular imports +from .earth import EarthLocation +from .representation import CartesianRepresentation, BaseDifferential diff --git a/astropy/coordinates/baseframe.py b/astropy/coordinates/baseframe.py new file mode 100644 index 0000000..8f66acc --- /dev/null +++ b/astropy/coordinates/baseframe.py @@ -0,0 +1,1415 @@ +# -*- coding: utf-8 -*- +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" +Framework and base classes for coordinate frames/"low-level" coordinate +classes. +""" + +from __future__ import (absolute_import, unicode_literals, division, + print_function) + +# Standard library +import abc +import copy +import inspect +from collections import namedtuple, OrderedDict, defaultdict +import warnings + +# Dependencies +import numpy as np + +# Project +from ..utils.compat.misc import override__dir__ +from ..utils.decorators import lazyproperty +from ..utils.exceptions import AstropyWarning +from ..extern import six +from ..extern.six.moves import zip +from .. import units as u +from ..utils import (OrderedDescriptorContainer, ShapedLikeNDArray, + check_broadcast) +from .transformations import TransformGraph +from . import representation as r + +from .attributes import Attribute + +# Import old names for Attributes so we don't break backwards-compatibility +# (some users rely on them being here, although that is not encouraged, as this +# is not the public API location -- see attributes.py). +from .attributes import ( + TimeFrameAttribute, QuantityFrameAttribute, + EarthLocationAttribute, CoordinateAttribute, + CartesianRepresentationFrameAttribute) # pylint: disable=W0611 + + +__all__ = ['BaseCoordinateFrame', 'frame_transform_graph', + 'GenericFrame', 'RepresentationMapping'] + + +# the graph used for all transformations between frames +frame_transform_graph = TransformGraph() + + +def _get_repr_cls(value): + """ + Return a valid representation class from ``value`` or raise exception. 
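+
+    For example, ``_get_repr_cls('spherical')`` returns
+    `~astropy.coordinates.SphericalRepresentation`.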
+ """ + + if value in r.REPRESENTATION_CLASSES: + value = r.REPRESENTATION_CLASSES[value] + elif (not isinstance(value, type) or + not issubclass(value, r.BaseRepresentation)): + raise ValueError( + 'Representation is {0!r} but must be a BaseRepresentation class ' + 'or one of the string aliases {1}'.format( + value, list(r.REPRESENTATION_CLASSES))) + return value + + +def _get_repr_classes(base, **differentials): + """Get valid representation and differential classes. + + Parameters + ---------- + base : str or `~astropy.coordinates.BaseRepresentation` subclass + class for the representation of the base coordinates. If a string, + it is looked up among the known representation classes. + **differentials : dict of str or `~astropy.coordinates.BaseDifferentials` + Keys are like for normal differentials, i.e., 's' for a first + derivative in time, etc. If an item is set to `None`, it will be + guessed from the base class. + + Returns + ------- + repr_classes : dict of subclasses + The base class is keyed by 'base'; the others by the keys of + ``diffferentials``. + """ + base = _get_repr_cls(base) + repr_classes = {'base': base} + + for name, differential_cls in differentials.items(): + if differential_cls == 'base': + # We don't want to fail for this case. + differential_cls = r.DIFFERENTIAL_CLASSES.get(base.get_name(), None) + + elif differential_cls in r.DIFFERENTIAL_CLASSES: + differential_cls = r.DIFFERENTIAL_CLASSES[differential_cls] + + elif (differential_cls is not None and + (not isinstance(differential_cls, type) or + not issubclass(differential_cls, r.BaseDifferential))): + raise ValueError( + 'Differential is {0!r} but must be a BaseDifferential class ' + 'or one of the string aliases {1}'.format( + differential_cls, list(r.DIFFERENTIAL_CLASSES))) + repr_classes[name] = differential_cls + return repr_classes + + +# Need to subclass ABCMeta as well, so that this meta class can be combined +# with ShapedLikeNDArray below (which is an ABC); without it, one gets +# "TypeError: metaclass conflict: the metaclass of a derived class must be a +# (non-strict) subclass of the metaclasses of all its bases" +class FrameMeta(OrderedDescriptorContainer, abc.ABCMeta): + def __new__(mcls, name, bases, members): + if 'default_representation' in members: + default_repr = members.pop('default_representation') + found_default_repr = True + else: + default_repr = None + found_default_repr = False + + if 'default_differential' in members: + default_diff = members.pop('default_differential') + found_default_diff = True + else: + default_diff = None + found_default_diff = False + + if 'frame_specific_representation_info' in members: + repr_info = members.pop('frame_specific_representation_info') + found_repr_info = True + else: + repr_info = None + found_repr_info = False + + # somewhat hacky, but this is the best way to get the MRO according to + # https://mail.python.org/pipermail/python-list/2002-December/167861.html + tmp_cls = super(FrameMeta, mcls).__new__(mcls, name, bases, members) + + # now look through the whole MRO for the class attributes, raw for + # frame_attr_names, and leading underscore for others + for m in (c.__dict__ for c in tmp_cls.__mro__): + if not found_default_repr and '_default_representation' in m: + default_repr = m['_default_representation'] + found_default_repr = True + + if not found_default_diff and '_default_differential' in m: + default_diff = m['_default_differential'] + found_default_diff = True + + if (not found_repr_info and + '_frame_specific_representation_info' 
in m): + repr_info = m['_frame_specific_representation_info'] + found_repr_info = True + + if found_default_repr and found_default_diff and found_repr_info: + break + else: + raise ValueError( + 'Could not find all expected BaseCoordinateFrame class ' + 'attributes. Are you mis-using FrameMeta?') + + # Make read-only properties for the frame class attributes that should + # be read-only to make them immutable after creation. + # We copy attributes instead of linking to make sure there's no + # accidental cross-talk between classes + mcls.readonly_prop_factory(members, 'default_representation', + default_repr) + mcls.readonly_prop_factory(members, 'default_differential', + default_diff) + mcls.readonly_prop_factory(members, + 'frame_specific_representation_info', + copy.deepcopy(repr_info)) + + # now set the frame name as lower-case class name, if it isn't explicit + if 'name' not in members: + members['name'] = name.lower() + + return super(FrameMeta, mcls).__new__(mcls, name, bases, members) + + @staticmethod + def readonly_prop_factory(members, attr, value): + private_attr = '_' + attr + + def getter(self): + return getattr(self, private_attr) + + members[private_attr] = value + members[attr] = property(getter) + + +_RepresentationMappingBase = \ + namedtuple('RepresentationMapping', + ('reprname', 'framename', 'defaultunit')) + + +class RepresentationMapping(_RepresentationMappingBase): + """ + This `~collections.namedtuple` is used with the + ``frame_specific_representation_info`` attribute to tell frames what + attribute names (and default units) to use for a particular representation. + ``reprname`` and ``framename`` should be strings, while ``defaultunit`` can + be either an astropy unit, the string ``'recommended'`` (to use whatever + the representation's ``recommended_units`` is), or None (to indicate that + no unit mapping should be done). + """ + + def __new__(cls, reprname, framename, defaultunit='recommended'): + # this trick just provides some defaults + return super(RepresentationMapping, cls).__new__(cls, reprname, + framename, + defaultunit) + + +@six.add_metaclass(FrameMeta) +class BaseCoordinateFrame(ShapedLikeNDArray): + """ + The base class for coordinate frames. + + This class is intended to be subclassed to create instances of specific + systems. Subclasses can implement the following attributes: + + * `default_representation` + A subclass of `~astropy.coordinates.BaseRepresentation` that will be + treated as the default representation of this frame. This is the + representation assumed by default when the frame is created. + + * `default_differential` + A subclass of `~astropy.coordinates.BaseDifferential` that will be + treated as the default differential class of this frame. This is the + differential class assumed by default when the frame is created. + + * `~astropy.coordinates.Attribute` class attributes + Frame attributes such as ``FK4.equinox`` or ``FK4.obstime`` are defined + using a descriptor class. See the narrative documentation or + built-in classes code for details. + + * `frame_specific_representation_info` + A dictionary mapping the name or class of a representation to a list of + `~astropy.coordinates.RepresentationMapping` objects that tell what + names and default units should be used on this frame for the components + of that representation. 
+ + Parameters + ---------- + representation : `BaseRepresentation` or None + A representation object or `None` to have no data (or use the other + arguments) + *args, **kwargs + Coordinates, with names that depend on the subclass. + differential_cls : `BaseDifferential`, dict, optional + A differential class or dictionary of differential classes (currently + only a velocity differential with key 's' is supported). This sets + the expected input differential class, thereby changing the expected + keyword arguments of the data passed in. For example, passing + ``differential_cls=CartesianDifferential`` will make the classes + expect velocity data with the argument names ``v_x, v_y, v_z``. + copy : bool, optional + If `True` (default), make copies of the input coordinate arrays. + Can only be passed in as a keyword argument. + """ + + default_representation = None + default_differential = None + + # Specifies special names and units for representation and differential + # attributes. + frame_specific_representation_info = {} + + _inherit_descriptors_ = (Attribute,) + + frame_attributes = OrderedDict() + # Default empty frame_attributes dict + + def __init__(self, *args, **kwargs): + copy = kwargs.pop('copy', True) + self._attr_names_with_defaults = [] + + # TODO: we should be able to deal with an instance, not just a + # class or string. + representation = kwargs.pop('representation', None) + differential_cls = kwargs.pop('differential_cls', None) + + if representation is not None or differential_cls is not None: + + if representation is None: + representation = self.default_representation + + if (inspect.isclass(differential_cls) and + issubclass(differential_cls, r.BaseDifferential)): + # TODO: assumes the differential class is for the velocity + # differential + differential_cls = {'s': differential_cls} + + elif differential_cls is None: + differential_cls = {'s': 'base'} # see set_representation_cls() + + self.set_representation_cls(representation, **differential_cls) + + # if not set below, this is a frame with no data + representation_data = None + differential_data = None + + args = list(args) # need to be able to pop them + if (len(args) > 0) and (isinstance(args[0], r.BaseRepresentation) or + args[0] is None): + representation_data = args.pop(0) + if len(args) > 0: + raise TypeError( + 'Cannot create a frame with both a representation and ' + 'other positional arguments') + + if representation_data is not None: + diffs = representation_data.differentials + differential_data = diffs.get('s', None) + if ((differential_data is None and len(diffs) > 0) or + (differential_data is not None and len(diffs) > 1)): + raise ValueError('Multiple differentials are associated ' + 'with the representation object passed in ' + 'to the frame initializer. Only a single ' + 'velocity differential is supported. Got: ' + '{0}'.format(diffs)) + + elif self.representation: + representation_cls = self.representation + # Get any representation data passed in to the frame initializer + # using keyword or positional arguments for the component names + repr_kwargs = {} + for nmkw, nmrep in self.representation_component_names.items(): + if len(args) > 0: + # first gather up positional args + repr_kwargs[nmrep] = args.pop(0) + elif nmkw in kwargs: + repr_kwargs[nmrep] = kwargs.pop(nmkw) + + # special-case the Spherical->UnitSpherical if no `distance` + # TODO: possibly generalize this somehow? 
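+            # For example (illustrative), constructing a frame with only
+            # angular components and no 'distance' stores the data in the
+            # representation's _unit_representation (such as
+            # UnitSphericalRepresentation) rather than the full
+            # SphericalRepresentation; see the special-casing below.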
+            if repr_kwargs:
+                if repr_kwargs.get('distance', True) is None:
+                    del repr_kwargs['distance']
+
+                if (issubclass(representation_cls, r.SphericalRepresentation)
+                        and 'distance' not in repr_kwargs):
+                    representation_cls = representation_cls._unit_representation
+
+                representation_data = representation_cls(copy=copy, **repr_kwargs)
+
+            # Now we handle the Differential data:
+            # Get any differential data passed in to the frame initializer
+            # using keyword or positional arguments for the component names
+            differential_cls = self.get_representation_cls('s')
+            diff_component_names = self.get_representation_component_names('s')
+            diff_kwargs = {}
+            for nmkw, nmrep in diff_component_names.items():
+                if len(args) > 0:
+                    # first gather up positional args
+                    diff_kwargs[nmrep] = args.pop(0)
+                elif nmkw in kwargs:
+                    diff_kwargs[nmrep] = kwargs.pop(nmkw)
+
+            if diff_kwargs:
+                if (hasattr(differential_cls, '_unit_differential') and
+                        'd_distance' not in diff_kwargs):
+                    differential_cls = differential_cls._unit_differential
+
+                elif len(diff_kwargs) == 1 and 'd_distance' in diff_kwargs:
+                    differential_cls = r.RadialDifferential
+
+                differential_data = differential_cls(copy=copy, **diff_kwargs)
+
+        if len(args) > 0:
+            raise TypeError(
+                '{0}.__init__ had {1} remaining unhandled arguments'.format(
+                    self.__class__.__name__, len(args)))
+
+        if representation_data is None and differential_data is not None:
+            raise ValueError("Cannot pass in differential component data "
+                             "without positional (representation) data.")
+
+        if differential_data:
+            self._data = representation_data.with_differentials(
+                {'s': differential_data})
+        else:
+            self._data = representation_data  # possibly None.
+
+        values = {}
+        for fnm, fdefault in self.get_frame_attr_names().items():
+            # Read-only frame attributes are defined as FrameAttribute
+            # descriptors which are not settable, so set 'real' attributes as
+            # the name prefaced with an underscore.
+
+            if fnm in kwargs:
+                value = kwargs.pop(fnm)
+                setattr(self, '_' + fnm, value)
+                # Validate attribute by getting it. If the instance has data,
+                # this also checks its shape is OK. If not, we do it below.
+                values[fnm] = getattr(self, fnm)
+            else:
+                setattr(self, '_' + fnm, fdefault)
+                self._attr_names_with_defaults.append(fnm)
+
+        if kwargs:
+            raise TypeError(
+                'Coordinate frame got unexpected keywords: {0}'.format(
+                    list(kwargs)))
+
+        # We do ``is None`` because self._data might evaluate to false for
+        # empty arrays or data == 0
+        if self._data is None:
+            # No data: we still need to check that any non-scalar attributes
+            # have consistent shapes. Collect them for all attributes with
+            # size > 1 (which should be array-like and thus have a shape).
+            shapes = {fnm: value.shape for fnm, value in values.items()
+                      if getattr(value, 'size', 1) > 1}
+            if shapes:
+                if len(shapes) > 1:
+                    try:
+                        self._no_data_shape = check_broadcast(*shapes.values())
+                    except ValueError:
+                        raise ValueError(
+                            "non-scalar attributes with inconsistent "
+                            "shapes: {0}".format(shapes))
+
+                    # Above, we checked that it is possible to broadcast all
+                    # shapes. By getting and thus validating the attributes,
+                    # we verify that the attributes can in fact be broadcast.
+                    for fnm in shapes:
+                        getattr(self, fnm)
+                else:
+                    self._no_data_shape = shapes.popitem()[1]
+
+            else:
+                self._no_data_shape = ()
+        else:
+            # This makes the cache keys backwards-compatible, but also adds
+            # support for having differentials attached to the frame data
+            # representation object.
+            if 's' in self._data.differentials:
+                # TODO: assumes a velocity unit differential
+                key = (self._data.__class__.__name__,
+                       self._data.differentials['s'].__class__.__name__,
+                       False)
+            else:
+                key = (self._data.__class__.__name__, False)
+
+            # Set up representation cache.
+            self.cache['representation'][key] = self._data
+
+    @lazyproperty
+    def cache(self):
+        """
+        Cache for this frame, a dict. It stores anything that should be
+        computed from the coordinate data (*not* from the frame attributes).
+        This can be used in functions to store anything that might be
+        expensive to compute but might be re-used by some other function.
+        E.g.::
+
+            if 'user_data' in myframe.cache:
+                data = myframe.cache['user_data']
+            else:
+                myframe.cache['user_data'] = data = expensive_func(myframe.lat)
+
+        If in-place modifications are made to the frame data, the cache should
+        be cleared::
+
+            myframe.cache.clear()
+
+        """
+        return defaultdict(dict)
+
+    @property
+    def data(self):
+        """
+        The coordinate data for this object. If this frame has no data, a
+        `ValueError` will be raised. Use `has_data` to
+        check if data is present on this frame object.
+        """
+        if self._data is None:
+            raise ValueError('The frame object "{0!r}" does not have '
+                             'associated data'.format(self))
+        return self._data
+
+    @property
+    def has_data(self):
+        """
+        True if this frame has `data`, False otherwise.
+        """
+        return self._data is not None
+
+    @property
+    def shape(self):
+        return self.data.shape if self.has_data else self._no_data_shape
+
+    # We have to override the ShapedLikeNDArray definitions, since our shape
+    # does not have to be that of the data.
+    def __len__(self):
+        return len(self.data)
+
+    def __nonzero__(self):  # Py 2.x
+        return self.has_data and self.size > 0
+
+    def __bool__(self):  # Py 3.x
+        return self.has_data and self.size > 0
+
+    @property
+    def size(self):
+        return self.data.size
+
+    @property
+    def isscalar(self):
+        return self.has_data and self.data.isscalar
+
+    @classmethod
+    def get_frame_attr_names(cls):
+        return OrderedDict((name, getattr(cls, name))
+                           for name in cls.frame_attributes)
+
+    def get_representation_cls(self, which='base'):
+        """The class used for part of this frame's data.
+
+        Parameters
+        ----------
+        which : ('base', 's', `None`)
+            The class of which part to return. 'base' means the class used to
+            represent the coordinates; 's' the first derivative with respect
+            to time, i.e., the class representing the proper motion and/or
+            radial velocity. If `None`, return a dict with both.
+
+        Returns
+        -------
+        representation : `~astropy.coordinates.BaseRepresentation` or `~astropy.coordinates.BaseDifferential`.
+        """
+        if not hasattr(self, '_representation'):
+            self._representation = {'base': self.default_representation,
+                                    's': self.default_differential}
+        return self._representation[which] if which is not None else self._representation
+
+    def set_representation_cls(self, base=None, s='base'):
+        """Set representation and/or differential class for this frame's data.
+
+        Parameters
+        ----------
+        base : str, `~astropy.coordinates.BaseRepresentation` subclass, optional
+            The name or subclass to use to represent the coordinate data.
+        s : `~astropy.coordinates.BaseDifferential` subclass, optional
+            The differential subclass to use to represent any velocities,
+            such as proper motion and radial velocity. If equal to 'base',
+            which is the default, it will be inferred from the representation.
+            If `None`, the representation will drop any differentials.
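+
+        Examples
+        --------
+        A minimal usage sketch (added for illustration; it assumes the
+        built-in ``ICRS`` frame and the representation classes registered
+        by astropy)::
+
+            >>> from astropy import units as u
+            >>> from astropy.coordinates import ICRS
+            >>> c = ICRS(1*u.deg, 2*u.deg)
+            >>> c.set_representation_cls('cartesian')
+            >>> c.representation  # doctest: +SKIP
+            <class 'astropy.coordinates.representation.CartesianRepresentation'>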
+ """ + if base is None: + base = self._representation['base'] + self._representation = _get_repr_classes(base=base, s=s) + + representation = property( + fget=get_representation_cls, fset=set_representation_cls, + doc="""The representation class used for this frame's data. + + This will be a subclass from `~astropy.coordinates.BaseRepresentation`. + Can also be *set* using the string name of the representation. If you + wish to set an explicit differential class (rather than have it be + inferred), use the ``set_represenation_cls`` method. + """) + + @classmethod + def _get_representation_info(cls): + # This exists as a class method only to support handling frame inputs + # without units, which are deprecated and will be removed. This can be + # moved into the representation_info property at that time. + + repr_attrs = {} + for repr_diff_cls in (list(r.REPRESENTATION_CLASSES.values()) + + list(r.DIFFERENTIAL_CLASSES.values())): + repr_attrs[repr_diff_cls] = {'names': [], 'units': []} + for c in repr_diff_cls.attr_classes.keys(): + repr_attrs[repr_diff_cls]['names'].append(c) + rec_unit = repr_diff_cls.recommended_units.get(c, None) + repr_attrs[repr_diff_cls]['units'].append(rec_unit) + + for repr_diff_cls, mappings in cls._frame_specific_representation_info.items(): + + if isinstance(repr_diff_cls, six.string_types): + # TODO: this provides a layer of backwards compatibility in + # case the key is a string, but now we want explicit classes. + repr_diff_cls = _get_repr_cls(repr_diff_cls) + + # take the 'names' and 'units' tuples from repr_attrs, + # and then use the RepresentationMapping objects + # to update as needed for this frame. + nms = repr_attrs[repr_diff_cls]['names'] + uns = repr_attrs[repr_diff_cls]['units'] + comptomap = dict([(m.reprname, m) for m in mappings]) + for i, c in enumerate(repr_diff_cls.attr_classes.keys()): + if c in comptomap: + mapp = comptomap[c] + nms[i] = mapp.framename + + # need the isinstance because otherwise if it's a unit it + # will try to compare to the unit string representation + if not (isinstance(mapp.defaultunit, six.string_types) and + mapp.defaultunit == 'recommended'): + uns[i] = mapp.defaultunit + # else we just leave it as recommended_units says above + + # Convert to tuples so that this can't mess with frame internals + repr_attrs[repr_diff_cls]['names'] = tuple(nms) + repr_attrs[repr_diff_cls]['units'] = tuple(uns) + + return repr_attrs + + @property + def representation_info(self): + """ + A dictionary with the information of what attribute names for this frame + apply to particular representations. 
+ """ + return self._get_representation_info() + + def get_representation_component_names(self, which='base'): + out = OrderedDict() + repr_or_diff_cls = self.get_representation_cls(which) + if repr_or_diff_cls is None: + return out + data_names = repr_or_diff_cls.attr_classes.keys() + repr_names = self.representation_info[repr_or_diff_cls]['names'] + for repr_name, data_name in zip(repr_names, data_names): + out[repr_name] = data_name + return out + + def get_representation_component_units(self, which='base'): + out = OrderedDict() + repr_or_diff_cls = self.get_representation_cls(which) + if repr_or_diff_cls is None: + return out + repr_attrs = self.representation_info[repr_or_diff_cls] + repr_names = repr_attrs['names'] + repr_units = repr_attrs['units'] + for repr_name, repr_unit in zip(repr_names, repr_units): + if repr_unit: + out[repr_name] = repr_unit + return out + + representation_component_names = property(get_representation_component_names) + + representation_component_units = property(get_representation_component_units) + + def replicate(self, copy=False, **kwargs): + """ + Return a replica of the frame, optionally with new frame attributes. + + The replica is a new frame object that has the same data as this frame + object and with frame attributes overriden if they are provided as extra + keyword arguments to this method. If ``copy`` is set to `True` then a + copy of the internal arrays will be made. Otherwise the replica will + use a reference to the original arrays when possible to save memory. The + internal arrays are normally not changeable by the user so in most cases + it should not be necessary to set ``copy`` to `True`. + + Parameters + ---------- + copy : bool, optional + If True, the resulting object is a copy of the data. When False, + references are used where possible. This rule also applies to the + frame attributes. + + Any additional keywords are treated as frame attributes to be set on the + new frame object. + + Returns + ------- + frameobj : same as this frame + Replica of this object, but possibly with new frame attributes. + """ + return self._apply('copy' if copy else 'replicate', **kwargs) + + def replicate_without_data(self, copy=False, **kwargs): + """ + Return a replica without data, optionally with new frame attributes. + + The replica is a new frame object without data but with the same frame + attributes as this object, except where overriden by extra keyword + arguments to this method. The ``copy`` keyword determines if the frame + attributes are truly copied vs being references (which saves memory for + cases where frame attributes are large). + + This method is essentially the converse of `realize_frame`. + + Parameters + ---------- + copy : bool, optional + If True, the resulting object has copies of the frame attributes. + When False, references are used where possible. + + Any additional keywords are treated as frame attributes to be set on the + new frame object. + + Returns + ------- + frameobj : same as this frame + Replica of this object, but without data and possibly with new frame + attributes. + """ + kwargs['_framedata'] = None + return self._apply('copy' if copy else 'replicate', **kwargs) + + def realize_frame(self, representation): + """ + Generates a new frame *with new data* from another frame (which may or + may not have data). Roughly speaking, the converse of + `replicate_without_data`. + + Parameters + ---------- + representation : BaseRepresentation + The representation to use as the data for the new frame. 
+
+        Returns
+        -------
+        frameobj : same as this frame
+            A new object with the same frame attributes as this one, but
+            with the ``representation`` as the data.
+        """
+        # Here we pass representation_cls=None to _apply, since we do not want
+        # to insist that the realized frame has the same representation as
+        # self. [Avoids breaking sunpy; see gh-6208]
+        # TODO: should we expose this, so one has a choice?
+        return self._apply('replicate', _framedata=representation,
+                           representation_cls=None)
+
+    def represent_as(self, base, s='base', in_frame_units=False):
+        """
+        Generate and return a new representation of this frame's `data`
+        as a Representation object.
+
+        Note: In order to make an in-place change of the representation
+        of a Frame or SkyCoord object, set the ``representation``
+        attribute of that object to the desired new representation, or
+        use the ``set_representation_cls`` method to also set the differential.
+
+        Parameters
+        ----------
+        base : subclass of BaseRepresentation or string
+            The type of representation to generate. Must be a *class*
+            (not an instance), or the string name of the representation
+            class.
+        s : subclass of `~astropy.coordinates.BaseDifferential`, str, optional
+            Class in which any velocities should be represented. Must be
+            a *class* (not an instance), or the string name of the
+            differential class. If equal to 'base' (default), inferred from
+            the base class. If `None`, all velocity information is dropped.
+        in_frame_units : bool, keyword only
+            Force the representation units to match the units specified for
+            this frame's components.
+
+        Returns
+        -------
+        newrep : BaseRepresentation-derived object
+            A new representation object of this frame's `data`.
+
+        Raises
+        ------
+        AttributeError
+            If this object had no `data`
+
+        Examples
+        --------
+        >>> from astropy import units as u
+        >>> from astropy.coordinates import SkyCoord, CartesianRepresentation
+        >>> coord = SkyCoord(0*u.deg, 0*u.deg)
+        >>> coord.represent_as(CartesianRepresentation)  # doctest: +FLOAT_CMP
+        <CartesianRepresentation (x, y, z) [dimensionless]
+            ( 1.,  0.,  0.)>
+
+        >>> coord.representation = CartesianRepresentation
+        >>> coord  # doctest: +FLOAT_CMP
+        <SkyCoord (ICRS): (x, y, z) [dimensionless]
+            ( 1.,  0.,  0.)>
+        """
+
+        # For backwards compatibility (because in_frame_units used to be the
+        # 2nd argument), we check to see if `s` is a boolean. If it is, we
+        # interpret it as `in_frame_units` and warn about the position change
+        if isinstance(s, bool):
+            warnings.warn("The argument position for `in_frame_units` in "
+                          "`represent_as` has changed. Use as a keyword "
+                          "argument if needed.", AstropyWarning)
+            in_frame_units = s
+            s = 'base'
+
+        # In the future, we may want to support more differentials, in which
+        # case one probably needs to define **kwargs above and use it here.
+        # But for now, we only care about the velocity.
+        repr_classes = _get_repr_classes(base=base, s=s)
+        representation_cls = repr_classes['base']
+        # We only keep velocity information
+        if 's' in self.data.differentials:
+            differential_cls = repr_classes['s']
+        elif s is None or s == 'base':
+            differential_cls = None
+        else:
+            raise TypeError('Frame data has no associated differentials '
+                            '(i.e.
the frame has no velocity data) - ' + 'represent_as() only accepts a new ' + 'representation.') + + if differential_cls: + cache_key = (representation_cls.__name__, + differential_cls.__name__, in_frame_units) + else: + cache_key = (representation_cls.__name__, in_frame_units) + + cached_repr = self.cache['representation'].get(cache_key) + if not cached_repr: + if differential_cls: + # TODO NOTE: only supports a single differential + data = self.data.represent_as(representation_cls, + differential_cls) + diff = data.differentials['s'] # TODO: assumes velocity + else: + data = self.data.represent_as(representation_cls) + + # If the new representation is known to this frame and has a defined + # set of names and units, then use that. + new_attrs = self.representation_info.get(representation_cls) + if new_attrs and in_frame_units: + datakwargs = dict((comp, getattr(data, comp)) + for comp in data.components) + for comp, new_attr_unit in zip(data.components, new_attrs['units']): + if new_attr_unit: + datakwargs[comp] = datakwargs[comp].to(new_attr_unit) + data = data.__class__(copy=False, **datakwargs) + + if differential_cls: + # the original differential + data_diff = self.data.differentials['s'] + + # If the new differential is known to this frame and has a + # defined set of names and units, then use that. + new_attrs = self.representation_info.get(differential_cls) + if new_attrs and in_frame_units: + diffkwargs = dict((comp, getattr(diff, comp)) + for comp in diff.components) + for comp, new_attr_unit in zip(diff.components, + new_attrs['units']): + # Some special-casing to treat a situation where the + # input data has a UnitSphericalDifferential or a + # RadialDifferential. It is re-represented to the + # frame's differential class (which might be, e.g., a + # dimensional Differential), so we don't want to try to + # convert the empty component units + if (isinstance(data_diff, + (r.UnitSphericalDifferential, + r.UnitSphericalCosLatDifferential)) and + comp not in data_diff.__class__.attr_classes): + continue + + elif (isinstance(data_diff, r.RadialDifferential) and + comp not in data_diff.__class__.attr_classes): + continue + + if new_attr_unit and hasattr(diff, comp): + diffkwargs[comp] = diffkwargs[comp].to(new_attr_unit) + + diff = diff.__class__(copy=False, **diffkwargs) + + # Here we have to bypass using with_differentials() because + # it has a validation check. But because .representation and + # .differential_cls don't point to the original classes, if + # the input differential is a RadialDifferential, it usually + # gets turned into a SphericalCosLatDifferential (or + # whatever the default is) with strange units for the d_lon + # and d_lat attributes. This then causes the dictionary key + # check to fail (i.e. comparison against + # `diff._get_deriv_key()`) + data._differentials.update({'s': diff}) + # data = data.with_differentials({'s': diff}) + + self.cache['representation'][cache_key] = data + + return self.cache['representation'][cache_key] + + def transform_to(self, new_frame): + """ + Transform this object's coordinate data to a new frame. + + Parameters + ---------- + new_frame : class or frame object or SkyCoord object + The frame to transform this coordinate frame into. + + Returns + ------- + transframe + A new object with the coordinate data represented in the + ``newframe`` system. + + Raises + ------ + ValueError + If there is no possible transformation route. 
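+
+        Examples
+        --------
+        A short sketch (added for illustration; output elided, and the
+        built-in ``ICRS`` and ``FK5`` frames are assumed)::
+
+            >>> from astropy import units as u
+            >>> from astropy.coordinates import ICRS, FK5
+            >>> c = ICRS(10*u.deg, 20*u.deg)
+            >>> c.transform_to(FK5(equinox='J1975'))  # doctest: +SKIP
+            <FK5 Coordinate (equinox=J1975.000): (ra, dec) in deg ...>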
+ """ + from .errors import ConvertError + + if self._data is None: + raise ValueError('Cannot transform a frame with no data') + + if (getattr(self.data, 'differentials', None) and + hasattr(self, 'obstime') and hasattr(new_frame, 'obstime') and + np.any(self.obstime != new_frame.obstime)): + raise NotImplementedError('You cannot transform a frame that has ' + 'velocities to another frame at a ' + 'different obstime. If you think this ' + 'should (or should not) be possible, ' + 'please comment at https://github.com/astropy/astropy/issues/6280') + + if inspect.isclass(new_frame): + # Use the default frame attributes for this class + new_frame = new_frame() + + if hasattr(new_frame, '_sky_coord_frame'): + # Input new_frame is not a frame instance or class and is most + # likely a SkyCoord object. + new_frame = new_frame._sky_coord_frame + + trans = frame_transform_graph.get_transform(self.__class__, + new_frame.__class__) + if trans is None: + if new_frame is self.__class__: + # no special transform needed, but should update frame info + return new_frame.realize_frame(self.data) + msg = 'Cannot transform from {0} to {1}' + raise ConvertError(msg.format(self.__class__, new_frame.__class__)) + return trans(self, new_frame) + + def is_transformable_to(self, new_frame): + """ + Determines if this coordinate frame can be transformed to another + given frame. + + Parameters + ---------- + new_frame : class or frame object + The proposed frame to transform into. + + Returns + ------- + transformable : bool or str + `True` if this can be transformed to ``new_frame``, `False` if + not, or the string 'same' if ``new_frame`` is the same system as + this object but no transformation is defined. + + Notes + ----- + A return value of 'same' means the transformation will work, but it will + just give back a copy of this object. The intended usage is:: + + if coord.is_transformable_to(some_unknown_frame): + coord2 = coord.transform_to(some_unknown_frame) + + This will work even if ``some_unknown_frame`` turns out to be the same + frame class as ``coord``. This is intended for cases where the frame + is the same regardless of the frame attributes (e.g. ICRS), but be + aware that it *might* also indicate that someone forgot to define the + transformation between two objects of the same frame class but with + different attributes. + """ + + new_frame_cls = new_frame if inspect.isclass(new_frame) else new_frame.__class__ + trans = frame_transform_graph.get_transform(self.__class__, new_frame_cls) + + if trans is None: + if new_frame_cls is self.__class__: + return 'same' + else: + return False + else: + return True + + def is_frame_attr_default(self, attrnm): + """ + Determine whether or not a frame attribute has its value because it's + the default value, or because this frame was created with that value + explicitly requested. + + Parameters + ---------- + attrnm : str + The name of the attribute to check. + + Returns + ------- + isdefault : bool + True if the attribute ``attrnm`` has its value by default, False if + it was specified at creation of this frame. + """ + return attrnm in self._attr_names_with_defaults + + def is_equivalent_frame(self, other): + """ + Checks if this object is the same frame as the ``other`` object. + + To be the same frame, two objects must be the same frame class and have + the same frame attributes. Note that it does *not* matter what, if any, + data either object has. 
+ + Parameters + ---------- + other : BaseCoordinateFrame + the other frame to check + + Returns + ------- + isequiv : bool + True if the frames are the same, False if not. + + Raises + ------ + TypeError + If ``other`` isn't a `BaseCoordinateFrame` or subclass. + """ + if self.__class__ == other.__class__: + for frame_attr_name in self.get_frame_attr_names(): + if np.any(getattr(self, frame_attr_name) != + getattr(other, frame_attr_name)): + return False + return True + elif not isinstance(other, BaseCoordinateFrame): + raise TypeError("Tried to do is_equivalent_frame on something that " + "isn't a frame") + else: + return False + + def __repr__(self): + frameattrs = self._frame_attrs_repr() + data_repr = self._data_repr() + + if frameattrs: + frameattrs = ' ({0})'.format(frameattrs) + + if data_repr: + return '<{0} Coordinate{1}: {2}>'.format(self.__class__.__name__, + frameattrs, data_repr) + else: + return '<{0} Frame{1}>'.format(self.__class__.__name__, + frameattrs) + + def _data_repr(self): + """Returns a string representation of the coordinate data.""" + + if not self.has_data: + return '' + + if self.representation: + if (issubclass(self.representation, r.SphericalRepresentation) and + isinstance(self.data, r.UnitSphericalRepresentation)): + rep_cls = self.data.__class__ + else: + rep_cls = self.representation + + if 's' in self.data.differentials: + dif_cls = self.get_representation_cls('s') + dif_data = self.data.differentials['s'] + if isinstance(dif_data, (r.UnitSphericalDifferential, + r.UnitSphericalCosLatDifferential, + r.RadialDifferential)): + dif_cls = dif_data.__class__ + + else: + dif_cls = None + + data = self.represent_as(rep_cls, dif_cls, in_frame_units=True) + + data_repr = repr(data) + for nmpref, nmrepr in self.representation_component_names.items(): + data_repr = data_repr.replace(nmrepr, nmpref) + + else: + data = self.data + data_repr = repr(self.data) + + if data_repr.startswith('<' + data.__class__.__name__): + # remove both the leading "<" and the space after the name, as well + # as the trailing ">" + data_repr = data_repr[(len(data.__class__.__name__) + 2):-1] + else: + data_repr = 'Data:\n' + data_repr + + if 's' in self.data.differentials: + data_repr_spl = data_repr.split('\n') + if 'has differentials' in data_repr_spl[-1]: + diffrepr = repr(data.differentials['s']).split('\n') + if diffrepr[0].startswith('<'): + diffrepr[0] = ' ' + ' '.join(diffrepr[0].split(' ')[1:]) + for frm_nm, rep_nm in self.get_representation_component_names('s').items(): + diffrepr[0] = diffrepr[0].replace(rep_nm, frm_nm) + if diffrepr[-1].endswith('>'): + diffrepr[-1] = diffrepr[-1][:-1] + data_repr_spl[-1] = '\n'.join(diffrepr) + + data_repr = '\n'.join(data_repr_spl) + + return data_repr + + def _frame_attrs_repr(self): + """ + Returns a string representation of the frame's attributes, if any. + """ + return ', '.join([attrnm + '=' + str(getattr(self, attrnm)) + for attrnm in self.get_frame_attr_names()]) + + def _apply(self, method, *args, **kwargs): + """Create a new instance, applying a method to the underlying data. + + In typical usage, the method is any of the shape-changing methods for + `~numpy.ndarray` (``reshape``, ``swapaxes``, etc.), as well as those + picking particular elements (``__getitem__``, ``take``, etc.), which + are all defined in `~astropy.utils.misc.ShapedLikeNDArray`. 
It will be + applied to the underlying arrays in the representation (e.g., ``x``, + ``y``, and ``z`` for `~astropy.coordinates.CartesianRepresentation`), + as well as to any frame attributes that have a shape, with the results + used to create a new instance. + + Internally, it is also used to apply functions to the above parts + (in particular, `~numpy.broadcast_to`). + + Parameters + ---------- + method : str or callable + If str, it is the name of a method that is applied to the internal + ``components``. If callable, the function is applied. + args : tuple + Any positional arguments for ``method``. + kwargs : dict + Any keyword arguments for ``method``. + """ + if '_framedata' in kwargs: + data = kwargs.pop('_framedata') + else: + data = self.data if self.has_data else None + + # This is to provide a slightly nicer error message if the user tries to + # use frame_obj.representation instead of frame_obj.data to get the + # underlying representation object [e.g., #2890] + if inspect.isclass(data): + raise TypeError('Class passed as data instead of a representation ' + 'instance. If you called frame.representation, this' + ' returns the representation class. frame.data ' + 'returns the instantiated object - you may want to ' + ' use this instead.') + + # TODO: expose this trickery in docstring? + representation_cls = kwargs.pop('representation_cls', + self.representation) + + differential_cls = kwargs.pop('differential_cls', + self.get_representation_cls('s')) + + def apply_method(value): + if isinstance(value, ShapedLikeNDArray): + if method == 'replicate' and not hasattr(value, method): + return value # reference directly + else: + return value._apply(method, *args, **kwargs) + else: + if callable(method): + return method(value, *args, **kwargs) + else: + if method == 'replicate' and not hasattr(value, method): + return value # reference directly + else: + return getattr(value, method)(*args, **kwargs) + + if data is not None: + data = apply_method(data) + + # TODO: change to representation_cls in __init__ - gh-6219. + frattrs = {'representation': representation_cls, + 'differential_cls': differential_cls} + for attr in self.get_frame_attr_names(): + if attr not in self._attr_names_with_defaults: + if (method == 'copy' or method == 'replicate') and attr in kwargs: + value = kwargs[attr] + else: + value = getattr(self, attr) + if getattr(value, 'size', 1) > 1: + value = apply_method(value) + elif method == 'copy' or method == 'flatten': + # flatten should copy also for a single element array, but + # we cannot use it directly for array scalars, since it + # always returns a one-dimensional array. So, just copy. + value = copy.copy(value) + + frattrs[attr] = value + + return self.__class__(data, **frattrs) + + @override__dir__ + def __dir__(self): + """ + Override the builtin `dir` behavior to include representation + names. + + TODO: dynamic representation transforms (i.e. include cylindrical et al.). + """ + dir_values = set(self.representation_component_names) + dir_values |= set(self.get_representation_component_names('s')) + + return dir_values + + def __getattr__(self, attr): + """ + Allow access to attributes on the representation and differential as + found via ``self.get_representation_component_names``. + + TODO: We should handle dynamic representation transforms here (e.g., + `.cylindrical`) instead of defining properties as below. 
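+
+        For example (illustrative), accessing ``.ra`` on an ``ICRS`` instance
+        is resolved here: ``representation_component_names`` maps ``ra`` to
+        the underlying representation's ``lon`` component, which is then
+        looked up on the re-represented data.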
+ """ + + # attr == '_representation' is likely from the hasattr() test in the + # representation property which is used for + # self.representation_component_names. + # + # Prevent infinite recursion here. + if attr.startswith('_'): + return self.__getattribute__(attr) # Raise AttributeError. + + repr_names = self.representation_component_names + if attr in repr_names: + if self._data is None: + self.data # this raises the "no data" error by design - doing it + # this way means we don't have to replicate the error message here + + rep = self.represent_as(self.representation, in_frame_units=True) + val = getattr(rep, repr_names[attr]) + return val + + diff_names = self.get_representation_component_names('s') + if attr in diff_names: + if self._data is None: + self.data # see above. + # TODO: this doesn't work for the case when there is only + # unitspherical information. The differential_cls gets set to the + # default_differential, which expects full information, so the + # units don't work out + rep = self.represent_as(in_frame_units=True, + **self.get_representation_cls(None)) + val = getattr(rep.differentials['s'], diff_names[attr]) + return val + + return self.__getattribute__(attr) # Raise AttributeError. + + def __setattr__(self, attr, value): + repr_attr_names = set() + if hasattr(self, 'representation_info'): + for representation_attr in self.representation_info.values(): + repr_attr_names.update(representation_attr['names']) + + if attr in repr_attr_names: + raise AttributeError( + 'Cannot set any frame attribute {0}'.format(attr)) + else: + super(BaseCoordinateFrame, self).__setattr__(attr, value) + + def separation(self, other): + """ + Computes on-sky separation between this coordinate and another. + + .. note:: + + If the ``other`` coordinate object is in a different frame, it is + first transformed to the frame of this object. This can lead to + unintutive behavior if not accounted for. Particularly of note is + that ``self.separation(other)`` and ``other.separation(self)`` may + not give the same answer in this case. + + Parameters + ---------- + other : `~astropy.coordinates.BaseCoordinateFrame` + The coordinate to get the separation to. + + Returns + ------- + sep : `~astropy.coordinates.Angle` + The on-sky separation between this and the ``other`` coordinate. + + Notes + ----- + The separation is calculated using the Vincenty formula, which + is stable at all locations, including poles and antipodes [1]_. + + .. [1] http://en.wikipedia.org/wiki/Great-circle_distance + + """ + from .angle_utilities import angular_separation + from .angles import Angle + + self_unit_sph = self.represent_as(r.UnitSphericalRepresentation) + other_transformed = other.transform_to(self) + other_unit_sph = other_transformed.represent_as(r.UnitSphericalRepresentation) + + # Get the separation as a Quantity, convert to Angle in degrees + sep = angular_separation(self_unit_sph.lon, self_unit_sph.lat, + other_unit_sph.lon, other_unit_sph.lat) + return Angle(sep, unit=u.degree) + + def separation_3d(self, other): + """ + Computes three dimensional separation between this coordinate + and another. + + Parameters + ---------- + other : `~astropy.coordinates.BaseCoordinateFrame` + The coordinate system to get the distance to. + + Returns + ------- + sep : `~astropy.coordinates.Distance` + The real-space distance between these two coordinates. + + Raises + ------ + ValueError + If this or the other coordinate do not have distances. 
+ """ + + from .distances import Distance + + if issubclass(self.data.__class__, r.UnitSphericalRepresentation): + raise ValueError('This object does not have a distance; cannot ' + 'compute 3d separation.') + + # do this first just in case the conversion somehow creates a distance + other_in_self_system = other.transform_to(self) + + if issubclass(other_in_self_system.__class__, r.UnitSphericalRepresentation): + raise ValueError('The other object does not have a distance; ' + 'cannot compute 3d separation.') + + # drop the differentials to ensure they don't do anything odd in the + # subtraction + self_car = self.data.without_differentials().represent_as(r.CartesianRepresentation) + other_car = other_in_self_system.data.without_differentials().represent_as(r.CartesianRepresentation) + return Distance((self_car - other_car).norm()) + + @property + def cartesian(self): + """ + Shorthand for a cartesian representation of the coordinates in this + object. + """ + + # TODO: if representations are updated to use a full transform graph, + # the representation aliases should not be hard-coded like this + return self.represent_as('cartesian', in_frame_units=True) + + @property + def spherical(self): + """ + Shorthand for a spherical representation of the coordinates in this + object. + """ + + # TODO: if representations are updated to use a full transform graph, + # the representation aliases should not be hard-coded like this + return self.represent_as('spherical', in_frame_units=True) + + @property + def sphericalcoslat(self): + """ + Shorthand for a spherical representation of the positional data and a + `SphericalCosLatDifferential` for the velocity data in this object. + """ + + # TODO: if representations are updated to use a full transform graph, + # the representation aliases should not be hard-coded like this + return self.represent_as('spherical', 'sphericalcoslat', + in_frame_units=True) + + +class GenericFrame(BaseCoordinateFrame): + """ + A frame object that can't store data but can hold any arbitrary frame + attributes. Mostly useful as a utility for the high-level class to store + intermediate frame attributes. + + Parameters + ---------- + frame_attrs : dict + A dictionary of attributes to be used as the frame attributes for this + frame. + """ + + name = None # it's not a "real" frame so it doesn't have a name + + def __init__(self, frame_attrs): + self.frame_attributes = OrderedDict() + for name, default in frame_attrs.items(): + self.frame_attributes[name] = Attribute(default) + setattr(self, '_' + name, default) + + super(GenericFrame, self).__init__(None) + + def __getattr__(self, name): + if '_' + name in self.__dict__: + return getattr(self, '_' + name) + else: + raise AttributeError('no {0}'.format(name)) + + def __setattr__(self, name, value): + if name in self.get_frame_attr_names(): + raise AttributeError("can't set frame attribute '{0}'".format(name)) + else: + super(GenericFrame, self).__setattr__(name, value) diff --git a/astropy/coordinates/builtin_frames/__init__.py b/astropy/coordinates/builtin_frames/__init__.py new file mode 100644 index 0000000..0fbd0f3 --- /dev/null +++ b/astropy/coordinates/builtin_frames/__init__.py @@ -0,0 +1,127 @@ +# -*- coding: utf-8 -*- +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" +This package contains the coordinate frames actually implemented by astropy. + +Users shouldn't use this module directly, but rather import from the +`astropy.coordinates` module. 
While it is likely to exist for the long-term,
+the existence of this package and details of its organization should be
+considered an implementation detail, and is not guaranteed to hold for future
+versions of astropy.
+
+Notes
+-----
+The builtin frame classes are all imported automatically into this package's
+namespace, so there's no need to access the sub-modules directly.
+
+To implement a new frame in Astropy, a developer should add the frame as a new
+module in this package. Any "self" transformations (i.e., those that transform
+from one frame to another frame of the same class) should be included in that
+module. Transformation functions connecting the new frame to other frames
+should be in a separate module, which should be imported in this package's
+``__init__.py`` to ensure the transformations are hooked up when this package is
+imported. Placing the transformation functions in separate modules avoids
+circular dependencies, because they need references to the frame classes.
+"""
+
+from .baseradec import BaseRADecFrame
+from .icrs import ICRS
+from .fk5 import FK5
+from .fk4 import FK4, FK4NoETerms
+from .galactic import Galactic
+from .galactocentric import Galactocentric
+from .lsr import LSR, GalacticLSR
+from .supergalactic import Supergalactic
+from .altaz import AltAz
+from .gcrs import GCRS, PrecessedGeocentric
+from .cirs import CIRS
+from .itrs import ITRS
+from .hcrs import HCRS
+from .ecliptic import (GeocentricTrueEcliptic, BarycentricTrueEcliptic,
+                       HeliocentricTrueEcliptic, BaseEclipticFrame)
+from .skyoffset import SkyOffsetFrame
+# need to import transformations so that they get registered in the graph
+from . import icrs_fk5_transforms
+from . import fk4_fk5_transforms
+from . import galactic_transforms
+from . import supergalactic_transforms
+from . import icrs_cirs_transforms
+from . import cirs_observed_transforms
+from . import intermediate_rotation_transforms
+from . import ecliptic_transforms
+
+# we define an __all__ because otherwise the transformation modules get included
+__all__ = ['ICRS', 'FK5', 'FK4', 'FK4NoETerms', 'Galactic', 'Galactocentric',
+           'Supergalactic', 'AltAz', 'GCRS', 'CIRS', 'ITRS', 'HCRS',
+           'PrecessedGeocentric', 'GeocentricTrueEcliptic',
+           'BarycentricTrueEcliptic', 'HeliocentricTrueEcliptic',
+           'SkyOffsetFrame', 'GalacticLSR', 'LSR',
+           'BaseEclipticFrame', 'BaseRADecFrame']
+
+
+def _make_transform_graph_docs():
+    """
+    Generates a string for use with the coordinate package's docstring
+    to show the available transforms and coordinate systems
+    """
+    import inspect
+    from textwrap import dedent
+    from ...extern import six
+    from ..baseframe import BaseCoordinateFrame, frame_transform_graph
+
+    isclass = inspect.isclass
+    coosys = [item for item in six.itervalues(globals())
+              if isclass(item) and issubclass(item, BaseCoordinateFrame)]
+
+    # currently, all of the priorities are set to 1, so we don't need to show
+    # them in the transform graph.
+    graphstr = frame_transform_graph.to_dot_graph(addnodes=coosys,
+                                                  priorities=False)
+
+    docstr = """
+    The diagram below shows all of the coordinate systems built into the
+    `~astropy.coordinates` package, their aliases (useful for converting
+    other coordinates to them using attribute-style access) and the
+    pre-defined transformations between them. The user is free to
+    override any of these transformations by defining new transformations
+    between these systems, but the pre-defined transformations should be
+    sufficient for typical usage.
+
+    The color of an edge in the graph (i.e.
the transformations between two + frames) is set by the type of transformation; the legend box defines the + mapping from transform class name to color. + + + .. graphviz:: + + """ + + docstr = dedent(docstr) + ' ' + graphstr.replace('\n', '\n ') + + # colors are in dictionary at the bottom of transformations.py + from ..transformations import trans_to_color + html_list_items = [] + for cls, color in trans_to_color.items(): + block = u""" +
<li style='list-style: none;'>
+            <p style="font-size: 12px;line-height: 24px;font-weight: normal;color: #848484;padding: 0;margin: 0;">
+                <b>{0}:</b>
+                <span style="font-size: 24px; color: {1};"><b>➝</b></span>
+            </p>
+        </li>
+        """.format(cls.__name__, color)
+        html_list_items.append(block)
+
+    graph_legend = u"""
+    .. raw:: html
+
+        <ul>
+            {}
+        </ul>
    + """.format("\n".join(html_list_items)) + docstr = docstr + dedent(graph_legend) + + return docstr + + +_transform_graph_docs = _make_transform_graph_docs() diff --git a/astropy/coordinates/builtin_frames/altaz.py b/astropy/coordinates/builtin_frames/altaz.py new file mode 100644 index 0000000..db7b5c6 --- /dev/null +++ b/astropy/coordinates/builtin_frames/altaz.py @@ -0,0 +1,158 @@ +# -*- coding: utf-8 -*- +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, unicode_literals, division, + print_function) + +import numpy as np + +from ... import units as u +from .. import representation as r +from ..baseframe import BaseCoordinateFrame, RepresentationMapping +from ..attributes import (Attribute, TimeAttribute, + QuantityAttribute, EarthLocationAttribute) + +_90DEG = 90*u.deg + + +class AltAz(BaseCoordinateFrame): + """ + A coordinate or frame in the Altitude-Azimuth system (Horizontal + coordinates). Azimuth is oriented East of North (i.e., N=0, E=90 degrees). + + This frame is assumed to *include* refraction effects if the ``pressure`` + frame attribute is non-zero. + + The frame attributes are listed under **Other Parameters**, which are + necessary for transforming from AltAz to some other system. + + Parameters + ---------- + representation : `BaseRepresentation` or None + A representation object or None to have no data (or use the other + keywords) + + az : `Angle`, optional, must be keyword + The Azimuth for this object (``alt`` must also be given and + ``representation`` must be None). + alt : `Angle`, optional, must be keyword + The Altitude for this object (``az`` must also be given and + ``representation`` must be None). + distance : :class:`~astropy.units.Quantity`, optional, must be keyword + The Distance for this object along the line-of-sight. + + pm_az_cosalt : :class:`~astropy.units.Quantity`, optional, must be keyword + The proper motion in azimuth (including the ``cos(alt)`` factor) for + this object (``pm_alt`` must also be given). + pm_alt : :class:`~astropy.units.Quantity`, optional, must be keyword + The proper motion in altitude for this object (``pm_az_cosalt`` must + also be given). + radial_velocity : :class:`~astropy.units.Quantity`, optional, must be keyword + The radial velocity of this object. + + copy : bool, optional + If `True` (default), make copies of the input coordinate arrays. + Can only be passed in as a keyword argument. + + differential_cls : `BaseDifferential`, dict, optional + A differential class or dictionary of differential classes (currently + only a velocity differential with key 's' is supported). This sets + the expected input differential class, thereby changing the expected + keyword arguments of the data passed in. For example, passing + ``differential_cls=CartesianDifferential`` will make the classes + expect velocity data with the argument names ``v_x, v_y, v_z``. + + Other parameters + ---------------- + obstime : `~astropy.time.Time` + The time at which the observation is taken. Used for determining the + position and orientation of the Earth. + location : `~astropy.coordinates.EarthLocation` + The location on the Earth. This can be specified either as an + `~astropy.coordinates.EarthLocation` object or as anything that can be + transformed to an `~astropy.coordinates.ITRS` frame. + pressure : `~astropy.units.Quantity` + The atmospheric pressure as an `~astropy.units.Quantity` with pressure + units. This is necessary for performing refraction corrections. 
+        Setting this to 0 (the default) will disable refraction calculations
+        when transforming to/from this frame.
+    temperature : `~astropy.units.Quantity`
+        The ground-level temperature as an `~astropy.units.Quantity` in
+        deg C. This is necessary for performing refraction corrections.
+    relative_humidity : numeric
+        The relative humidity as a number from 0 to 1. This is necessary for
+        performing refraction corrections.
+    obswl : `~astropy.units.Quantity`
+        The average wavelength of observations as an `~astropy.units.Quantity`
+        with length units. This is necessary for performing refraction
+        corrections.
+
+    Notes
+    -----
+    The refraction model is based on that implemented in ERFA, which is fast
+    but becomes inaccurate for altitudes below about 5 degrees. Near and below
+    altitudes of 0, it can even give meaningless answers, and in this case
+    transforming to AltAz and back to another frame can give highly discrepant
+    results. For much better numerical stability, leave the ``pressure`` at
+    ``0`` (the default), which disables the refraction correction and yields
+    "topocentric" horizontal coordinates.
+
+    """
+
+    frame_specific_representation_info = {
+        r.SphericalRepresentation: [
+            RepresentationMapping('lon', 'az'),
+            RepresentationMapping('lat', 'alt')
+        ],
+        r.SphericalCosLatDifferential: [
+            RepresentationMapping('d_lon_coslat', 'pm_az_cosalt', u.mas/u.yr),
+            RepresentationMapping('d_lat', 'pm_alt', u.mas/u.yr),
+            RepresentationMapping('d_distance', 'radial_velocity', u.km/u.s),
+        ],
+        r.SphericalDifferential: [
+            RepresentationMapping('d_lon', 'pm_az', u.mas/u.yr),
+            RepresentationMapping('d_lat', 'pm_alt', u.mas/u.yr),
+            RepresentationMapping('d_distance', 'radial_velocity', u.km/u.s)
+        ],
+        r.CartesianDifferential: [
+            RepresentationMapping('d_x', 'v_x', u.km/u.s),
+            RepresentationMapping('d_y', 'v_y', u.km/u.s),
+            RepresentationMapping('d_z', 'v_z', u.km/u.s),
+        ],
+    }
+    frame_specific_representation_info[r.UnitSphericalRepresentation] = \
+        frame_specific_representation_info[r.SphericalRepresentation]
+    frame_specific_representation_info[r.UnitSphericalCosLatDifferential] = \
+        frame_specific_representation_info[r.SphericalCosLatDifferential]
+    frame_specific_representation_info[r.UnitSphericalDifferential] = \
+        frame_specific_representation_info[r.SphericalDifferential]
+
+    default_representation = r.SphericalRepresentation
+    default_differential = r.SphericalCosLatDifferential
+
+    obstime = TimeAttribute(default=None)
+    location = EarthLocationAttribute(default=None)
+    pressure = QuantityAttribute(default=0, unit=u.hPa)
+    temperature = QuantityAttribute(default=0, unit=u.deg_C)
+    relative_humidity = Attribute(default=0)
+    obswl = QuantityAttribute(default=1*u.micron, unit=u.micron)
+
+    def __init__(self, *args, **kwargs):
+        super(AltAz, self).__init__(*args, **kwargs)
+
+    @property
+    def secz(self):
+        """
+        Secant of the zenith angle for this coordinate, a common estimate
+        of the airmass.
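+
+        For example, a target at an altitude of 30 degrees has
+        ``secz = 1/sin(30 deg) = 2`` (illustrative value).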
+ """ + return 1/np.sin(self.alt) + + @property + def zen(self): + """ + The zenith angle for this coordinate + """ + return _90DEG.to(self.alt.unit) - self.alt + + +# self-transform defined in cirs_observed_transforms.py diff --git a/astropy/coordinates/builtin_frames/baseradec.py b/astropy/coordinates/builtin_frames/baseradec.py new file mode 100644 index 0000000..32dbbd6 --- /dev/null +++ b/astropy/coordinates/builtin_frames/baseradec.py @@ -0,0 +1,92 @@ +# -*- coding: utf-8 -*- +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, unicode_literals, division, + print_function) + +from ... import units as u +from .. import representation as r +from ..baseframe import BaseCoordinateFrame, RepresentationMapping + +__all__ = ['BaseRADecFrame'] + +_base_radec_docstring = """Parameters + ---------- + representation : `BaseRepresentation` or None + A representation object or ``None`` to have no data (or use the other + keywords below). + + ra : `Angle`, optional, must be keyword + The RA for this object (``dec`` must also be given and ``representation`` + must be None). + dec : `Angle`, optional, must be keyword + The Declination for this object (``ra`` must also be given and + ``representation`` must be None). + distance : `~astropy.units.Quantity`, optional, must be keyword + The Distance for this object along the line-of-sight. + (``representation`` must be None). + + pm_ra_cosdec : :class:`~astropy.units.Quantity`, optional, must be keyword + The proper motion in Right Ascension (including the ``cos(dec)`` factor) + for this object (``pm_dec`` must also be given). + pm_dec : :class:`~astropy.units.Quantity`, optional, must be keyword + The proper motion in Declination for this object (``pm_ra_cosdec`` must + also be given). + radial_velocity : :class:`~astropy.units.Quantity`, optional, must be keyword + The radial velocity of this object. + + copy : bool, optional + If `True` (default), make copies of the input coordinate arrays. + Can only be passed in as a keyword argument. + + differential_cls : `BaseDifferential`, dict, optional + A differential class or dictionary of differential classes (currently + only a velocity differential with key 's' is supported). This sets + the expected input differential class, thereby changing the expected + keyword arguments of the data passed in. For example, passing + ``differential_cls=CartesianDifferential`` will make the classes + expect velocity data with the argument names ``v_x, v_y, v_z``. +""" + + +class BaseRADecFrame(BaseCoordinateFrame): + """ + A base class that defines default representation info for frames that + represent longitude and latitude as Right Ascension and Declination + following typical "equatorial" conventions. 
+ + {params} + """ + frame_specific_representation_info = { + r.SphericalRepresentation: [ + RepresentationMapping('lon', 'ra'), + RepresentationMapping('lat', 'dec') + ], + r.SphericalCosLatDifferential: [ + RepresentationMapping('d_lon_coslat', 'pm_ra_cosdec', u.mas/u.yr), + RepresentationMapping('d_lat', 'pm_dec', u.mas/u.yr), + RepresentationMapping('d_distance', 'radial_velocity', u.km/u.s) + ], + r.SphericalDifferential: [ + RepresentationMapping('d_lon', 'pm_ra', u.mas/u.yr), + RepresentationMapping('d_lat', 'pm_dec', u.mas/u.yr), + RepresentationMapping('d_distance', 'radial_velocity', u.km/u.s) + ], + r.CartesianDifferential: [ + RepresentationMapping('d_x', 'v_x', u.km/u.s), + RepresentationMapping('d_y', 'v_y', u.km/u.s), + RepresentationMapping('d_z', 'v_z', u.km/u.s) + ], + } + frame_specific_representation_info[r.UnitSphericalRepresentation] = \ + frame_specific_representation_info[r.SphericalRepresentation] + frame_specific_representation_info[r.UnitSphericalCosLatDifferential] = \ + frame_specific_representation_info[r.SphericalCosLatDifferential] + frame_specific_representation_info[r.UnitSphericalDifferential] = \ + frame_specific_representation_info[r.SphericalDifferential] + + default_representation = r.SphericalRepresentation + default_differential = r.SphericalCosLatDifferential + + +BaseRADecFrame.__doc__ = BaseRADecFrame.__doc__.format( + params=_base_radec_docstring) diff --git a/astropy/coordinates/builtin_frames/cirs.py b/astropy/coordinates/builtin_frames/cirs.py new file mode 100644 index 0000000..4253bba --- /dev/null +++ b/astropy/coordinates/builtin_frames/cirs.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, unicode_literals, division, + print_function) + +from ..attributes import TimeAttribute + +from .baseradec import _base_radec_docstring, BaseRADecFrame +from .utils import DEFAULT_OBSTIME + + +class CIRS(BaseRADecFrame): + """ + A coordinate or frame in the Celestial Intermediate Reference System (CIRS). + + The frame attributes are listed under **Other Parameters**. + + {params} + + Other parameters + ---------------- + obstime : `~astropy.time.Time` + The time at which the observation is taken. Used for determining the + position of the Earth and its precession. + """ + + obstime = TimeAttribute(default=DEFAULT_OBSTIME) + + +CIRS.__doc__ = CIRS.__doc__.format(params=_base_radec_docstring) + +# The "self-transform" is defined in icrs_cirs_transformations.py, because in +# the current implementation it goes through ICRS (like GCRS) diff --git a/astropy/coordinates/builtin_frames/cirs_observed_transforms.py b/astropy/coordinates/builtin_frames/cirs_observed_transforms.py new file mode 100644 index 0000000..6ec4c14 --- /dev/null +++ b/astropy/coordinates/builtin_frames/cirs_observed_transforms.py @@ -0,0 +1,133 @@ +# -*- coding: utf-8 -*- +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" +Contains the transformation functions for getting to "observed" systems from CIRS. +Currently that just means AltAz. +""" +from __future__ import (absolute_import, unicode_literals, division, + print_function) + +import numpy as np + +from ... import units as u +from ..baseframe import frame_transform_graph +from ..transformations import FunctionTransformWithFiniteDifference +from ..representation import (SphericalRepresentation, + UnitSphericalRepresentation) +from ... 
import _erfa as erfa
+
+from .cirs import CIRS
+from .altaz import AltAz
+from .utils import get_polar_motion, get_dut1utc, get_jd12, PIOVER2
+
+
+@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, CIRS, AltAz)
+def cirs_to_altaz(cirs_coo, altaz_frame):
+    if np.any(cirs_coo.obstime != altaz_frame.obstime):
+        # the only frame attribute for the current CIRS is the obstime, but this
+        # would need to be updated if a future change allowed specifying an
+        # Earth location algorithm or something
+        cirs_coo = cirs_coo.transform_to(CIRS(obstime=altaz_frame.obstime))
+
+    # we use the same obstime everywhere now that we know they're the same
+    obstime = cirs_coo.obstime
+
+    # if the data are UnitSphericalRepresentation, we can skip the distance calculations
+    is_unitspherical = (isinstance(cirs_coo.data, UnitSphericalRepresentation) or
+                        cirs_coo.cartesian.x.unit == u.one)
+
+    if is_unitspherical:
+        usrepr = cirs_coo.represent_as(UnitSphericalRepresentation)
+        cirs_ra = usrepr.lon.to_value(u.radian)
+        cirs_dec = usrepr.lat.to_value(u.radian)
+    else:
+        # compute an "astrometric" ra/dec - i.e., the direction of the
+        # displacement vector from the observer to the target in CIRS
+        loccirs = altaz_frame.location.get_itrs(cirs_coo.obstime).transform_to(cirs_coo)
+        diffrepr = (cirs_coo.cartesian - loccirs.cartesian).represent_as(UnitSphericalRepresentation)
+
+        cirs_ra = diffrepr.lon.to_value(u.radian)
+        cirs_dec = diffrepr.lat.to_value(u.radian)
+
+    lon, lat, height = altaz_frame.location.to_geodetic('WGS84')
+    xp, yp = get_polar_motion(obstime)
+
+    # first set up the astrometry context for CIRS<->AltAz
+    jd1, jd2 = get_jd12(obstime, 'utc')
+    astrom = erfa.apio13(jd1, jd2,
+                         get_dut1utc(obstime),
+                         lon.to_value(u.radian), lat.to_value(u.radian),
+                         height.to_value(u.m),
+                         xp, yp,  # polar motion
+                         # all below are already in correct units because they are QuantityFrameAttributes
+                         altaz_frame.pressure.value,
+                         altaz_frame.temperature.value,
+                         altaz_frame.relative_humidity,
+                         altaz_frame.obswl.value)
+
+    az, zen, _, _, _ = erfa.atioq(cirs_ra, cirs_dec, astrom)
+
+    if is_unitspherical:
+        rep = UnitSphericalRepresentation(lat=u.Quantity(PIOVER2 - zen, u.radian, copy=False),
+                                          lon=u.Quantity(az, u.radian, copy=False),
+                                          copy=False)
+    else:
+        # now we get the distance as the cartesian distance from the earth
+        # location to the coordinate location
+        locitrs = altaz_frame.location.get_itrs(obstime)
+        distance = locitrs.separation_3d(cirs_coo)
+        rep = SphericalRepresentation(lat=u.Quantity(PIOVER2 - zen, u.radian, copy=False),
+                                      lon=u.Quantity(az, u.radian, copy=False),
+                                      distance=distance,
+                                      copy=False)
+    return altaz_frame.realize_frame(rep)
+
+
+@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, AltAz, CIRS)
+def altaz_to_cirs(altaz_coo, cirs_frame):
+    usrepr = altaz_coo.represent_as(UnitSphericalRepresentation)
+    az = usrepr.lon.to_value(u.radian)
+    zen = PIOVER2 - usrepr.lat.to_value(u.radian)
+
+    lon, lat, height = altaz_coo.location.to_geodetic('WGS84')
+    xp, yp = get_polar_motion(altaz_coo.obstime)
+
+    # first set up the astrometry context for CIRS<->AltAz at the altaz_coo time
+    jd1, jd2 = get_jd12(altaz_coo.obstime, 'utc')
+    astrom = erfa.apio13(jd1, jd2,
+                         get_dut1utc(altaz_coo.obstime),
+                         lon.to_value(u.radian), lat.to_value(u.radian),
+                         height.to_value(u.m),
+                         xp, yp,  # polar motion
+                         # all below are already in correct units because they are QuantityFrameAttributes
+                         altaz_coo.pressure.value,
+                         altaz_coo.temperature.value,
+                         altaz_coo.relative_humidity,
+@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, AltAz, CIRS)
+def altaz_to_cirs(altaz_coo, cirs_frame):
+    usrepr = altaz_coo.represent_as(UnitSphericalRepresentation)
+    az = usrepr.lon.to_value(u.radian)
+    zen = PIOVER2 - usrepr.lat.to_value(u.radian)
+
+    lon, lat, height = altaz_coo.location.to_geodetic('WGS84')
+    xp, yp = get_polar_motion(altaz_coo.obstime)
+
+    # first set up the astrometry context for CIRS<->AltAz at the altaz_coo time
+    jd1, jd2 = get_jd12(altaz_coo.obstime, 'utc')
+    astrom = erfa.apio13(jd1, jd2,
+                         get_dut1utc(altaz_coo.obstime),
+                         lon.to_value(u.radian), lat.to_value(u.radian),
+                         height.to_value(u.m),
+                         xp, yp,  # polar motion
+                         # all below are already in correct units because they
+                         # are QuantityFrameAttributes
+                         altaz_coo.pressure.value,
+                         altaz_coo.temperature.value,
+                         altaz_coo.relative_humidity,
+                         altaz_coo.obswl.value)
+
+    # the 'A' indicates zen/az inputs
+    cirs_ra, cirs_dec = erfa.atoiq('A', az, zen, astrom)*u.radian
+    if isinstance(altaz_coo.data, UnitSphericalRepresentation) or altaz_coo.cartesian.x.unit == u.one:
+        cirs_at_aa_time = CIRS(ra=cirs_ra, dec=cirs_dec, distance=None,
+                               obstime=altaz_coo.obstime)
+    else:
+        # treat the output of atoiq as an "astrometric" RA/Dec, so to get the
+        # actual RA/Dec from the observer's vantage point, we have to reverse
+        # the vector operation of cirs_to_altaz (see there for more detail)
+        loccirs = altaz_coo.location.get_itrs(altaz_coo.obstime).transform_to(cirs_frame)
+        astrometric_rep = SphericalRepresentation(lon=cirs_ra, lat=cirs_dec,
+                                                  distance=altaz_coo.distance)
+        newrepr = astrometric_rep + loccirs.cartesian
+        cirs_at_aa_time = CIRS(newrepr, obstime=altaz_coo.obstime)
+
+    # this final transform may be a no-op if the obstimes are the same
+    return cirs_at_aa_time.transform_to(cirs_frame)
+
+
+@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, AltAz, AltAz)
+def altaz_to_altaz(from_coo, to_frame):
+    # for now we just implement this through CIRS to make sure we get everything
+    # covered
+    return from_coo.transform_to(CIRS(obstime=from_coo.obstime)).transform_to(to_frame)
diff --git a/astropy/coordinates/builtin_frames/ecliptic.py b/astropy/coordinates/builtin_frames/ecliptic.py
new file mode 100644
index 0000000..077a8e4
--- /dev/null
+++ b/astropy/coordinates/builtin_frames/ecliptic.py
@@ -0,0 +1,179 @@
+# -*- coding: utf-8 -*-
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+from __future__ import (absolute_import, unicode_literals, division,
+                        print_function)
+
+from ... import units as u
+from .. import representation as r
+from ..baseframe import BaseCoordinateFrame, RepresentationMapping
+from ..attributes import TimeAttribute
+from .utils import EQUINOX_J2000, DEFAULT_OBSTIME
+
+__all__ = ['GeocentricTrueEcliptic', 'BarycentricTrueEcliptic',
+           'HeliocentricTrueEcliptic', 'BaseEclipticFrame']
+
+_base_ecliptic_docstring = """.. warning::
+        In the current version of astropy, the ecliptic frames do not yet have
+        stringent accuracy tests.  We recommend that you test against
+        "known-good" cases to ensure these frames are what you are looking for.
+        (And then, ideally, you would contribute these tests to Astropy!)
+
+    Parameters
+    ----------
+    representation : `BaseRepresentation` or None
+        A representation object or None to have no data (or use the other keywords)
+
+    lon : `Angle`, optional, must be keyword
+        The ecliptic longitude for this object (``lat`` must also be given and
+        ``representation`` must be None).
+    lat : `Angle`, optional, must be keyword
+        The ecliptic latitude for this object (``lon`` must also be given and
+        ``representation`` must be None).
+    distance : `~astropy.units.Quantity`, optional, must be keyword
+        The distance for this object from the {0}.
+        (``representation`` must be None).
+
+    pm_lon_coslat : `Angle`, optional, must be keyword
+        The proper motion in the ecliptic longitude (including the ``cos(lat)``
+        factor) for this object (``pm_lat`` must also be given).
+    pm_lat : `Angle`, optional, must be keyword
+        The proper motion in the ecliptic latitude for this object
+        (``pm_lon_coslat`` must also be given).
+    radial_velocity : `~astropy.units.Quantity`, optional, must be keyword
+        The radial velocity of this object.
+
+    copy : bool, optional
+        If `True` (default), make copies of the input coordinate arrays.
+        Can only be passed in as a keyword argument.
+
+    differential_cls : `BaseDifferential`, dict, optional
+        A differential class or dictionary of differential classes (currently
+        only a velocity differential with key 's' is supported). This sets
+        the expected input differential class, thereby changing the expected
+        keyword arguments of the data passed in. For example, passing
+        ``differential_cls=CartesianDifferential`` will make the classes
+        expect velocity data with the argument names ``v_x, v_y, v_z``.
+"""
+
+
+class BaseEclipticFrame(BaseCoordinateFrame):
+    """
+    A base class for frames that have names and conventions like those of
+    ecliptic frames.
+
+    {params}
+    """
+
+    frame_specific_representation_info = {
+        r.SphericalCosLatDifferential: [
+            RepresentationMapping('d_lon_coslat', 'pm_lon_coslat', u.mas/u.yr),
+            RepresentationMapping('d_lat', 'pm_lat', u.mas/u.yr),
+            RepresentationMapping('d_distance', 'radial_velocity', u.km/u.s),
+        ],
+        r.SphericalDifferential: [
+            RepresentationMapping('d_lon', 'pm_lon', u.mas/u.yr),
+            RepresentationMapping('d_lat', 'pm_lat', u.mas/u.yr),
+            RepresentationMapping('d_distance', 'radial_velocity', u.km/u.s),
+        ],
+        r.CartesianDifferential: [
+            RepresentationMapping('d_x', 'v_x', u.km/u.s),
+            RepresentationMapping('d_y', 'v_y', u.km/u.s),
+            RepresentationMapping('d_z', 'v_z', u.km/u.s),
+        ],
+    }
+
+    frame_specific_representation_info[r.UnitSphericalCosLatDifferential] = \
+        frame_specific_representation_info[r.SphericalCosLatDifferential]
+    frame_specific_representation_info[r.UnitSphericalDifferential] = \
+        frame_specific_representation_info[r.SphericalDifferential]
+
+    default_representation = r.SphericalRepresentation
+    default_differential = r.SphericalCosLatDifferential
+
+
+BaseEclipticFrame.__doc__ = BaseEclipticFrame.__doc__.format(
+    params=_base_ecliptic_docstring)
+
+
+class GeocentricTrueEcliptic(BaseEclipticFrame):
+    """
+    Geocentric ecliptic coordinates.  The origin of the coordinates is the
+    geocenter (Earth), with the x axis pointing to the *true* (not mean) equinox
+    at the time specified by the ``equinox`` attribute, and the xy-plane in the
+    plane of the ecliptic for that date.
+
+    Be aware that the definition of "geocentric" here means that this frame
+    *includes* light deflection from the sun, aberration, etc., when
+    transforming to/from e.g. ICRS.
+
+    The frame attributes are listed under **Other Parameters**.
+
+    {params}
+
+    Other parameters
+    ----------------
+    equinox : `~astropy.time.Time`
+        The date to assume for this frame.  Determines the location of the
+        x-axis and the location of the Earth (necessary for transformation to
+        non-geocentric systems).
+    """
+
+    equinox = TimeAttribute(default=EQUINOX_J2000)
+
+
+GeocentricTrueEcliptic.__doc__ = GeocentricTrueEcliptic.__doc__.format(
+    params=_base_ecliptic_docstring.format("geocenter"))
+
+
+class BarycentricTrueEcliptic(BaseEclipticFrame):
+    """
+    Barycentric ecliptic coordinates.  The origin of the coordinates is the
+    barycenter of the solar system, with the x axis pointing in the direction of
+    the *true* (not mean) equinox at the time specified by the ``equinox``
+    attribute (as seen from Earth), and the xy-plane in the plane of the
+    ecliptic for that date.
+
+    The frame attributes are listed under **Other Parameters**.
+
+    {params}
+
+    Other parameters
+    ----------------
+    equinox : `~astropy.time.Time`
+        The date to assume for this frame.  Determines the location of the
+        x-axis and the location of the Earth and Sun.
+ """ + + equinox = TimeAttribute(default=EQUINOX_J2000) + + +BarycentricTrueEcliptic.__doc__ = BarycentricTrueEcliptic.__doc__.format( + params=_base_ecliptic_docstring.format("sun's center")) + + +class HeliocentricTrueEcliptic(BaseEclipticFrame): + """ + Heliocentric ecliptic coordinates. These origin of the coordinates are the + center of the sun, with the x axis pointing in the direction of + the *true* (not mean) equinox as at the time specified by the ``equinox`` + attribute (as seen from Earth), and the xy-plane in the plane of the + ecliptic for that date. + + The frame attributes are listed under **Other Parameters**. + + {params} + + Other parameters + ---------------- + equinox : `~astropy.time.Time` + The date to assume for this frame. Determines the location of the + x-axis and the location of the Earth and Sun. + """ + + equinox = TimeAttribute(default=EQUINOX_J2000) + obstime = TimeAttribute(default=DEFAULT_OBSTIME) + + +HeliocentricTrueEcliptic.__doc__ = HeliocentricTrueEcliptic.__doc__.format( + params=_base_ecliptic_docstring.format("sun's center")) diff --git a/astropy/coordinates/builtin_frames/ecliptic_transforms.py b/astropy/coordinates/builtin_frames/ecliptic_transforms.py new file mode 100644 index 0000000..b93f5fe --- /dev/null +++ b/astropy/coordinates/builtin_frames/ecliptic_transforms.py @@ -0,0 +1,118 @@ +# -*- coding: utf-8 -*- +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" +Contains the transformation functions for getting to/from ecliptic systems. +""" +from __future__ import (absolute_import, unicode_literals, division, + print_function) + +from ... import units as u +from ..baseframe import frame_transform_graph +from ..transformations import FunctionTransformWithFiniteDifference, DynamicMatrixTransform +from ..matrix_utilities import (rotation_matrix, + matrix_product, matrix_transpose) +from ..representation import CartesianRepresentation +from ... import _erfa as erfa + +from .icrs import ICRS +from .gcrs import GCRS +from .ecliptic import GeocentricTrueEcliptic, BarycentricTrueEcliptic, HeliocentricTrueEcliptic +from .utils import get_jd12 +from ..errors import UnitsError + + +def _ecliptic_rotation_matrix(equinox): + # This code calls pmat06 from ERFA, which retrieves the precession + # matrix (including frame bias) according to the IAU 2006 model, but + # leaves out the nutation. This matches what ERFA does in the ecm06 + # function and also brings the results closer to what other libraries + # give (see https://github.com/astropy/astropy/pull/6508). 
+    # notice that this makes the name "TrueEcliptic" misleading, and might
+    # be changed in the future (discussion in the same pull request)
+    jd1, jd2 = get_jd12(equinox, 'tt')
+    rbp = erfa.pmat06(jd1, jd2)
+    obl = erfa.obl06(jd1, jd2)*u.radian
+    return matrix_product(rotation_matrix(obl, 'x'), rbp)
+
+
+@frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
+                                 GCRS, GeocentricTrueEcliptic,
+                                 finite_difference_frameattr_name='equinox')
+def gcrs_to_geoecliptic(gcrs_coo, to_frame):
+    # first get us to a 0 pos/vel GCRS at the target equinox
+    gcrs_coo2 = gcrs_coo.transform_to(GCRS(obstime=to_frame.equinox))
+
+    rmat = _ecliptic_rotation_matrix(to_frame.equinox)
+    newrepr = gcrs_coo2.cartesian.transform(rmat)
+    return to_frame.realize_frame(newrepr)
+
+
+@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, GeocentricTrueEcliptic, GCRS)
+def geoecliptic_to_gcrs(from_coo, gcrs_frame):
+    rmat = _ecliptic_rotation_matrix(from_coo.equinox)
+    newrepr = from_coo.cartesian.transform(matrix_transpose(rmat))
+    gcrs = GCRS(newrepr, obstime=from_coo.equinox)
+
+    # now do any needed offsets (no-op if same obstime and 0 pos/vel)
+    return gcrs.transform_to(gcrs_frame)
+
+
+@frame_transform_graph.transform(DynamicMatrixTransform, ICRS, BarycentricTrueEcliptic)
+def icrs_to_baryecliptic(from_coo, to_frame):
+    return _ecliptic_rotation_matrix(to_frame.equinox)
+
+
+@frame_transform_graph.transform(DynamicMatrixTransform, BarycentricTrueEcliptic, ICRS)
+def baryecliptic_to_icrs(from_coo, to_frame):
+    return matrix_transpose(icrs_to_baryecliptic(to_frame, from_coo))
+
+
+_NEED_ORIGIN_HINT = ("The input {0} coordinates do not have length units. This "
+                     "probably means you created coordinates with lat/lon but "
+                     "no distance.  Heliocentric<->ICRS transforms cannot "
+                     "function in this case because there is an origin shift.")
+
+
+@frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
+                                 ICRS, HeliocentricTrueEcliptic,
+                                 finite_difference_frameattr_name='equinox')
+def icrs_to_helioecliptic(from_coo, to_frame):
+    if not u.m.is_equivalent(from_coo.cartesian.x.unit):
+        raise UnitsError(_NEED_ORIGIN_HINT.format(from_coo.__class__.__name__))
+
+    # get barycentric sun coordinate
+    # this goes here to avoid circular import errors
+    from ..solar_system import get_body_barycentric
+    bary_sun_pos = get_body_barycentric('sun', to_frame.obstime)
+
+    # offset to heliocentric
+    heliocart = from_coo.cartesian - bary_sun_pos
+
+    # now compute the matrix to precess to the right orientation
+    rmat = _ecliptic_rotation_matrix(to_frame.equinox)
+
+    newrepr = heliocart.transform(rmat)
+    return to_frame.realize_frame(newrepr)
+
+
+@frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
+                                 HeliocentricTrueEcliptic, ICRS,
+                                 finite_difference_frameattr_name='equinox')
+def helioecliptic_to_icrs(from_coo, to_frame):
+    if not u.m.is_equivalent(from_coo.cartesian.x.unit):
+        raise UnitsError(_NEED_ORIGIN_HINT.format(from_coo.__class__.__name__))
+
+    # first un-precess from ecliptic to ICRS orientation
+    rmat = _ecliptic_rotation_matrix(from_coo.equinox)
+    intermed_repr = from_coo.cartesian.transform(matrix_transpose(rmat))
+
+    # now offset back to barycentric, which is the correct center for ICRS
+
+    # this goes here to avoid circular import errors
+    from ..solar_system import get_body_barycentric
+
+    # get barycentric sun coordinate
+    bary_sun_pos = get_body_barycentric('sun', from_coo.obstime)
+
+    newrepr = intermed_repr + bary_sun_pos
+    return to_frame.realize_frame(newrepr)
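A short sketch exercising the transforms above (illustrative only; as the
origin-hint error explains, the barycentric/heliocentric cases require a
distance):

    >>> from astropy.coordinates import ICRS, BarycentricTrueEcliptic, HeliocentricTrueEcliptic
    >>> import astropy.units as u
    >>> c = ICRS(ra=45*u.deg, dec=10*u.deg, distance=2*u.au)
    >>> c.transform_to(BarycentricTrueEcliptic)  # doctest: +SKIP
    >>> c.transform_to(HeliocentricTrueEcliptic)  # doctest: +SKIP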
diff --git a/astropy/coordinates/builtin_frames/fk4.py b/astropy/coordinates/builtin_frames/fk4.py
new file mode 100644
index 0000000..1484da6
--- /dev/null
+++ b/astropy/coordinates/builtin_frames/fk4.py
@@ -0,0 +1,220 @@
+# -*- coding: utf-8 -*-
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+from __future__ import (absolute_import, unicode_literals, division,
+                        print_function)
+
+import numpy as np
+
+from ...extern.six.moves import range
+from ... import units as u
+from ..baseframe import frame_transform_graph
+from ..attributes import TimeAttribute
+from ..transformations import (FunctionTransformWithFiniteDifference,
+                               FunctionTransform, DynamicMatrixTransform)
+from ..representation import (CartesianRepresentation,
+                              UnitSphericalRepresentation)
+from .. import earth_orientation as earth
+
+from .utils import EQUINOX_B1950
+from .baseradec import _base_radec_docstring, BaseRADecFrame
+
+
+class FK4(BaseRADecFrame):
+    """
+    A coordinate or frame in the FK4 system.
+
+    Note that this is a barycentric version of FK4 - that is, the origin for
+    this frame is the Solar System Barycenter, *not* the Earth geocenter.
+
+    The frame attributes are listed under **Other Parameters**.
+
+    {params}
+
+    Other parameters
+    ----------------
+    equinox : `~astropy.time.Time`
+        The equinox of this frame.
+    obstime : `~astropy.time.Time`
+        The time this frame was observed.  If ``None``, will be the same as
+        ``equinox``.
+    """
+
+    equinox = TimeAttribute(default=EQUINOX_B1950)
+    obstime = TimeAttribute(default=None, secondary_attribute='equinox')
+
+
+FK4.__doc__ = FK4.__doc__.format(params=_base_radec_docstring)
+
+# the "self" transform
+
+
+@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, FK4, FK4)
+def fk4_to_fk4(fk4coord1, fk4frame2):
+    # deceptively complicated: need to transform to No E-terms FK4, precess, and
+    # then come back, because precession is non-trivial with E-terms
+    fnoe_w_eqx1 = fk4coord1.transform_to(FK4NoETerms(equinox=fk4coord1.equinox))
+    fnoe_w_eqx2 = fnoe_w_eqx1.transform_to(FK4NoETerms(equinox=fk4frame2.equinox))
+    return fnoe_w_eqx2.transform_to(fk4frame2)
+
+
+class FK4NoETerms(BaseRADecFrame):
+    """
+    A coordinate or frame in the FK4 system, but with the E-terms of aberration
+    removed.
+
+    The frame attributes are listed under **Other Parameters**.
+
+    {params}
+
+    Other parameters
+    ----------------
+    equinox : `~astropy.time.Time`
+        The equinox of this frame.
+    obstime : `~astropy.time.Time`
+        The time this frame was observed.  If ``None``, will be the same as
+        ``equinox``.
+    """
+
+    equinox = TimeAttribute(default=EQUINOX_B1950)
+    obstime = TimeAttribute(default=None, secondary_attribute='equinox')
+
+    @staticmethod
+    def _precession_matrix(oldequinox, newequinox):
+        """
+        Compute and return the precession matrix for FK4 using Newcomb's method.
+        Used inside some of the transformation functions.
+
+        Parameters
+        ----------
+        oldequinox : `~astropy.time.Time`
+            The equinox to precess from.
+        newequinox : `~astropy.time.Time`
+            The equinox to precess to.
+
+        Returns
+        -------
+        newcoord : array
+            The precession matrix to transform to the new equinox
+        """
+        return earth._precession_matrix_besselian(oldequinox.byear, newequinox.byear)
+
+
+FK4NoETerms.__doc__ = FK4NoETerms.__doc__.format(params=_base_radec_docstring)
+
+# the "self" transform
+
+
+@frame_transform_graph.transform(DynamicMatrixTransform, FK4NoETerms, FK4NoETerms)
+def fk4noe_to_fk4noe(fk4necoord1, fk4neframe2):
+    return fk4necoord1._precession_matrix(fk4necoord1.equinox, fk4neframe2.equinox)
+
+
+# FK4-NO-E to/from FK4 ----------------------------->
+# Unlike other frames, this module includes *two* frame classes for FK4
+# coordinates - one including the E-terms of aberration (FK4), and
+# one not including them (FK4NoETerms). The following functions
+# implement the transformation between these two.
+def fk4_e_terms(equinox):
+    """
+    Return the e-terms of aberration vector
+
+    Parameters
+    ----------
+    equinox : Time object
+        The equinox for which to compute the e-terms
+    """
+    # Constant of aberration at J2000; from Explanatory Supplement to the
+    # Astronomical Almanac (Seidelmann, 2005).
+    k = 0.0056932  # in degrees (v_earth/c ~ 1e-4 rad ~ 0.0057 deg)
+    k = np.radians(k)
+
+    # Eccentricity of the Earth's orbit
+    e = earth.eccentricity(equinox.jd)
+
+    # Mean longitude of perigee of the solar orbit
+    g = earth.mean_lon_of_perigee(equinox.jd)
+    g = np.radians(g)
+
+    # Obliquity of the ecliptic
+    o = earth.obliquity(equinox.jd, algorithm=1980)
+    o = np.radians(o)
+
+    return e * k * np.sin(g), \
+        -e * k * np.cos(g) * np.cos(o), \
+        -e * k * np.cos(g) * np.sin(o)
+
+
+@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, FK4, FK4NoETerms)
+def fk4_to_fk4_no_e(fk4coord, fk4noeframe):
+    # Extract cartesian vector
+    rep = fk4coord.cartesian
+
+    # Find distance (for re-normalization)
+    d_orig = rep.norm()
+    rep /= d_orig
+
+    # Apply E-terms of aberration. Note that this depends on the equinox (not
+    # the observing time/epoch) of the coordinates.  See issue #1496 for a
+    # discussion of this.
+    eterms_a = CartesianRepresentation(
+        u.Quantity(fk4_e_terms(fk4coord.equinox), u.dimensionless_unscaled,
+                   copy=False), copy=False)
+    rep = rep - eterms_a + eterms_a.dot(rep) * rep
+
+    # Find new distance (for re-normalization)
+    d_new = rep.norm()
+
+    # Renormalize
+    rep *= d_orig / d_new
+
+    # now re-cast into an appropriate Representation, and precess if need be
+    if isinstance(fk4coord.data, UnitSphericalRepresentation):
+        rep = rep.represent_as(UnitSphericalRepresentation)
+
+    # if no obstime was given in the new frame, use the old one for consistency
+    newobstime = fk4coord._obstime if fk4noeframe._obstime is None else fk4noeframe._obstime
+
+    fk4noe = FK4NoETerms(rep, equinox=fk4coord.equinox, obstime=newobstime)
+    if fk4coord.equinox != fk4noeframe.equinox:
+        # precession
+        fk4noe = fk4noe.transform_to(fk4noeframe)
+    return fk4noe
+
+
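For scale, the e-terms returned by ``fk4_e_terms`` form a vector of magnitude
roughly e*k ~ 1.7e-6 rad (about 0.34 arcsec); a quick check (illustrative
only, not part of the imported module):

    >>> import numpy as np
    >>> from astropy.time import Time
    >>> np.linalg.norm(fk4_e_terms(Time(1950.0, format='byear')))  # doctest: +SKIP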
+@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, FK4NoETerms, FK4)
+def fk4_no_e_to_fk4(fk4noecoord, fk4frame):
+    # first precess, if necessary
+    if fk4noecoord.equinox != fk4frame.equinox:
+        fk4noe_w_fk4equinox = FK4NoETerms(equinox=fk4frame.equinox,
+                                          obstime=fk4noecoord.obstime)
+        fk4noecoord = fk4noecoord.transform_to(fk4noe_w_fk4equinox)
+
+    # Extract cartesian vector
+    rep = fk4noecoord.cartesian
+
+    # Find distance (for re-normalization)
+    d_orig = rep.norm()
+    rep /= d_orig
+
+    # Apply E-terms of aberration. Note that this depends on the equinox (not
+    # the observing time/epoch) of the coordinates.  See issue #1496 for a
+    # discussion of this.
+    eterms_a = CartesianRepresentation(
+        u.Quantity(fk4_e_terms(fk4noecoord.equinox), u.dimensionless_unscaled,
+                   copy=False), copy=False)
+
+    rep0 = rep.copy()
+    for _ in range(10):
+        rep = (eterms_a + rep0) / (1. + eterms_a.dot(rep))
+
+    # Find new distance (for re-normalization)
+    d_new = rep.norm()
+
+    # Renormalize
+    rep *= d_orig / d_new
+
+    # now re-cast into an appropriate Representation, and precess if need be
+    if isinstance(fk4noecoord.data, UnitSphericalRepresentation):
+        rep = rep.represent_as(UnitSphericalRepresentation)
+
+    return fk4frame.realize_frame(rep)
diff --git a/astropy/coordinates/builtin_frames/fk4_fk5_transforms.py b/astropy/coordinates/builtin_frames/fk4_fk5_transforms.py
new file mode 100644
index 0000000..c9b2db3
--- /dev/null
+++ b/astropy/coordinates/builtin_frames/fk4_fk5_transforms.py
@@ -0,0 +1,71 @@
+# -*- coding: utf-8 -*-
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+from __future__ import (absolute_import, unicode_literals, division,
+                        print_function)
+
+
+import numpy as np
+
+from ..baseframe import frame_transform_graph
+from ..transformations import DynamicMatrixTransform
+from ..matrix_utilities import matrix_product, matrix_transpose
+
+
+from .fk4 import FK4NoETerms
+from .fk5 import FK5
+from .utils import EQUINOX_B1950, EQUINOX_J2000
+
+
+# FK5 to/from FK4 ------------------->
+# B1950->J2000 matrix from Murray 1989 A&A 218,325 eqn 28
+_B1950_TO_J2000_M = np.array(
+    [[0.9999256794956877, -0.0111814832204662, -0.0048590038153592],
+     [0.0111814832391717, 0.9999374848933135, -0.0000271625947142],
+     [0.0048590037723143, -0.0000271702937440, 0.9999881946023742]])
+
+_FK4_CORR = np.array(
+    [[-0.0026455262, -1.1539918689, +2.1111346190],
+     [+1.1540628161, -0.0129042997, +0.0236021478],
+     [-2.1112979048, -0.0056024448, +0.0102587734]]) * 1.e-6
+
+
+def _fk4_B_matrix(obstime):
+    """
+    This is a correction term in the FK4 transformations because FK4 is a
+    rotating system - see Murray 1989 eqn 29
+    """
+    # Note this is *julian century*, not besselian
+    T = (obstime.jyear - 1950.) / 100.
+    if getattr(T, 'shape', ()):
+        # Ensure we broadcast possibly arrays of times properly.
+        T.shape += (1, 1)
+    return _B1950_TO_J2000_M + _FK4_CORR * T
+
+
+# This transformation can't be static because the observation date is needed.
+@frame_transform_graph.transform(DynamicMatrixTransform, FK4NoETerms, FK5)
+def fk4_no_e_to_fk5(fk4noecoord, fk5frame):
+    # Correction terms for FK4 being a rotating system
+    B = _fk4_B_matrix(fk4noecoord.obstime)
+
+    # construct both precession matrices - if the equinoxes are B1950 and
+    # J2000, these are just identity matrices
+    pmat1 = fk4noecoord._precession_matrix(fk4noecoord.equinox, EQUINOX_B1950)
+    pmat2 = fk5frame._precession_matrix(EQUINOX_J2000, fk5frame.equinox)
+
+    return matrix_product(pmat2, B, pmat1)
+
+
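A sketch of the full FK4 -> FK5 route these matrices enable (illustrative
only; when starting from FK4, the E-terms functions in fk4.py run first):

    >>> from astropy.coordinates import FK4, FK5
    >>> import astropy.units as u
    >>> FK4(ra=10*u.deg, dec=20*u.deg).transform_to(FK5)  # doctest: +SKIP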
+# This transformation can't be static because the observation date is needed.
+@frame_transform_graph.transform(DynamicMatrixTransform, FK5, FK4NoETerms)
+def fk5_to_fk4_no_e(fk5coord, fk4noeframe):
+    # Get transposed version of the rotating correction terms... so with the
+    # transpose this takes us from FK5/J2000 to FK4/B1950
+    B = matrix_transpose(_fk4_B_matrix(fk4noeframe.obstime))
+
+    # construct both precession matrices - if the equinoxes are B1950 and
+    # J2000, these are just identity matrices
+    pmat1 = fk5coord._precession_matrix(fk5coord.equinox, EQUINOX_J2000)
+    pmat2 = fk4noeframe._precession_matrix(EQUINOX_B1950, fk4noeframe.equinox)
+
+    return matrix_product(pmat2, B, pmat1)
diff --git a/astropy/coordinates/builtin_frames/fk5.py b/astropy/coordinates/builtin_frames/fk5.py
new file mode 100644
index 0000000..2a84c16
--- /dev/null
+++ b/astropy/coordinates/builtin_frames/fk5.py
@@ -0,0 +1,63 @@
+# -*- coding: utf-8 -*-
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+from __future__ import (absolute_import, unicode_literals, division,
+                        print_function)
+
+from ..baseframe import frame_transform_graph
+from ..attributes import TimeAttribute
+from ..transformations import DynamicMatrixTransform
+from .. import earth_orientation as earth
+
+from .baseradec import _base_radec_docstring, BaseRADecFrame
+from .utils import EQUINOX_J2000
+
+
+class FK5(BaseRADecFrame):
+    """
+    A coordinate or frame in the FK5 system.
+
+    Note that this is a barycentric version of FK5 - that is, the origin for
+    this frame is the Solar System Barycenter, *not* the Earth geocenter.
+
+    The frame attributes are listed under **Other Parameters**.
+
+    {params}
+
+    Other parameters
+    ----------------
+    equinox : `~astropy.time.Time`
+        The equinox of this frame.
+    """
+
+    equinox = TimeAttribute(default=EQUINOX_J2000)
+
+    @staticmethod
+    def _precession_matrix(oldequinox, newequinox):
+        """
+        Compute and return the precession matrix for FK5 based on Capitaine et
+        al. 2003/IAU2006.  Used inside some of the transformation functions.
+
+        Parameters
+        ----------
+        oldequinox : `~astropy.time.Time`
+            The equinox to precess from.
+        newequinox : `~astropy.time.Time`
+            The equinox to precess to.
+
+        Returns
+        -------
+        newcoord : array
+            The precession matrix to transform to the new equinox
+        """
+        return earth.precession_matrix_Capitaine(oldequinox, newequinox)
+
+
+FK5.__doc__ = FK5.__doc__.format(params=_base_radec_docstring)
+
+# This is the "self-transform".  Defined at module level because the decorator
+# needs a reference to the FK5 class
+
+
+@frame_transform_graph.transform(DynamicMatrixTransform, FK5, FK5)
+def fk5_to_fk5(fk5coord1, fk5frame2):
+    return fk5coord1._precession_matrix(fk5coord1.equinox, fk5frame2.equinox)
diff --git a/astropy/coordinates/builtin_frames/galactic.py b/astropy/coordinates/builtin_frames/galactic.py
new file mode 100644
index 0000000..c7de38e
--- /dev/null
+++ b/astropy/coordinates/builtin_frames/galactic.py
@@ -0,0 +1,123 @@
+# -*- coding: utf-8 -*-
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+from __future__ import (absolute_import, unicode_literals, division,
+                        print_function)
+
+from ... import units as u
+from ..angles import Angle
+from .. import representation as r
+from ..baseframe import BaseCoordinateFrame, RepresentationMapping
+
+# these are needed for defining the NGP
+from .fk5 import FK5
+from .fk4 import FK4NoETerms
+
+
+class Galactic(BaseCoordinateFrame):
+    """
+    A coordinate or frame in the Galactic coordinate system.
+
+    This frame is used in a variety of Galactic contexts because it has as its
+    x-y plane the plane of the Milky Way.  The positive x direction (i.e., the
+    l=0, b=0 direction) points to the center of the Milky Way and the z-axis
+    points toward the North Galactic Pole (following the IAU's 1958 definition
+    [1]_).  However, unlike the `~astropy.coordinates.Galactocentric` frame, the
+    *origin* of this frame in 3D space is the solar system barycenter, not
+    the center of the Milky Way.
+
+    Parameters
+    ----------
+    representation : `BaseRepresentation` or None
+        A representation object or None to have no data (or use the other keywords)
+
+    l : `Angle`, optional, must be keyword
+        The Galactic longitude for this object (``b`` must also be given and
+        ``representation`` must be None).
+    b : `Angle`, optional, must be keyword
+        The Galactic latitude for this object (``l`` must also be given and
+        ``representation`` must be None).
+    distance : `~astropy.units.Quantity`, optional, must be keyword
+        The distance for this object along the line-of-sight.
+
+    pm_l_cosb : :class:`~astropy.units.Quantity`, optional, must be keyword
+        The proper motion in Galactic longitude (including the ``cos(b)`` term)
+        for this object (``pm_b`` must also be given).
+    pm_b : :class:`~astropy.units.Quantity`, optional, must be keyword
+        The proper motion in Galactic latitude for this object (``pm_l_cosb``
+        must also be given).
+    radial_velocity : :class:`~astropy.units.Quantity`, optional, must be keyword
+        The radial velocity of this object.
+
+    copy : bool, optional
+        If `True` (default), make copies of the input coordinate arrays.
+        Can only be passed in as a keyword argument.
+
+    differential_cls : `BaseDifferential`, dict, optional
+        A differential class or dictionary of differential classes (currently
+        only a velocity differential with key 's' is supported). This sets
+        the expected input differential class, thereby changing the expected
+        keyword arguments of the data passed in. For example, passing
+        ``differential_cls=CartesianDifferential`` will make the classes
+        expect velocity data with the argument names ``v_x, v_y, v_z``.
+
+    Notes
+    -----
+    .. [1] Blaauw, A.; Gum, C. S.; Pawsey, J. L.; Westerhout, G. (1960), "The
+       new I.A.U. system of galactic coordinates (1958 revision),"
+       `MNRAS, Vol 121, pp.123 <http://adsabs.harvard.edu/abs/1960MNRAS.121..123B>`_.
+ """ + + frame_specific_representation_info = { + r.SphericalRepresentation: [ + RepresentationMapping('lon', 'l'), + RepresentationMapping('lat', 'b') + ], + r.CartesianRepresentation: [ + RepresentationMapping('x', 'u'), + RepresentationMapping('y', 'v'), + RepresentationMapping('z', 'w') + ], + r.CartesianDifferential: [ + RepresentationMapping('d_x', 'U', u.km/u.s), + RepresentationMapping('d_y', 'V', u.km/u.s), + RepresentationMapping('d_z', 'W', u.km/u.s) + ], + r.SphericalCosLatDifferential: [ + RepresentationMapping('d_lon_coslat', 'pm_l_cosb', u.mas/u.yr), + RepresentationMapping('d_lat', 'pm_b', u.mas/u.yr), + RepresentationMapping('d_distance', 'radial_velocity', u.km/u.s), + ], + r.SphericalDifferential: [ + RepresentationMapping('d_lon', 'pm_l', u.mas/u.yr), + RepresentationMapping('d_lat', 'pm_b', u.mas/u.yr), + RepresentationMapping('d_distance', 'radial_velocity', u.km/u.s), + ] + } + frame_specific_representation_info[r.UnitSphericalRepresentation] = \ + frame_specific_representation_info[r.SphericalRepresentation] + frame_specific_representation_info[r.UnitSphericalCosLatDifferential] = \ + frame_specific_representation_info[r.SphericalCosLatDifferential] + frame_specific_representation_info[r.UnitSphericalDifferential] = \ + frame_specific_representation_info[r.SphericalDifferential] + + default_representation = r.SphericalRepresentation + default_differential = r.SphericalCosLatDifferential + + # North galactic pole and zeropoint of l in FK4/FK5 coordinates. Needed for + # transformations to/from FK4/5 + + # These are from the IAU's definition of galactic coordinates + _ngp_B1950 = FK4NoETerms(ra=192.25*u.degree, dec=27.4*u.degree) + _lon0_B1950 = Angle(123, u.degree) + + # These are *not* from Reid & Brunthaler 2004 - instead, they were + # derived by doing: + # + # >>> FK4NoETerms(ra=192.25*u.degree, dec=27.4*u.degree).transform_to(FK5) + # + # This gives better consistency with other codes than using the values + # from Reid & Brunthaler 2004 and the best self-consistency between FK5 + # -> Galactic and FK5 -> FK4 -> Galactic. The lon0 angle was found by + # optimizing the self-consistency. 
+    _ngp_J2000 = FK5(ra=192.8594812065348*u.degree, dec=27.12825118085622*u.degree)
+    _lon0_J2000 = Angle(122.9319185680026, u.degree)
diff --git a/astropy/coordinates/builtin_frames/galactic_transforms.py b/astropy/coordinates/builtin_frames/galactic_transforms.py
new file mode 100644
index 0000000..2682a7d
--- /dev/null
+++ b/astropy/coordinates/builtin_frames/galactic_transforms.py
@@ -0,0 +1,47 @@
+# -*- coding: utf-8 -*-
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+from __future__ import (absolute_import, unicode_literals, division,
+                        print_function)
+
+from ..matrix_utilities import (rotation_matrix,
+                                matrix_product, matrix_transpose)
+from ..baseframe import frame_transform_graph
+from ..transformations import DynamicMatrixTransform
+
+from .fk5 import FK5
+from .fk4 import FK4NoETerms
+from .utils import EQUINOX_B1950, EQUINOX_J2000
+from .galactic import Galactic
+
+
+# Galactic to/from FK4/FK5 ----------------------->
+# can't be static because the equinox is needed
+@frame_transform_graph.transform(DynamicMatrixTransform, FK5, Galactic)
+def fk5_to_gal(fk5coord, galframe):
+    # need to precess to J2000 first
+    pmat = fk5coord._precession_matrix(fk5coord.equinox, EQUINOX_J2000)
+    mat1 = rotation_matrix(180 - Galactic._lon0_J2000.degree, 'z')
+    mat2 = rotation_matrix(90 - Galactic._ngp_J2000.dec.degree, 'y')
+    mat3 = rotation_matrix(Galactic._ngp_J2000.ra.degree, 'z')
+
+    return matrix_product(mat1, mat2, mat3, pmat)
+
+
+@frame_transform_graph.transform(DynamicMatrixTransform, Galactic, FK5)
+def _gal_to_fk5(galcoord, fk5frame):
+    return matrix_transpose(fk5_to_gal(fk5frame, galcoord))
+
+
+@frame_transform_graph.transform(DynamicMatrixTransform, FK4NoETerms, Galactic)
+def fk4_to_gal(fk4coords, galframe):
+    mat1 = rotation_matrix(180 - Galactic._lon0_B1950.degree, 'z')
+    mat2 = rotation_matrix(90 - Galactic._ngp_B1950.dec.degree, 'y')
+    mat3 = rotation_matrix(Galactic._ngp_B1950.ra.degree, 'z')
+    matprec = fk4coords._precession_matrix(fk4coords.equinox, EQUINOX_B1950)
+
+    return matrix_product(mat1, mat2, mat3, matprec)
+
+
+@frame_transform_graph.transform(DynamicMatrixTransform, Galactic, FK4NoETerms)
+def gal_to_fk4(galcoords, fk4frame):
+    return matrix_transpose(fk4_to_gal(fk4frame, galcoords))
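A usage sketch for the Galactic transforms above (illustrative only):

    >>> from astropy.coordinates import FK5, Galactic
    >>> import astropy.units as u
    >>> FK5(ra=266.4*u.deg, dec=-28.94*u.deg).transform_to(Galactic)  # doctest: +SKIP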
diff --git a/astropy/coordinates/builtin_frames/galactocentric.py b/astropy/coordinates/builtin_frames/galactocentric.py
new file mode 100644
index 0000000..0cf900d
--- /dev/null
+++ b/astropy/coordinates/builtin_frames/galactocentric.py
@@ -0,0 +1,311 @@
+# -*- coding: utf-8 -*-
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+from __future__ import (absolute_import, unicode_literals, division,
+                        print_function)
+
+import warnings
+
+import numpy as np
+
+from ... import units as u
+from ...utils.exceptions import AstropyDeprecationWarning
+from ..angles import Angle
+from ..matrix_utilities import rotation_matrix, matrix_product, matrix_transpose
+from .. import representation as r
+from ..baseframe import (BaseCoordinateFrame, frame_transform_graph,
+                         RepresentationMapping)
+from ..attributes import (Attribute, CoordinateAttribute,
+                          QuantityAttribute,
+                          DifferentialAttribute)
+from ..transformations import AffineTransform
+from ..errors import ConvertError
+
+from .icrs import ICRS
+
+# Measured by minimizing the difference between a plane of coordinates along
+# l=0, b=[-90,90] and the Galactocentric x-z plane
+# This is not used directly, but accessed via `get_roll0`.  We define it here to
+# prevent having to create new Angle objects every time `get_roll0` is called.
+_ROLL0 = Angle(58.5986320306*u.degree)
+
+
+class Galactocentric(BaseCoordinateFrame):
+    r"""
+    A coordinate or frame in the Galactocentric system.  This frame
+    requires specifying the Sun-Galactic center distance, and optionally
+    the height of the Sun above the Galactic midplane.
+
+    The position of the Sun is assumed to be on the x axis of the final,
+    right-handed system.  That is, the x axis points from the position of
+    the Sun projected to the Galactic midplane to the Galactic center --
+    roughly towards :math:`(l,b) = (0^\circ,0^\circ)`.  For the default
+    transformation (:math:`{\rm roll}=0^\circ`), the y axis points roughly
+    towards Galactic longitude :math:`l=90^\circ`, and the z axis points
+    roughly towards the North Galactic Pole (:math:`b=90^\circ`).
+
+    The default position of the Galactic Center in ICRS coordinates is
+    taken from Reid et al. 2004,
+    http://adsabs.harvard.edu/abs/2004ApJ...616..872R.
+
+    .. math::
+
+        {\rm RA} = 17:45:37.224~{\rm hr}\\
+        {\rm Dec} = -28:56:10.23~{\rm deg}
+
+    The default distance to the Galactic Center is 8.3 kpc, e.g.,
+    Gillessen et al. (2009),
+    https://ui.adsabs.harvard.edu/#abs/2009ApJ...692.1075G/abstract
+
+    The default height of the Sun above the Galactic midplane is taken to
+    be 27 pc, as measured by Chen et al. (2001),
+    https://ui.adsabs.harvard.edu/#abs/2001ApJ...553..184C/abstract
+
+    The default solar motion relative to the Galactic center is taken from a
+    combination of Schönrich et al. (2010) [for the peculiar velocity] and
+    Bovy (2015) [for the circular velocity at the solar radius],
+    https://ui.adsabs.harvard.edu/#abs/2010MNRAS.403.1829S/abstract
+    https://ui.adsabs.harvard.edu/#abs/2015ApJS..216...29B/abstract
+
+    For a more detailed look at the math behind this transformation, see
+    the document :ref:`coordinates-galactocentric`.
+
+    The frame attributes are listed under **Other Parameters**.
+
+    Parameters
+    ----------
+    representation : `~astropy.coordinates.representation.BaseRepresentation` or None
+        A representation object or None to have no data (or use the other
+        keywords)
+
+    x : `~astropy.units.Quantity`, optional
+        Cartesian, Galactocentric :math:`x` position component.
+    y : `~astropy.units.Quantity`, optional
+        Cartesian, Galactocentric :math:`y` position component.
+    z : `~astropy.units.Quantity`, optional
+        Cartesian, Galactocentric :math:`z` position component.
+
+    v_x : `~astropy.units.Quantity`, optional
+        Cartesian, Galactocentric :math:`v_x` velocity component.
+    v_y : `~astropy.units.Quantity`, optional
+        Cartesian, Galactocentric :math:`v_y` velocity component.
+    v_z : `~astropy.units.Quantity`, optional
+        Cartesian, Galactocentric :math:`v_z` velocity component.
+
+    copy : bool, optional
+        If `True` (default), make copies of the input coordinate arrays.
+        Can only be passed in as a keyword argument.
+
+    differential_cls : `BaseDifferential`, dict, optional
+        A differential class or dictionary of differential classes (currently
+        only a velocity differential with key 's' is supported). This sets
+        the expected input differential class, thereby changing the expected
+        keyword arguments of the data passed in. For example, passing
+        ``differential_cls=CartesianDifferential`` will make the classes
+        expect velocity data with the argument names ``v_x, v_y, v_z``.
+
+    Other parameters
+    ----------------
+    galcen_coord : `ICRS`, optional, must be keyword
+        The ICRS coordinates of the Galactic center.
+    galcen_distance : `~astropy.units.Quantity`, optional, must be keyword
+        The distance from the sun to the Galactic center.
+    galcen_v_sun : `~astropy.coordinates.representation.CartesianDifferential`, optional, must be keyword
+        The velocity of the sun *in the Galactocentric frame* as Cartesian
+        velocity components.
+    z_sun : `~astropy.units.Quantity`, optional, must be keyword
+        The distance from the sun to the Galactic midplane.
+    roll : `Angle`, optional, must be keyword
+        The angle to rotate about the final x-axis, relative to the
+        orientation for Galactic.  For example, if this roll angle is 0,
+        the final x-z plane will align with the Galactic coordinates x-z
+        plane.  Unless you really know what this means, you probably should
+        not change this!
+
+    Examples
+    --------
+    To transform to the Galactocentric frame with the default
+    frame attributes, pass the uninstantiated class name to the
+    ``transform_to()`` method of a coordinate frame or
+    `~astropy.coordinates.SkyCoord` object::
+
+        >>> import astropy.units as u
+        >>> import astropy.coordinates as coord
+        >>> c = coord.ICRS(ra=[158.3122, 24.5] * u.degree,
+        ...                dec=[-17.3, 81.52] * u.degree,
+        ...                distance=[11.5, 24.12] * u.kpc)
+        >>> c.transform_to(coord.Galactocentric) # doctest: +FLOAT_CMP
+        <Galactocentric Coordinate (galcen_coord=<ICRS Coordinate: (ra, dec) in deg
+            ( 266.4051, -28.936175)>, galcen_distance=8.3 kpc, galcen_v_sun=( 11.1, 232.24, 7.25) km / s, z_sun=27.0 pc, roll=0.0 deg): (x, y, z) in kpc
+            [( -9.6083819 , -9.40062188, 6.52056066),
+             (-21.28302307, 18.76334013, 7.84693855)]>
+
+    To specify a custom set of parameters, you have to include extra keyword
+    arguments when initializing the Galactocentric frame object::
+
+        >>> c.transform_to(coord.Galactocentric(galcen_distance=8.1*u.kpc)) # doctest: +FLOAT_CMP
+        <Galactocentric Coordinate (galcen_coord=<ICRS Coordinate: (ra, dec) in deg
+            ( 266.4051, -28.936175)>, galcen_distance=8.1 kpc, galcen_v_sun=( 11.1, 232.24, 7.25) km / s, z_sun=27.0 pc, roll=0.0 deg): (x, y, z) in kpc
+            [( -9.40785924, -9.40062188, 6.52066574),
+             (-21.08239383, 18.76334013, 7.84798135)]>
+
+    Similarly, transforming from the Galactocentric frame to another coordinate frame::
+
+        >>> c = coord.Galactocentric(x=[-8.3, 4.5] * u.kpc,
+        ...                          y=[0., 81.52] * u.kpc,
+        ...                          z=[0.027, 24.12] * u.kpc)
+        >>> c.transform_to(coord.ICRS) # doctest: +FLOAT_CMP
+
+    Or, with custom specification of the Galactic center::
+
+        >>> c = coord.Galactocentric(x=[-8.0, 4.5] * u.kpc,
+        ...                          y=[0., 81.52] * u.kpc,
+        ...                          z=[21.0, 24120.0] * u.pc,
+        ...                          z_sun=21 * u.pc, galcen_distance=8. * u.kpc)
+        >>> c.transform_to(coord.ICRS) # doctest: +FLOAT_CMP
+
+    """
+    frame_specific_representation_info = {
+        r.CartesianDifferential: [
+            RepresentationMapping('d_x', 'v_x', u.km/u.s),
+            RepresentationMapping('d_y', 'v_y', u.km/u.s),
+            RepresentationMapping('d_z', 'v_z', u.km/u.s),
+        ],
+    }
+
+    default_representation = r.CartesianRepresentation
+    default_differential = r.CartesianDifferential
+
+    # frame attributes
+    galcen_coord = CoordinateAttribute(default=ICRS(ra=266.4051*u.degree,
+                                                    dec=-28.936175*u.degree),
+                                       frame=ICRS)
+    galcen_distance = QuantityAttribute(default=8.3*u.kpc)
+
+    galcen_v_sun = DifferentialAttribute(
+        default=r.CartesianDifferential([11.1, 220+12.24, 7.25] * u.km/u.s),
+        allowed_classes=[r.CartesianDifferential])
+
+    z_sun = QuantityAttribute(default=27.*u.pc)
+    roll = QuantityAttribute(default=0.*u.deg)
+
+    def __init__(self, *args, **kwargs):
+
+        # backwards-compatibility
+        if ('galcen_ra' in kwargs or 'galcen_dec' in kwargs):
+            warnings.warn("The arguments 'galcen_ra' and 'galcen_dec' are "
+                          "deprecated in favor of specifying the sky coordinate"
+                          " as a CoordinateAttribute using the 'galcen_coord' "
+                          "argument", AstropyDeprecationWarning)
+
+            galcen_kw = dict()
+            galcen_kw['ra'] = kwargs.pop('galcen_ra', self.galcen_coord.ra)
+            galcen_kw['dec'] = kwargs.pop('galcen_dec', self.galcen_coord.dec)
+            kwargs['galcen_coord'] = ICRS(**galcen_kw)
+
+        super(Galactocentric, self).__init__(*args, **kwargs)
+
+    @property
+    def galcen_ra(self):
+        warnings.warn("The attribute 'galcen_ra' is deprecated. Use "
+                      "'.galcen_coord.ra' instead.", AstropyDeprecationWarning)
+        return self.galcen_coord.ra
+
+    @property
+    def galcen_dec(self):
+        warnings.warn("The attribute 'galcen_dec' is deprecated. Use "
+                      "'.galcen_coord.dec' instead.", AstropyDeprecationWarning)
+        return self.galcen_coord.dec
+
+    @classmethod
+    def get_roll0(cls):
+        """
+        The additional roll angle (about the final x axis) necessary to align
+        the final z axis to match the Galactic yz-plane.  Setting the ``roll``
+        frame attribute to -this method's return value removes this rotation,
+        allowing the use of the `Galactocentric` frame in more general contexts.
+        """
+        # note that the actual value is defined at the module level.  We make it
+        # a property here because this module isn't actually part of the public
+        # API, so it's better for it to be accessible from Galactocentric
+        return _ROLL0
+
+# ICRS to/from Galactocentric ----------------------->
+
+
+def get_matrix_vectors(galactocentric_frame, inverse=False):
+    """
+    Use the ``inverse`` argument to get the inverse transformation - i.e., the
+    matrix and offsets that go from Galactocentric to ICRS.
+ """ + # shorthand + gcf = galactocentric_frame + + # rotation matrix to align x(ICRS) with the vector to the Galactic center + mat1 = rotation_matrix(-gcf.galcen_coord.dec, 'y') + mat2 = rotation_matrix(gcf.galcen_coord.ra, 'z') + # extra roll away from the Galactic x-z plane + mat0 = rotation_matrix(gcf.get_roll0() - gcf.roll, 'x') + + # construct transformation matrix and use it + R = matrix_product(mat0, mat1, mat2) + + # Now need to translate by Sun-Galactic center distance around x' and + # rotate about y' to account for tilt due to Sun's height above the plane + translation = r.CartesianRepresentation(gcf.galcen_distance * [1., 0., 0.]) + z_d = gcf.z_sun / gcf.galcen_distance + H = rotation_matrix(-np.arcsin(z_d), 'y') + + # compute total matrices + A = matrix_product(H, R) + + # Now we re-align the translation vector to account for the Sun's height + # above the midplane + offset = -translation.transform(H) + + if inverse: + # the inverse of a rotation matrix is a transpose, which is much faster + # and more stable to compute + A = matrix_transpose(A) + offset = (-offset).transform(A) + offset_v = r.CartesianDifferential.from_cartesian( + (-gcf.galcen_v_sun).to_cartesian().transform(A)) + offset = offset.with_differentials(offset_v) + + else: + offset = offset.with_differentials(gcf.galcen_v_sun) + + return A, offset + + +def _check_coord_repr_diff_types(c): + if isinstance(c.data, r.UnitSphericalRepresentation): + raise ConvertError("Transforming to/from a Galactocentric frame " + "requires a 3D coordinate, e.g. (angle, angle, " + "distance) or (x, y, z).") + + if ('s' in c.data.differentials and + isinstance(c.data.differentials['s'], + (r.UnitSphericalDifferential, + r.UnitSphericalCosLatDifferential, + r.RadialDifferential))): + raise ConvertError("Transforming to/from a Galactocentric frame " + "requires a 3D velocity, e.g., proper motion " + "components and radial velocity.") + + +@frame_transform_graph.transform(AffineTransform, ICRS, Galactocentric) +def icrs_to_galactocentric(icrs_coord, galactocentric_frame): + _check_coord_repr_diff_types(icrs_coord) + return get_matrix_vectors(galactocentric_frame) + + +@frame_transform_graph.transform(AffineTransform, Galactocentric, ICRS) +def galactocentric_to_icrs(galactocentric_coord, icrs_frame): + _check_coord_repr_diff_types(galactocentric_coord) + return get_matrix_vectors(galactocentric_coord, inverse=True) diff --git a/astropy/coordinates/builtin_frames/gcrs.py b/astropy/coordinates/builtin_frames/gcrs.py new file mode 100644 index 0000000..9727253 --- /dev/null +++ b/astropy/coordinates/builtin_frames/gcrs.py @@ -0,0 +1,102 @@ +# -*- coding: utf-8 -*- +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, unicode_literals, division, + print_function) + +from ... import units as u +from ..attributes import (TimeAttribute, + CartesianRepresentationAttribute) +from .utils import DEFAULT_OBSTIME, EQUINOX_J2000 +from .baseradec import _base_radec_docstring, BaseRADecFrame + + +class GCRS(BaseRADecFrame): + """ + A coordinate or frame in the Geocentric Celestial Reference System (GCRS). + + GCRS is distinct form ICRS mainly in that it is relative to the Earth's + center-of-mass rather than the solar system Barycenter. That means this + frame includes the effects of aberration (unlike ICRS). For more background + on the GCRS, see the references provided in the + :ref:`astropy-coordinates-seealso` section of the documentation. 
diff --git a/astropy/coordinates/builtin_frames/gcrs.py b/astropy/coordinates/builtin_frames/gcrs.py
new file mode 100644
index 0000000..9727253
--- /dev/null
+++ b/astropy/coordinates/builtin_frames/gcrs.py
@@ -0,0 +1,102 @@
+# -*- coding: utf-8 -*-
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+from __future__ import (absolute_import, unicode_literals, division,
+                        print_function)
+
+from ... import units as u
+from ..attributes import (TimeAttribute,
+                          CartesianRepresentationAttribute)
+from .utils import DEFAULT_OBSTIME, EQUINOX_J2000
+from .baseradec import _base_radec_docstring, BaseRADecFrame
+
+
+class GCRS(BaseRADecFrame):
+    """
+    A coordinate or frame in the Geocentric Celestial Reference System (GCRS).
+
+    GCRS is distinct from ICRS mainly in that it is relative to the Earth's
+    center-of-mass rather than the solar system Barycenter.  That means this
+    frame includes the effects of aberration (unlike ICRS).  For more background
+    on the GCRS, see the references provided in the
+    :ref:`astropy-coordinates-seealso` section of the documentation.  (Of
+    particular note is Section 1.2 of
+    `USNO Circular 179 <http://aa.usno.navy.mil/publications/docs/Circular_179.php>`_)
+
+    This frame also includes frames that are defined *relative* to the Earth,
+    but that are offset (in both position and velocity) from the Earth.
+
+    The frame attributes are listed under **Other Parameters**.
+
+    {params}
+
+    Other parameters
+    ----------------
+    obstime : `~astropy.time.Time`
+        The time at which the observation is taken.  Used for determining the
+        position of the Earth.
+    obsgeoloc : `~astropy.coordinates.CartesianRepresentation`, `~astropy.units.Quantity`
+        The position of the observer relative to the center-of-mass of the
+        Earth, oriented the same as BCRS/ICRS.  Either [0, 0, 0],
+        `~astropy.coordinates.CartesianRepresentation`, or proper input for one,
+        i.e., a `~astropy.units.Quantity` with shape (3, ...) and length units.
+        Defaults to [0, 0, 0], meaning "true" GCRS.
+    obsgeovel : `~astropy.coordinates.CartesianRepresentation`, `~astropy.units.Quantity`
+        The velocity of the observer relative to the center-of-mass of the
+        Earth, oriented the same as BCRS/ICRS.  Either [0, 0, 0],
+        `~astropy.coordinates.CartesianRepresentation`, or proper input for one,
+        i.e., a `~astropy.units.Quantity` with shape (3, ...) and velocity
+        units.  Defaults to [0, 0, 0], meaning "true" GCRS.
+    """
+
+    obstime = TimeAttribute(default=DEFAULT_OBSTIME)
+    obsgeoloc = CartesianRepresentationAttribute(default=[0, 0, 0],
+                                                 unit=u.m)
+    obsgeovel = CartesianRepresentationAttribute(default=[0, 0, 0],
+                                                 unit=u.m/u.s)
+
+
+GCRS.__doc__ = GCRS.__doc__.format(params=_base_radec_docstring)
+
+# The "self-transform" is defined in icrs_cirs_transforms.py, because in
+# the current implementation it goes through ICRS (like CIRS)
+
+
+class PrecessedGeocentric(BaseRADecFrame):
+    """
+    A coordinate frame defined in a similar manner as GCRS, but precessed to a
+    requested (mean) equinox.  Note that this does *not* end up the same as
+    regular GCRS even for J2000 equinox, because the GCRS orientation is fixed
+    to that of ICRS, which is not quite the same as the dynamical J2000
+    orientation.
+
+    The frame attributes are listed under **Other Parameters**.
+
+    {params}
+
+    Other parameters
+    ----------------
+    equinox : `~astropy.time.Time`
+        The (mean) equinox to precess the coordinates to.
+    obstime : `~astropy.time.Time`
+        The time at which the observation is taken.  Used for determining the
+        position of the Earth.
+    obsgeoloc : `~astropy.coordinates.CartesianRepresentation`, `~astropy.units.Quantity`
+        The position of the observer relative to the center-of-mass of the Earth,
+        oriented the same as BCRS/ICRS.  Either [0, 0, 0], `~astropy.coordinates.CartesianRepresentation`,
+        or proper input for one, i.e., a `~astropy.units.Quantity` with shape (3, ...) and length units.
+        Defaults to [0, 0, 0], meaning "true" Geocentric.
+    obsgeovel : `~astropy.coordinates.CartesianRepresentation`, `~astropy.units.Quantity`
+        The velocity of the observer relative to the center-of-mass of the Earth,
+        oriented the same as BCRS/ICRS.  Either [0, 0, 0], `~astropy.coordinates.CartesianRepresentation`,
+        or proper input for one, i.e., a `~astropy.units.Quantity` with shape (3, ...) and velocity units.
+        Defaults to [0, 0, 0], meaning "true" Geocentric.
+ """ + + equinox = TimeAttribute(default=EQUINOX_J2000) + obstime = TimeAttribute(default=DEFAULT_OBSTIME) + obsgeoloc = CartesianRepresentationAttribute(default=[0, 0, 0], unit=u.m) + obsgeovel = CartesianRepresentationAttribute(default=[0, 0, 0], unit=u.m/u.s) + + +PrecessedGeocentric.__doc__ = PrecessedGeocentric.__doc__.format( + params=_base_radec_docstring) diff --git a/astropy/coordinates/builtin_frames/hcrs.py b/astropy/coordinates/builtin_frames/hcrs.py new file mode 100644 index 0000000..bf69e34 --- /dev/null +++ b/astropy/coordinates/builtin_frames/hcrs.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, unicode_literals, division, + print_function) + +from ..attributes import TimeAttribute +from .utils import DEFAULT_OBSTIME +from .baseradec import _base_radec_docstring, BaseRADecFrame + + +class HCRS(BaseRADecFrame): + """ + A coordinate or frame in a Heliocentric system, with axes aligned to ICRS. + + The ICRS has an origin at the Barycenter and axes which are fixed with + respect to space. + + This coordinate system is distinct from ICRS mainly in that it is relative + to the Sun's center-of-mass rather than the solar system Barycenter. + In principle, therefore, this frame should include the effects of + aberration (unlike ICRS), but this is not done, since they are very small, + of the order of 8 milli-arcseconds. + + For more background on the ICRS and related coordinate transformations, see + the references provided in the :ref:`astropy-coordinates-seealso` section of + the documentation. + + The frame attributes are listed under **Other Parameters**. + + {params} + + Other parameters + ---------------- + obstime : `~astropy.time.Time` + The time at which the observation is taken. Used for determining the + position of the Sun. + """ + + obstime = TimeAttribute(default=DEFAULT_OBSTIME) + + +HCRS.__doc__ = HCRS.__doc__.format(params=_base_radec_docstring) + +# Transformations are defined in icrs_circ_transforms.py diff --git a/astropy/coordinates/builtin_frames/icrs.py b/astropy/coordinates/builtin_frames/icrs.py new file mode 100644 index 0000000..fe5e29e --- /dev/null +++ b/astropy/coordinates/builtin_frames/icrs.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, unicode_literals, division, + print_function) + +from .baseradec import _base_radec_docstring, BaseRADecFrame + + +class ICRS(BaseRADecFrame): + """ + A coordinate or frame in the ICRS system. + + If you're looking for "J2000" coordinates, and aren't sure if you want to + use this or `~astropy.coordinates.FK5`, you probably want to use ICRS. It's + more well-defined as a catalog coordinate and is an inertial system, and is + very close (within tens of milliarcseconds) to J2000 equatorial. + + For more background on the ICRS and related coordinate transformations, see the + references provided in the :ref:`astropy-coordinates-seealso` section of the + documentation. 
+
+    {params}
+    """
+
+
+ICRS.__doc__ = ICRS.__doc__.format(params=_base_radec_docstring)
diff --git a/astropy/coordinates/builtin_frames/icrs_cirs_transforms.py b/astropy/coordinates/builtin_frames/icrs_cirs_transforms.py
new file mode 100644
index 0000000..00ecbb1
--- /dev/null
+++ b/astropy/coordinates/builtin_frames/icrs_cirs_transforms.py
@@ -0,0 +1,334 @@
+# -*- coding: utf-8 -*-
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+"""
+Contains the transformation functions for getting from ICRS/HCRS to CIRS and
+anything in between (currently that means GCRS)
+"""
+from __future__ import (absolute_import, unicode_literals, division,
+                        print_function)
+
+import numpy as np
+
+from ... import units as u
+from ..baseframe import frame_transform_graph
+from ..transformations import FunctionTransformWithFiniteDifference, AffineTransform
+from ..representation import (SphericalRepresentation, CartesianRepresentation,
+                              UnitSphericalRepresentation)
+from ... import _erfa as erfa
+
+from .icrs import ICRS
+from .gcrs import GCRS
+from .cirs import CIRS
+from .hcrs import HCRS
+from .utils import get_jd12, aticq, atciqz, get_cip, prepare_earth_position_vel
+
+
+# First the ICRS/CIRS related transforms
+@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, ICRS, CIRS)
+def icrs_to_cirs(icrs_coo, cirs_frame):
+    # first set up the astrometry context for ICRS<->CIRS
+    jd1, jd2 = get_jd12(cirs_frame.obstime, 'tdb')
+    x, y, s = get_cip(jd1, jd2)
+    earth_pv, earth_heliocentric = prepare_earth_position_vel(cirs_frame.obstime)
+    astrom = erfa.apci(jd1, jd2, earth_pv, earth_heliocentric, x, y, s)
+
+    if icrs_coo.data.get_name() == 'unitspherical' or icrs_coo.data.to_cartesian().x.unit == u.one:
+        # if no distance, just do the infinite-distance/no parallax calculation
+        usrepr = icrs_coo.represent_as(UnitSphericalRepresentation)
+        i_ra = usrepr.lon.to_value(u.radian)
+        i_dec = usrepr.lat.to_value(u.radian)
+        cirs_ra, cirs_dec = atciqz(i_ra, i_dec, astrom)
+
+        newrep = UnitSphericalRepresentation(lat=u.Quantity(cirs_dec, u.radian, copy=False),
+                                             lon=u.Quantity(cirs_ra, u.radian, copy=False),
+                                             copy=False)
+    else:
+        # When there is a distance, we first offset for parallax to get the
+        # astrometric coordinate direction and *then* run the ERFA transform for
+        # no parallax/PM.  This ensures reversibility and is more sensible for
+        # inside solar system objects
+        astrom_eb = CartesianRepresentation(astrom['eb'], unit=u.au,
+                                            xyz_axis=-1, copy=False)
+        newcart = icrs_coo.cartesian - astrom_eb
+
+        srepr = newcart.represent_as(SphericalRepresentation)
+        i_ra = srepr.lon.to_value(u.radian)
+        i_dec = srepr.lat.to_value(u.radian)
+        cirs_ra, cirs_dec = atciqz(i_ra, i_dec, astrom)
+
+        newrep = SphericalRepresentation(lat=u.Quantity(cirs_dec, u.radian, copy=False),
+                                         lon=u.Quantity(cirs_ra, u.radian, copy=False),
+                                         distance=srepr.distance, copy=False)
+
+    return cirs_frame.realize_frame(newrep)
+
+
+@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, CIRS, ICRS)
+def cirs_to_icrs(cirs_coo, icrs_frame):
+    srepr = cirs_coo.represent_as(SphericalRepresentation)
+    cirs_ra = srepr.lon.to_value(u.radian)
+    cirs_dec = srepr.lat.to_value(u.radian)
+
+    # set up the astrometry context for ICRS<->CIRS and then convert to
+    # astrometric coordinate direction
+    jd1, jd2 = get_jd12(cirs_coo.obstime, 'tdb')
+    x, y, s = get_cip(jd1, jd2)
+    earth_pv, earth_heliocentric = prepare_earth_position_vel(cirs_coo.obstime)
+    astrom = erfa.apci(jd1, jd2, earth_pv, earth_heliocentric, x, y, s)
+    i_ra, i_dec = aticq(cirs_ra, cirs_dec, astrom)
+
+    if cirs_coo.data.get_name() == 'unitspherical' or cirs_coo.data.to_cartesian().x.unit == u.one:
+        # if no distance, just use the coordinate direction to yield the
+        # infinite-distance/no parallax answer
+        newrep = UnitSphericalRepresentation(lat=u.Quantity(i_dec, u.radian, copy=False),
+                                             lon=u.Quantity(i_ra, u.radian, copy=False),
+                                             copy=False)
+    else:
+        # When there is a distance, apply the parallax/offset to the SSB as the
+        # last step - ensures round-tripping with the icrs_to_cirs transform
+
+        # the distance in intermedrep is *not* a real distance as it does not
+        # include the offset back to the SSB
+        intermedrep = SphericalRepresentation(lat=u.Quantity(i_dec, u.radian, copy=False),
+                                              lon=u.Quantity(i_ra, u.radian, copy=False),
+                                              distance=srepr.distance,
+                                              copy=False)
+
+        astrom_eb = CartesianRepresentation(astrom['eb'], unit=u.au,
+                                            xyz_axis=-1, copy=False)
+        newrep = intermedrep + astrom_eb
+
+    return icrs_frame.realize_frame(newrep)
+
+
+@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, CIRS, CIRS)
+def cirs_to_cirs(from_coo, to_frame):
+    if np.all(from_coo.obstime == to_frame.obstime):
+        return to_frame.realize_frame(from_coo.data)
+    else:
+        # the CIRS<->CIRS transform actually goes through ICRS.  This has the
+        # subtle implication that a point in CIRS is uniquely determined
+        # by the corresponding astrometric ICRS coordinate *at its
+        # current time*.  This has some subtle implications in terms of GR, but
+        # is sort of glossed over in the current scheme because we are dropping
+        # distances anyway.
+        return from_coo.transform_to(ICRS).transform_to(to_frame)
+
+
+# Now the GCRS-related transforms to/from ICRS
+
+@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, ICRS, GCRS)
+def icrs_to_gcrs(icrs_coo, gcrs_frame):
+    # first set up the astrometry context for ICRS<->GCRS.  There are a few steps...
+    # get the position and velocity arrays for the observatory.  Need to
+    # have xyz in last dimension, and pos/vel in one-but-last.
+    # (Note could use np.stack once our minimum numpy version is >=1.10.)
+ pv = np.concatenate( + (gcrs_frame.obsgeoloc.get_xyz(xyz_axis=-1).value[..., np.newaxis, :], + gcrs_frame.obsgeovel.get_xyz(xyz_axis=-1).value[..., np.newaxis, :]), + axis=-2) + + # find the position and velocity of earth + jd1, jd2 = get_jd12(gcrs_frame.obstime, 'tdb') + earth_pv, earth_heliocentric = prepare_earth_position_vel(gcrs_frame.obstime) + + # get astrometry context object, astrom. + astrom = erfa.apcs(jd1, jd2, pv, earth_pv, earth_heliocentric) + + if icrs_coo.data.get_name() == 'unitspherical' or icrs_coo.data.to_cartesian().x.unit == u.one: + # if no distance, just do the infinite-distance/no parallax calculation + usrepr = icrs_coo.represent_as(UnitSphericalRepresentation) + i_ra = usrepr.lon.to_value(u.radian) + i_dec = usrepr.lat.to_value(u.radian) + gcrs_ra, gcrs_dec = atciqz(i_ra, i_dec, astrom) + + newrep = UnitSphericalRepresentation(lat=u.Quantity(gcrs_dec, u.radian, copy=False), + lon=u.Quantity(gcrs_ra, u.radian, copy=False), + copy=False) + else: + # When there is a distance, we first offset for parallax to get the + # BCRS coordinate direction and *then* run the ERFA transform for no + # parallax/PM. This ensures reversibility and is more sensible for + # inside solar system objects + astrom_eb = CartesianRepresentation(astrom['eb'], unit=u.au, + xyz_axis=-1, copy=False) + newcart = icrs_coo.cartesian - astrom_eb + + srepr = newcart.represent_as(SphericalRepresentation) + i_ra = srepr.lon.to_value(u.radian) + i_dec = srepr.lat.to_value(u.radian) + gcrs_ra, gcrs_dec = atciqz(i_ra, i_dec, astrom) + + newrep = SphericalRepresentation(lat=u.Quantity(gcrs_dec, u.radian, copy=False), + lon=u.Quantity(gcrs_ra, u.radian, copy=False), + distance=srepr.distance, copy=False) + + return gcrs_frame.realize_frame(newrep) + + +@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, + GCRS, ICRS) +def gcrs_to_icrs(gcrs_coo, icrs_frame): + srepr = gcrs_coo.represent_as(SphericalRepresentation) + gcrs_ra = srepr.lon.to_value(u.radian) + gcrs_dec = srepr.lat.to_value(u.radian) + + # set up the astrometry context for ICRS<->GCRS and then convert to BCRS + # coordinate direction + pv = np.concatenate( + (gcrs_coo.obsgeoloc.get_xyz(xyz_axis=-1).value[..., np.newaxis, :], + gcrs_coo.obsgeovel.get_xyz(xyz_axis=-1).value[..., np.newaxis, :]), + axis=-2) + + jd1, jd2 = get_jd12(gcrs_coo.obstime, 'tdb') + + earth_pv, earth_heliocentric = prepare_earth_position_vel(gcrs_coo.obstime) + astrom = erfa.apcs(jd1, jd2, pv, earth_pv, earth_heliocentric) + + i_ra, i_dec = aticq(gcrs_ra, gcrs_dec, astrom) + + if gcrs_coo.data.get_name() == 'unitspherical' or gcrs_coo.data.to_cartesian().x.unit == u.one: + # if no distance, just use the coordinate direction to yield the + # infinite-distance/no parallax answer + newrep = UnitSphericalRepresentation(lat=u.Quantity(i_dec, u.radian, copy=False), + lon=u.Quantity(i_ra, u.radian, copy=False), + copy=False) + else: + # When there is a distance, apply the parallax/offset to the SSB as the + # last step - ensures round-tripping with the icrs_to_gcrs transform + + # the distance in intermedrep is *not* a real distance as it does not + # include the offset back to the SSB + intermedrep = SphericalRepresentation(lat=u.Quantity(i_dec, u.radian, copy=False), + lon=u.Quantity(i_ra, u.radian, copy=False), + distance=srepr.distance, + copy=False) + + astrom_eb = CartesianRepresentation(astrom['eb'], unit=u.au, + xyz_axis=-1, copy=False) + newrep = intermedrep + astrom_eb + + return icrs_frame.realize_frame(newrep) + + 
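+# Editorial usage sketch (not part of the upstream source; values are
+# illustrative): the two transforms above are what run under the hood for,
+# e.g.:
+#
+#     from astropy import units as u
+#     from astropy.time import Time
+#     from astropy.coordinates import SkyCoord, GCRS
+#
+#     sc = SkyCoord(ra=10*u.deg, dec=20*u.deg, distance=1*u.kpc)
+#     sc_gcrs = sc.transform_to(GCRS(obstime=Time('J2010')))  # icrs_to_gcrs
+#     sc_icrs = sc_gcrs.transform_to('icrs')                  # gcrs_to_icrs
+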
+@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, GCRS, GCRS)
+def gcrs_to_gcrs(from_coo, to_frame):
+    if (np.all(from_coo.obstime == to_frame.obstime)
+            and np.all(from_coo.obsgeoloc == to_frame.obsgeoloc)):
+        return to_frame.realize_frame(from_coo.data)
+    else:
+        # like CIRS, we do this self-transform via ICRS
+        return from_coo.transform_to(ICRS).transform_to(to_frame)
+
+
+@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, GCRS, HCRS)
+def gcrs_to_hcrs(gcrs_coo, hcrs_frame):
+
+    if np.any(gcrs_coo.obstime != hcrs_frame.obstime):
+        # if the GCRS obstime and HCRS obstime are not the same, we first
+        # have to move to a GCRS where they are.
+        frameattrs = gcrs_coo.get_frame_attr_names()
+        frameattrs['obstime'] = hcrs_frame.obstime
+        gcrs_coo = gcrs_coo.transform_to(GCRS(**frameattrs))
+
+    srepr = gcrs_coo.represent_as(SphericalRepresentation)
+    gcrs_ra = srepr.lon.to_value(u.radian)
+    gcrs_dec = srepr.lat.to_value(u.radian)
+
+    # set up the astrometry context for ICRS<->GCRS and then convert to ICRS
+    # coordinate direction
+    pv = np.concatenate(
+            (gcrs_coo.obsgeoloc.get_xyz(xyz_axis=-1).value[..., np.newaxis, :],
+             gcrs_coo.obsgeovel.get_xyz(xyz_axis=-1).value[..., np.newaxis, :]),
+            axis=-2)
+
+    jd1, jd2 = get_jd12(hcrs_frame.obstime, 'tdb')
+    earth_pv, earth_heliocentric = prepare_earth_position_vel(gcrs_coo.obstime)
+    astrom = erfa.apcs(jd1, jd2, pv, earth_pv, earth_heliocentric)
+
+    i_ra, i_dec = aticq(gcrs_ra, gcrs_dec, astrom)
+
+    # convert to Quantity objects
+    i_ra = u.Quantity(i_ra, u.radian, copy=False)
+    i_dec = u.Quantity(i_dec, u.radian, copy=False)
+    if gcrs_coo.data.get_name() == 'unitspherical' or gcrs_coo.data.to_cartesian().x.unit == u.one:
+        # if no distance, just use the coordinate direction to yield the
+        # infinite-distance/no parallax answer
+        newrep = UnitSphericalRepresentation(lat=i_dec, lon=i_ra, copy=False)
+    else:
+        # When there is a distance, apply the parallax/offset to the
+        # Heliocentre as the last step to ensure round-tripping with the
+        # hcrs_to_gcrs transform
+
+        # Note that the distance in intermedrep is *not* a real distance as it
+        # does not include the offset back to the Heliocentre
+        intermedrep = SphericalRepresentation(lat=i_dec, lon=i_ra,
+                                              distance=srepr.distance,
+                                              copy=False)
+
+        # astrom['eh'] and astrom['em'] contain the Sun-to-observer unit vector
+        # and distance, respectively. Shapes are (X,3) and (X), where (X) is the
+        # shape resulting from broadcasting the shape of the times object
+        # against the shape of the pv array.
+        # broadcast em to eh and scale eh
+        eh = astrom['eh'] * astrom['em'][..., np.newaxis]
+        eh = CartesianRepresentation(eh, unit=u.au, xyz_axis=-1, copy=False)
+
+        newrep = intermedrep.to_cartesian() + eh
+
+    return hcrs_frame.realize_frame(newrep)
+
+
+_NEED_ORIGIN_HINT = ("The input {0} coordinates do not have length units. This "
+                     "probably means you created coordinates with lat/lon but "
+                     "no distance. 
Heliocentric<->ICRS transforms cannot " + "function in this case because there is an origin shift.") + + +@frame_transform_graph.transform(AffineTransform, HCRS, ICRS) +def hcrs_to_icrs(hcrs_coo, icrs_frame): + # this is just an origin translation so without a distance it cannot go ahead + if isinstance(hcrs_coo.data, UnitSphericalRepresentation): + raise u.UnitsError(_NEED_ORIGIN_HINT.format(hcrs_coo.__class__.__name__)) + + if hcrs_coo.data.differentials: + from ..solar_system import get_body_barycentric_posvel + bary_sun_pos, bary_sun_vel = get_body_barycentric_posvel('sun', + hcrs_coo.obstime) + bary_sun_pos = bary_sun_pos.with_differentials(bary_sun_vel) + + else: + from ..solar_system import get_body_barycentric + bary_sun_pos = get_body_barycentric('sun', hcrs_coo.obstime) + bary_sun_vel = None + + return None, bary_sun_pos + + +@frame_transform_graph.transform(AffineTransform, ICRS, HCRS) +def icrs_to_hcrs(icrs_coo, hcrs_frame): + # this is just an origin translation so without a distance it cannot go ahead + if isinstance(icrs_coo.data, UnitSphericalRepresentation): + raise u.UnitsError(_NEED_ORIGIN_HINT.format(icrs_coo.__class__.__name__)) + + if icrs_coo.data.differentials: + from ..solar_system import get_body_barycentric_posvel + bary_sun_pos, bary_sun_vel = get_body_barycentric_posvel('sun', + hcrs_frame.obstime) + bary_sun_pos = -bary_sun_pos.with_differentials(-bary_sun_vel) + + else: + from ..solar_system import get_body_barycentric + bary_sun_pos = -get_body_barycentric('sun', hcrs_frame.obstime) + bary_sun_vel = None + + return None, bary_sun_pos + + +@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, HCRS, HCRS) +def hcrs_to_hcrs(from_coo, to_frame): + if np.all(from_coo.obstime == to_frame.obstime): + return to_frame.realize_frame(from_coo.data) + else: + # like CIRS, we do this self-transform via ICRS + return from_coo.transform_to(ICRS).transform_to(to_frame) diff --git a/astropy/coordinates/builtin_frames/icrs_fk5_transforms.py b/astropy/coordinates/builtin_frames/icrs_fk5_transforms.py new file mode 100644 index 0000000..c73d0a3 --- /dev/null +++ b/astropy/coordinates/builtin_frames/icrs_fk5_transforms.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, unicode_literals, division, + print_function) + +from ..matrix_utilities import (rotation_matrix, + matrix_product, matrix_transpose) +from ..baseframe import frame_transform_graph +from ..transformations import DynamicMatrixTransform + +from .fk5 import FK5 +from .icrs import ICRS +from .utils import EQUINOX_J2000 + + +def _icrs_to_fk5_matrix(): + """ + B-matrix from USNO circular 179. Used by the ICRS->FK5 transformation + functions. + """ + + eta0 = -19.9 / 3600000. + xi0 = 9.1 / 3600000. + da0 = -22.9 / 3600000. 
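+    # (editorial note: eta0, xi0 and da0 are the ICRS frame-bias angles in
+    # milliarcseconds; dividing by 3600000 converts them to the degrees that
+    # rotation_matrix assumes for unitless input)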
+ + m1 = rotation_matrix(-eta0, 'x') + m2 = rotation_matrix(xi0, 'y') + m3 = rotation_matrix(da0, 'z') + + return matrix_product(m1, m2, m3) + + +# define this here because it only needs to be computed once +_ICRS_TO_FK5_J2000_MAT = _icrs_to_fk5_matrix() + + +@frame_transform_graph.transform(DynamicMatrixTransform, ICRS, FK5) +def icrs_to_fk5(icrscoord, fk5frame): + # ICRS is by design very close to J2000 equinox + pmat = fk5frame._precession_matrix(EQUINOX_J2000, fk5frame.equinox) + return matrix_product(pmat, _ICRS_TO_FK5_J2000_MAT) + + +# can't be static because the equinox is needed +@frame_transform_graph.transform(DynamicMatrixTransform, FK5, ICRS) +def fk5_to_icrs(fk5coord, icrsframe): + # ICRS is by design very close to J2000 equinox + pmat = fk5coord._precession_matrix(fk5coord.equinox, EQUINOX_J2000) + return matrix_product(matrix_transpose(_ICRS_TO_FK5_J2000_MAT), pmat) diff --git a/astropy/coordinates/builtin_frames/intermediate_rotation_transforms.py b/astropy/coordinates/builtin_frames/intermediate_rotation_transforms.py new file mode 100644 index 0000000..c0f5e30 --- /dev/null +++ b/astropy/coordinates/builtin_frames/intermediate_rotation_transforms.py @@ -0,0 +1,132 @@ +# -*- coding: utf-8 -*- +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" +Contains the transformation functions for getting to/from ITRS, GCRS, and CIRS. +These are distinct from the ICRS and AltAz functions because they are just +rotations without aberration corrections or offsets. +""" +from __future__ import (absolute_import, unicode_literals, division, + print_function) + +import numpy as np + +from ..baseframe import frame_transform_graph +from ..transformations import FunctionTransformWithFiniteDifference +from ..matrix_utilities import matrix_transpose +from ... 
import _erfa as erfa
+
+from .gcrs import GCRS, PrecessedGeocentric
+from .cirs import CIRS
+from .itrs import ITRS
+from .utils import get_polar_motion, get_jd12
+
+# first define helper functions
+
+
+def gcrs_to_cirs_mat(time):
+    # celestial-to-intermediate matrix
+    return erfa.c2i06a(*get_jd12(time, 'tt'))
+
+
+def cirs_to_itrs_mat(time):
+    # compute the polar motion p-matrix
+    xp, yp = get_polar_motion(time)
+    sp = erfa.sp00(*get_jd12(time, 'tt'))
+    pmmat = erfa.pom00(xp, yp, sp)
+
+    # now determine the Earth Rotation Angle for the input obstime
+    # era00 accepts UT1, so we convert if need be
+    era = erfa.era00(*get_jd12(time, 'ut1'))
+
+    # c2tcio expects a GCRS->CIRS matrix, but we just set that to an I-matrix
+    # because we're already in CIRS
+    return erfa.c2tcio(np.eye(3), era, pmmat)
+
+
+def gcrs_precession_mat(equinox):
+    gamb, phib, psib, epsa = erfa.pfw06(*get_jd12(equinox, 'tt'))
+    return erfa.fw2m(gamb, phib, psib, epsa)
+
+
+# now the actual transforms
+
+@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, GCRS, CIRS)
+def gcrs_to_cirs(gcrs_coo, cirs_frame):
+    # first get us to a 0 pos/vel GCRS at the target obstime
+    gcrs_coo2 = gcrs_coo.transform_to(GCRS(obstime=cirs_frame.obstime))
+
+    # now get the pmatrix
+    pmat = gcrs_to_cirs_mat(cirs_frame.obstime)
+    crepr = gcrs_coo2.cartesian.transform(pmat)
+    return cirs_frame.realize_frame(crepr)
+
+
+@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, CIRS, GCRS)
+def cirs_to_gcrs(cirs_coo, gcrs_frame):
+    # compute the pmatrix, and then multiply by its transpose
+    pmat = gcrs_to_cirs_mat(cirs_coo.obstime)
+    newrepr = cirs_coo.cartesian.transform(matrix_transpose(pmat))
+    gcrs = GCRS(newrepr, obstime=cirs_coo.obstime)
+
+    # now do any needed offsets (no-op if same obstime and 0 pos/vel)
+    return gcrs.transform_to(gcrs_frame)
+
+
+@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, CIRS, ITRS)
+def cirs_to_itrs(cirs_coo, itrs_frame):
+    # first get us to CIRS at the target obstime
+    cirs_coo2 = cirs_coo.transform_to(CIRS(obstime=itrs_frame.obstime))
+
+    # now get the pmatrix
+    pmat = cirs_to_itrs_mat(itrs_frame.obstime)
+    crepr = cirs_coo2.cartesian.transform(pmat)
+    return itrs_frame.realize_frame(crepr)
+
+
+@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, ITRS, CIRS)
+def itrs_to_cirs(itrs_coo, cirs_frame):
+    # compute the pmatrix, and then multiply by its transpose
+    pmat = cirs_to_itrs_mat(itrs_coo.obstime)
+    newrepr = itrs_coo.cartesian.transform(matrix_transpose(pmat))
+    cirs = CIRS(newrepr, obstime=itrs_coo.obstime)
+
+    # now do any needed offsets (no-op if same obstime)
+    return cirs.transform_to(cirs_frame)
+
+
+@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, ITRS, ITRS)
+def itrs_to_itrs(from_coo, to_frame):
+    # this self-transform goes through CIRS right now, which implicitly also
+    # goes back to ICRS
+    return from_coo.transform_to(CIRS).transform_to(to_frame)
+
+# TODO: implement a direct GCRS<->ITRS transform if there's call for it. The thing
+# that's awkward is that they both have obstimes, so an extra set of transformations
+# is necessary.
+# so unless there's a specific need for that, better to just have it go through the above +# two steps anyway + + +@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, GCRS, PrecessedGeocentric) +def gcrs_to_precessedgeo(from_coo, to_frame): + # first get us to GCRS with the right attributes (might be a no-op) + gcrs_coo = from_coo.transform_to(GCRS(obstime=to_frame.obstime, + obsgeoloc=to_frame.obsgeoloc, + obsgeovel=to_frame.obsgeovel)) + + # now precess to the requested equinox + pmat = gcrs_precession_mat(to_frame.equinox) + crepr = gcrs_coo.cartesian.transform(pmat) + return to_frame.realize_frame(crepr) + + +@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, PrecessedGeocentric, GCRS) +def precessedgeo_to_gcrs(from_coo, to_frame): + # first un-precess + pmat = gcrs_precession_mat(from_coo.equinox) + crepr = from_coo.cartesian.transform(matrix_transpose(pmat)) + gcrs_coo = GCRS(crepr, obstime=to_frame.obstime, + obsgeoloc=to_frame.obsgeoloc, + obsgeovel=to_frame.obsgeovel) + + # then move to the GCRS that's actually desired + return gcrs_coo.transform_to(to_frame) diff --git a/astropy/coordinates/builtin_frames/itrs.py b/astropy/coordinates/builtin_frames/itrs.py new file mode 100644 index 0000000..eaeb473 --- /dev/null +++ b/astropy/coordinates/builtin_frames/itrs.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, unicode_literals, division, + print_function) + +from ..representation import CartesianRepresentation, CartesianDifferential +from ..baseframe import BaseCoordinateFrame +from ..attributes import TimeAttribute +from .utils import DEFAULT_OBSTIME + + +class ITRS(BaseCoordinateFrame): + """ + A coordinate or frame in the International Terrestrial Reference System + (ITRS). This is approximately a geocentric system, although strictly it is + defined by a series of reference locations near the surface of the Earth. + For more background on the ITRS, see the references provided in the + :ref:`astropy-coordinates-seealso` section of the documentation. + """ + + default_representation = CartesianRepresentation + default_differential = CartesianDifferential + + obstime = TimeAttribute(default=DEFAULT_OBSTIME) + + @property + def earth_location(self): + """ + The data in this frame as an `~astropy.coordinates.EarthLocation` class. + """ + from ..earth import EarthLocation + + cart = self.represent_as(CartesianRepresentation) + return EarthLocation(x=cart.x, y=cart.y, z=cart.z) + +# Self-transform is in intermediate_rotation_transforms.py with all the other +# ITRS transforms diff --git a/astropy/coordinates/builtin_frames/lsr.py b/astropy/coordinates/builtin_frames/lsr.py new file mode 100644 index 0000000..f1efdab --- /dev/null +++ b/astropy/coordinates/builtin_frames/lsr.py @@ -0,0 +1,202 @@ +# -*- coding: utf-8 -*- +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, unicode_literals, division, + print_function) + +from ... import units as u +from ...time import Time +from .. 
import representation as r
+from ..baseframe import (BaseCoordinateFrame, RepresentationMapping,
+                         frame_transform_graph)
+from ..transformations import AffineTransform
+from ..attributes import DifferentialAttribute
+
+from .baseradec import _base_radec_docstring, BaseRADecFrame
+from .icrs import ICRS
+from .galactic import Galactic
+
+# For speed
+J2000 = Time('J2000')
+
+v_bary_Schoenrich2010 = r.CartesianDifferential([11.1, 12.24, 7.25]*u.km/u.s)
+
+__all__ = ['LSR', 'GalacticLSR']
+
+
+class LSR(BaseRADecFrame):
+    r"""A coordinate or frame in the Local Standard of Rest (LSR).
+
+    This coordinate frame is axis-aligned and co-spatial with `ICRS`, but has
+    a velocity offset relative to the solar system barycenter to remove the
+    peculiar motion of the sun relative to the LSR. Roughly, the LSR is the mean
+    velocity of the stars in the solar neighborhood, but the precise definition
+    depends on the study. As defined in Schönrich et al. (2010):
+    "The LSR is the rest frame at the location of the Sun of a star that would
+    be on a circular orbit in the gravitational potential one would obtain by
+    azimuthally averaging away non-axisymmetric features in the actual Galactic
+    potential." No such orbit truly exists, but it is still a commonly used
+    velocity frame.
+
+    We use default values from Schönrich et al. (2010) for the barycentric
+    velocity relative to the LSR, which is defined in Galactic (right-handed)
+    cartesian velocity components
+    :math:`(U, V, W) = (11.1, 12.24, 7.25)~{{\rm km}}~{{\rm s}}^{{-1}}`. These
+    values are customizable via the ``v_bary`` argument which specifies the
+    velocity of the solar system barycenter with respect to the LSR.
+
+    The frame attributes are listed under **Other Parameters**.
+
+    {params}
+
+    Other Parameters
+    ----------------
+    v_bary : `~astropy.coordinates.representation.CartesianDifferential`
+        The velocity of the solar system barycenter with respect to the LSR, in
+        Galactic cartesian velocity components.
+
+    """
+
+    # frame attributes:
+    v_bary = DifferentialAttribute(default=v_bary_Schoenrich2010,
+                                   allowed_classes=[r.CartesianDifferential])
+
+
+LSR.__doc__ = LSR.__doc__.format(params=_base_radec_docstring)
+
+
+@frame_transform_graph.transform(AffineTransform, ICRS, LSR)
+def icrs_to_lsr(icrs_coord, lsr_frame):
+    v_bary_gal = Galactic(lsr_frame.v_bary.to_cartesian())
+    v_bary_icrs = v_bary_gal.transform_to(icrs_coord)
+    v_offset = v_bary_icrs.data.represent_as(r.CartesianDifferential)
+    offset = r.CartesianRepresentation([0, 0, 0]*u.au, differentials=v_offset)
+    return None, offset
+
+
+@frame_transform_graph.transform(AffineTransform, LSR, ICRS)
+def lsr_to_icrs(lsr_coord, icrs_frame):
+    v_bary_gal = Galactic(lsr_coord.v_bary.to_cartesian())
+    v_bary_icrs = v_bary_gal.transform_to(icrs_frame)
+    v_offset = v_bary_icrs.data.represent_as(r.CartesianDifferential)
+    offset = r.CartesianRepresentation([0, 0, 0]*u.au, differentials=-v_offset)
+    return None, offset
+
+# ------------------------------------------------------------------------------
+
+
+class GalacticLSR(BaseCoordinateFrame):
+    r"""A coordinate or frame in the Local Standard of Rest (LSR), axis-aligned
+    to the `Galactic` frame.
+
+    This coordinate frame is axis-aligned and co-spatial with the `Galactic`
+    frame, but has a velocity offset relative to the solar system barycenter
+    to remove the peculiar motion of the sun relative to the LSR. Roughly, the
+    LSR is the mean velocity of the stars in the solar neighborhood, but the
+    precise definition depends on the study. As defined in Schönrich et al. (2010):
+    "The LSR is the rest frame at the location of the Sun of a star that would
+    be on a circular orbit in the gravitational potential one would obtain by
+    azimuthally averaging away non-axisymmetric features in the actual Galactic
+    potential." No such orbit truly exists, but it is still a commonly used
+    velocity frame.
+
+    We use default values from Schönrich et al. (2010) for the barycentric
+    velocity relative to the LSR, which is defined in Galactic (right-handed)
+    cartesian velocity components
+    :math:`(U, V, W) = (11.1, 12.24, 7.25)~{\rm km}~{\rm s}^{-1}`. These
+    values are customizable via the ``v_bary`` argument which specifies the
+    velocity of the solar system barycenter with respect to the LSR.
+
+    The frame attributes are listed under **Other Parameters**.
+
+    Parameters
+    ----------
+    representation : `BaseRepresentation` or None
+        A representation object or None to have no data (or use the other keywords)
+
+    l : `Angle`, optional, must be keyword
+        The Galactic longitude for this object (``b`` must also be given and
+        ``representation`` must be None).
+    b : `Angle`, optional, must be keyword
+        The Galactic latitude for this object (``l`` must also be given and
+        ``representation`` must be None).
+    distance : `~astropy.units.Quantity`, optional, must be keyword
+        The distance for this object along the line-of-sight
+        (``representation`` must be None).
+
+    pm_l_cosb : :class:`~astropy.units.Quantity`, optional, must be keyword
+        The proper motion in Galactic longitude (including the ``cos(b)`` term)
+        for this object (``pm_b`` must also be given).
+    pm_b : :class:`~astropy.units.Quantity`, optional, must be keyword
+        The proper motion in Galactic latitude for this object (``pm_l_cosb``
+        must also be given).
+    radial_velocity : :class:`~astropy.units.Quantity`, optional, must be keyword
+        The radial velocity of this object.
+
+    copy : bool, optional
+        If `True` (default), make copies of the input coordinate arrays.
+        Can only be passed in as a keyword argument.
+
+    differential_cls : `BaseDifferential`, dict, optional
+        A differential class or dictionary of differential classes (currently
+        only a velocity differential with key 's' is supported). This sets
+        the expected input differential class, thereby changing the expected
+        keyword arguments of the data passed in. For example, passing
+        ``differential_cls=CartesianDifferential`` will make the classes
+        expect velocity data with the argument names ``v_x, v_y, v_z``.
+
+    Other Parameters
+    ----------------
+    v_bary : `~astropy.coordinates.representation.CartesianDifferential`
+        The velocity of the solar system barycenter with respect to the LSR, in
+        Galactic cartesian velocity components.
+ """ + + frame_specific_representation_info = { + r.SphericalRepresentation: [ + RepresentationMapping('lon', 'l'), + RepresentationMapping('lat', 'b') + ], + r.SphericalCosLatDifferential: [ + RepresentationMapping('d_lon_coslat', 'pm_l_cosb', u.mas/u.yr), + RepresentationMapping('d_lat', 'pm_b', u.mas/u.yr), + RepresentationMapping('d_distance', 'radial_velocity', u.km/u.s) + ], + r.SphericalDifferential: [ + RepresentationMapping('d_lon', 'pm_l', u.mas/u.yr), + RepresentationMapping('d_lat', 'pm_b', u.mas/u.yr), + RepresentationMapping('d_distance', 'radial_velocity', u.km/u.s) + ], + r.CartesianDifferential: [ + RepresentationMapping('d_x', 'v_x', u.km/u.s), + RepresentationMapping('d_y', 'v_y', u.km/u.s), + RepresentationMapping('d_z', 'v_z', u.km/u.s) + ], + } + frame_specific_representation_info[r.UnitSphericalRepresentation] = \ + frame_specific_representation_info[r.SphericalRepresentation] + frame_specific_representation_info[r.UnitSphericalCosLatDifferential] = \ + frame_specific_representation_info[r.SphericalCosLatDifferential] + frame_specific_representation_info[r.UnitSphericalDifferential] = \ + frame_specific_representation_info[r.SphericalDifferential] + + default_representation = r.SphericalRepresentation + default_differential = r.SphericalCosLatDifferential + + # frame attributes: + v_bary = DifferentialAttribute(default=v_bary_Schoenrich2010) + + +@frame_transform_graph.transform(AffineTransform, Galactic, GalacticLSR) +def galactic_to_galacticlsr(galactic_coord, lsr_frame): + v_bary_gal = Galactic(lsr_frame.v_bary.to_cartesian()) + v_offset = v_bary_gal.data.represent_as(r.CartesianDifferential) + offset = r.CartesianRepresentation([0, 0, 0]*u.au, differentials=v_offset) + return None, offset + + +@frame_transform_graph.transform(AffineTransform, GalacticLSR, Galactic) +def galacticlsr_to_galactic(lsr_coord, galactic_frame): + v_bary_gal = Galactic(lsr_coord.v_bary.to_cartesian()) + v_offset = v_bary_gal.data.represent_as(r.CartesianDifferential) + offset = r.CartesianRepresentation([0, 0, 0]*u.au, differentials=-v_offset) + return None, offset diff --git a/astropy/coordinates/builtin_frames/skyoffset.py b/astropy/coordinates/builtin_frames/skyoffset.py new file mode 100644 index 0000000..6856a1c --- /dev/null +++ b/astropy/coordinates/builtin_frames/skyoffset.py @@ -0,0 +1,230 @@ +# -*- coding: utf-8 -*- +# Licensed under a 3-clause BSD style license - see LICENSE.rst +# Note: `from __future__ import unicode_literals` is omitted here on purpose. +# Adding it leads to str / unicode errors on Python 2 +from __future__ import (absolute_import, division, print_function) + +from ... import units as u +from ...utils.compat import namedtuple_asdict +from .. import representation as r +from ..transformations import DynamicMatrixTransform, FunctionTransform +from ..baseframe import (frame_transform_graph, RepresentationMapping, + BaseCoordinateFrame) +from ..attributes import CoordinateAttribute, QuantityAttribute +from ..matrix_utilities import (rotation_matrix, + matrix_product, matrix_transpose) + +_skyoffset_cache = {} + + +def make_skyoffset_cls(framecls): + """ + Create a new class that is the sky offset frame for a specific class of + origin frame. If such a class has already been created for this frame, the + same class will be returned. + + The new class will always have component names for spherical coordinates of + ``lon``/``lat``. 
+
+    Parameters
+    ----------
+    framecls : coordinate frame class (i.e., subclass of `~astropy.coordinates.BaseCoordinateFrame`)
+        The class to create the SkyOffsetFrame of.
+
+    Returns
+    -------
+    skyoffsetframecls : class
+        The class for the new skyoffset frame.
+
+    Notes
+    -----
+    This function is necessary because Astropy's frame transformations depend
+    on connections between specific frame *classes*. So each type of frame
+    needs its own distinct skyoffset frame class. This function generates
+    just that class, as well as ensuring that only one instance of such a class
+    actually gets created in any given python session.
+    """
+
+    if framecls in _skyoffset_cache:
+        return _skyoffset_cache[framecls]
+
+    # the class of a class object is the metaclass
+    framemeta = framecls.__class__
+
+    class SkyOffsetMeta(framemeta):
+        """
+        This metaclass renames the class to be "SkyOffset<framecls>" and also
+        adjusts the frame specific representation info so that spherical names
+        are always "lon" and "lat" (instead of e.g. "ra" and "dec").
+        """
+
+        def __new__(cls, name, bases, members):
+            # Only 'origin' is needed here, to set the origin frame properly.
+            members['origin'] = CoordinateAttribute(frame=framecls, default=None)
+
+            # This has to be done because FrameMeta will set these attributes
+            # to the defaults from BaseCoordinateFrame when it creates the base
+            # SkyOffsetFrame class initially.
+            members['_frame_specific_representation_info'] = framecls._frame_specific_representation_info
+            members['_default_representation'] = framecls._default_representation
+            members['_default_differential'] = framecls._default_differential
+
+            newname = name[:-5] if name.endswith('Frame') else name
+            newname += framecls.__name__
+
+            res = super(SkyOffsetMeta, cls).__new__(cls, newname, bases, members)
+
+            # now go through all the component names and make any spherical names be "lon" and "lat"
+            # instead of e.g. "ra" and "dec"
+
+            lists_done = []
+            for cls_, component_list in res._frame_specific_representation_info.items():
+                if cls_ in (r.SphericalRepresentation,
+                            r.UnitSphericalRepresentation):
+                    gotlatlon = []
+                    for i, comp in enumerate(component_list):
+                        if component_list in lists_done:
+                            # we need this because sometimes the component
+                            # lists are the exact *same* object for both
+                            # spherical and unitspherical. So looping then makes
+                            # the change *twice*. This hack bypasses that.
+                            continue
+
+                        if comp.reprname in ('lon', 'lat'):
+                            dct = namedtuple_asdict(comp)
+                            # this forces the component names to be 'lat' and
+                            # 'lon' regardless of what the actual base frame
+                            # might use
+                            dct['framename'] = comp.reprname
+                            component_list[i] = type(comp)(**dct)
+                            gotlatlon.append(comp.reprname)
+
+                    if 'lon' not in gotlatlon:
+                        rmlon = RepresentationMapping('lon', 'lon', 'recommended')
+                        component_list.insert(0, rmlon)
+
+                    if 'lat' not in gotlatlon:
+                        rmlat = RepresentationMapping('lat', 'lat', 'recommended')
+                        component_list.insert(0, rmlat)
+
+                    # TODO: we could support proper motions / velocities in sky
+                    # offset frames.
+
+                    lists_done.append(component_list)
+
+            return res
+
+    # We need this to handle the intermediate metaclass correctly, otherwise we could
+    # just subclass SkyOffsetFrame.
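+    # Editorial usage sketch (not part of the upstream source): this factory
+    # is normally reached indirectly, by instantiating SkyOffsetFrame with an
+    # ``origin``, e.g.:
+    #
+    #     from astropy import units as u
+    #     from astropy.coordinates import SkyCoord, SkyOffsetFrame
+    #
+    #     center = SkyCoord(10*u.deg, 45*u.deg)   # ICRS by default
+    #     frame = SkyOffsetFrame(origin=center)   # a SkyOffsetICRS instance
+    #     SkyCoord(11*u.deg, 46*u.deg).transform_to(frame)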
+    _SkyOffsetFramecls = SkyOffsetMeta('SkyOffsetFrame', (SkyOffsetFrame, framecls),
+                                       {'__doc__': SkyOffsetFrame.__doc__})
+
+    @frame_transform_graph.transform(FunctionTransform, _SkyOffsetFramecls, _SkyOffsetFramecls)
+    def skyoffset_to_skyoffset(from_skyoffset_coord, to_skyoffset_frame):
+        """Transform between two skyoffset frames."""
+
+        # This transform goes through the parent frames on each side.
+        # from_frame -> from_frame.origin -> to_frame.origin -> to_frame
+        intermediate_from = from_skyoffset_coord.transform_to(from_skyoffset_coord.origin)
+        intermediate_to = intermediate_from.transform_to(to_skyoffset_frame.origin)
+        return intermediate_to.transform_to(to_skyoffset_frame)
+
+    @frame_transform_graph.transform(DynamicMatrixTransform, framecls, _SkyOffsetFramecls)
+    def reference_to_skyoffset(reference_frame, skyoffset_frame):
+        """Convert a reference coordinate to a sky offset frame."""
+
+        # Define rotation matrices along the position angle vector, and
+        # relative to the origin.
+        origin = skyoffset_frame.origin.spherical
+        mat1 = rotation_matrix(-skyoffset_frame.rotation, 'x')
+        mat2 = rotation_matrix(-origin.lat, 'y')
+        mat3 = rotation_matrix(origin.lon, 'z')
+        return matrix_product(mat1, mat2, mat3)
+
+    @frame_transform_graph.transform(DynamicMatrixTransform, _SkyOffsetFramecls, framecls)
+    def skyoffset_to_reference(skyoffset_coord, reference_frame):
+        """Convert a sky offset frame coordinate to the reference frame."""
+
+        # use the forward transform, but just invert it
+        R = reference_to_skyoffset(reference_frame, skyoffset_coord)
+        # transpose is the inverse because R is a rotation matrix
+        return matrix_transpose(R)
+
+    _skyoffset_cache[framecls] = _SkyOffsetFramecls
+    return _SkyOffsetFramecls
+
+
+class SkyOffsetFrame(BaseCoordinateFrame):
+    """
+    A frame which is relative to some specific position and oriented to match
+    the frame of that position.
+
+    SkyOffsetFrames always have component names for spherical coordinates
+    of ``lon``/``lat``, *not* the component names for the frame of ``origin``.
+
+    This is useful for calculating offsets and dithers in the frame of the sky
+    relative to an arbitrary position. Coordinates in this frame are both
+    centered on the position specified by the ``origin`` coordinate, *and*
+    they are oriented in the same manner as the ``origin`` frame. E.g., if
+    ``origin`` is `~astropy.coordinates.ICRS`, this object's ``lat`` will be
+    pointed in the direction of Dec, while ``lon`` will point in the direction
+    of RA.
+
+    For more on skyoffset frames, see :ref:`astropy-skyoffset-frames`.
+
+    Parameters
+    ----------
+    representation : `BaseRepresentation` or None
+        A representation object or None to have no data (or use the other keywords)
+    origin : `SkyCoord` or low-level coordinate object.
+        The coordinate which specifies the origin of this frame.
+    rotation : `~astropy.coordinates.Angle` or `~astropy.units.Quantity` with angle units
+        The final rotation of the frame about the ``origin``. The sign of
+        the rotation is the left-hand rule. That is, an object at a
+        particular position angle in the un-rotated system will be sent to
+        the positive latitude (z) direction in the final frame.
+
+
+    Notes
+    -----
+    ``SkyOffsetFrame`` is a factory class. That is, the objects that it
+    yields are *not* actually objects of class ``SkyOffsetFrame``. Instead,
+    distinct classes are created on-the-fly for whatever the frame class is
+    of ``origin``.
+    """
+
+    rotation = QuantityAttribute(default=0, unit=u.deg)
+    origin = CoordinateAttribute(default=None, frame=None)
+
+    def __new__(cls, *args, **kwargs):
+        # We don't want to call this method if we've already set up
+        # a skyoffset frame for this class.
+        if not (issubclass(cls, SkyOffsetFrame) and cls is not SkyOffsetFrame):
+            # We get the origin argument, and handle it here.
+            try:
+                origin_frame = kwargs['origin']
+            except KeyError:
+                raise TypeError("Can't initialize a SkyOffsetFrame without origin= keyword.")
+            if hasattr(origin_frame, 'frame'):
+                origin_frame = origin_frame.frame
+            newcls = make_skyoffset_cls(origin_frame.__class__)
+            return newcls.__new__(newcls, *args, **kwargs)
+
+        # http://stackoverflow.com/questions/19277399/why-does-object-new-work-differently-in-these-three-cases
+        # See above for why this is necessary. Basically, because some child
+        # may override __new__, we must override it here to never pass
+        # arguments to the object.__new__ method.
+        if super(SkyOffsetFrame, cls).__new__ is object.__new__:
+            return super(SkyOffsetFrame, cls).__new__(cls)
+        return super(SkyOffsetFrame, cls).__new__(cls, *args, **kwargs)
+
+    def __init__(self, *args, **kwargs):
+        super(SkyOffsetFrame, self).__init__(*args, **kwargs)
+        if self.origin is not None and not self.origin.has_data:
+            raise ValueError('The origin supplied to SkyOffsetFrame has no '
+                             'data.')
+        if self.has_data and hasattr(self.data, 'lon'):
+            self.data.lon.wrap_angle = 180*u.deg
+        if (self.origin is not None and getattr(self.origin.data, 'differentials', None) or
+                (self.has_data and getattr(self.data, 'differentials', None))):
+            raise NotImplementedError('SkyOffsetFrame currently does not '
+                                      'support velocities.')
diff --git a/astropy/coordinates/builtin_frames/supergalactic.py b/astropy/coordinates/builtin_frames/supergalactic.py
new file mode 100644
index 0000000..f1c3678
--- /dev/null
+++ b/astropy/coordinates/builtin_frames/supergalactic.py
@@ -0,0 +1,92 @@
+# -*- coding: utf-8 -*-
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+from __future__ import (absolute_import, unicode_literals, division,
+                        print_function)
+
+from ... import units as u
+from .. import representation as r
+from ..baseframe import BaseCoordinateFrame, RepresentationMapping
+from .galactic import Galactic
+
+
+class Supergalactic(BaseCoordinateFrame):
+    """
+    Supergalactic Coordinates
+    (see Lahav et al. 2000 and references therein).
+
+    Parameters
+    ----------
+    representation : `BaseRepresentation` or None
+        A representation object or None to have no data (or use the other keywords)
+
+    sgl : `Angle`, optional, must be keyword
+        The supergalactic longitude for this object (``sgb`` must also be given and
+        ``representation`` must be None).
+    sgb : `Angle`, optional, must be keyword
+        The supergalactic latitude for this object (``sgl`` must also be given and
+        ``representation`` must be None).
+    distance : `~astropy.units.Quantity`, optional, must be keyword
+        The distance for this object along the line-of-sight.
+
+    pm_sgl_cossgb : :class:`~astropy.units.Quantity`, optional, must be keyword
+        The proper motion in supergalactic longitude (including the ``cos(sgb)``
+        term) for this object (``pm_sgb`` must also be given).
+    pm_sgb : :class:`~astropy.units.Quantity`, optional, must be keyword
+        The proper motion in supergalactic latitude for this object
+        (``pm_sgl_cossgb`` must also be given).
+    radial_velocity : :class:`~astropy.units.Quantity`, optional, must be keyword
+        The radial velocity of this object.
+ + copy : bool, optional + If `True` (default), make copies of the input coordinate arrays. + Can only be passed in as a keyword argument. + + differential_cls : `BaseDifferential`, dict, optional + A differential class or dictionary of differential classes (currently + only a velocity differential with key 's' is supported). This sets + the expected input differential class, thereby changing the expected + keyword arguments of the data passed in. For example, passing + ``differential_cls=CartesianDifferential`` will make the classes + expect velocity data with the argument names ``v_x, v_y, v_z``. + """ + + frame_specific_representation_info = { + r.SphericalRepresentation: [ + RepresentationMapping('lon', 'sgl'), + RepresentationMapping('lat', 'sgb') + ], + r.CartesianRepresentation: [ + RepresentationMapping('x', 'sgx'), + RepresentationMapping('y', 'sgy'), + RepresentationMapping('z', 'sgz') + ], + r.SphericalCosLatDifferential: [ + RepresentationMapping('d_lon_coslat', 'pm_sgl_cossgb', u.mas/u.yr), + RepresentationMapping('d_lat', 'pm_sgb', u.mas/u.yr), + RepresentationMapping('d_distance', 'radial_velocity', u.km/u.s), + ], + r.SphericalDifferential: [ + RepresentationMapping('d_lon', 'pm_sgl', u.mas/u.yr), + RepresentationMapping('d_lat', 'pm_sgb', u.mas/u.yr), + RepresentationMapping('d_distance', 'radial_velocity', u.km/u.s), + ], + r.CartesianDifferential: [ + RepresentationMapping('d_x', 'v_x', u.km/u.s), + RepresentationMapping('d_y', 'v_y', u.km/u.s), + RepresentationMapping('d_z', 'v_z', u.km/u.s) + ], + } + frame_specific_representation_info[r.UnitSphericalRepresentation] = \ + frame_specific_representation_info[r.SphericalRepresentation] + frame_specific_representation_info[r.UnitSphericalCosLatDifferential] = \ + frame_specific_representation_info[r.SphericalCosLatDifferential] + frame_specific_representation_info[r.UnitSphericalDifferential] = \ + frame_specific_representation_info[r.SphericalDifferential] + + default_representation = r.SphericalRepresentation + default_differential = r.SphericalCosLatDifferential + + # North supergalactic pole in Galactic coordinates. + # Needed for transformations to/from Galactic coordinates. 
+ _nsgp_gal = Galactic(l=47.37*u.degree, b=+6.32*u.degree) diff --git a/astropy/coordinates/builtin_frames/supergalactic_transforms.py b/astropy/coordinates/builtin_frames/supergalactic_transforms.py new file mode 100644 index 0000000..69a2190 --- /dev/null +++ b/astropy/coordinates/builtin_frames/supergalactic_transforms.py @@ -0,0 +1,25 @@ +# -*- coding: utf-8 -*- +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, unicode_literals, division, + print_function) + +from ..matrix_utilities import (rotation_matrix, + matrix_product, matrix_transpose) +from ..baseframe import frame_transform_graph +from ..transformations import StaticMatrixTransform + +from .galactic import Galactic +from .supergalactic import Supergalactic + + +@frame_transform_graph.transform(StaticMatrixTransform, Galactic, Supergalactic) +def gal_to_supergal(): + mat1 = rotation_matrix(90, 'z') + mat2 = rotation_matrix(90 - Supergalactic._nsgp_gal.b.degree, 'y') + mat3 = rotation_matrix(Supergalactic._nsgp_gal.l.degree, 'z') + return matrix_product(mat1, mat2, mat3) + + +@frame_transform_graph.transform(StaticMatrixTransform, Supergalactic, Galactic) +def supergal_to_gal(): + return matrix_transpose(gal_to_supergal()) diff --git a/astropy/coordinates/builtin_frames/utils.py b/astropy/coordinates/builtin_frames/utils.py new file mode 100644 index 0000000..87877d3 --- /dev/null +++ b/astropy/coordinates/builtin_frames/utils.py @@ -0,0 +1,295 @@ +# -*- coding: utf-8 -*- +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" +This module contains functions/values used repeatedly in different modules of +the ``builtin_frames`` package. +""" +from __future__ import (absolute_import, unicode_literals, division, + print_function) + +import warnings + +import numpy as np + +from ... import units as u +from ... import _erfa as erfa +from ...time import Time +from ...utils import iers +from ...utils.exceptions import AstropyWarning + +from ...extern.six.moves import range + +# The UTC time scale is not properly defined prior to 1960, so Time('B1950', +# scale='utc') will emit a warning. Instead, we use Time('B1950', scale='tai') +# which is equivalent, but does not emit a warning. +EQUINOX_J2000 = Time('J2000', scale='utc') +EQUINOX_B1950 = Time('B1950', scale='tai') + +# This is a time object that is the default "obstime" when such an attribute is +# necessary. Currently, we use J2000. +DEFAULT_OBSTIME = Time('J2000', scale='utc') + +PIOVER2 = np.pi / 2. + +# comes from the mean of the 1962-2014 IERS B data +_DEFAULT_PM = (0.035, 0.29)*u.arcsec + + +def get_polar_motion(time): + """ + gets the two polar motion components in radians for use with apio13 + """ + # Get the polar motion from the IERS table + xp, yp, status = iers.IERS_Auto.open().pm_xy(time, return_status=True) + + wmsg = None + if np.any(status == iers.TIME_BEFORE_IERS_RANGE): + wmsg = ('Tried to get polar motions for times before IERS data is ' + 'valid. Defaulting to polar motion from the 50-yr mean for those. ' + 'This may affect precision at the 10s of arcsec level') + xp.ravel()[status.ravel() == iers.TIME_BEFORE_IERS_RANGE] = _DEFAULT_PM[0] + yp.ravel()[status.ravel() == iers.TIME_BEFORE_IERS_RANGE] = _DEFAULT_PM[1] + + warnings.warn(wmsg, AstropyWarning) + + if np.any(status == iers.TIME_BEYOND_IERS_RANGE): + wmsg = ('Tried to get polar motions for times after IERS data is ' + 'valid. Defaulting to polar motion from the 50-yr mean for those. 
'
+                'This may affect precision at the 10s of arcsec level')
+
+        xp.ravel()[status.ravel() == iers.TIME_BEYOND_IERS_RANGE] = _DEFAULT_PM[0]
+        yp.ravel()[status.ravel() == iers.TIME_BEYOND_IERS_RANGE] = _DEFAULT_PM[1]
+
+        warnings.warn(wmsg, AstropyWarning)
+
+    return xp.to_value(u.radian), yp.to_value(u.radian)
+
+
+def _warn_iers(ierserr):
+    """
+    Generate a warning for an IERSRangeError
+
+    Parameters
+    ----------
+    ierserr : An `~astropy.utils.iers.IERSRangeError`
+    """
+    msg = '{0} Assuming UT1-UTC=0 for coordinate transformations.'
+    warnings.warn(msg.format(ierserr.args[0]), AstropyWarning)
+
+
+def get_dut1utc(time):
+    """
+    This function is used to get UT1-UTC in coordinates because normally it
+    gives an error outside the IERS range, but in coordinates we want to allow
+    it to go through but with a warning.
+    """
+    try:
+        return time.delta_ut1_utc
+    except iers.IERSRangeError as e:
+        _warn_iers(e)
+        return np.zeros(time.shape)
+
+
+def get_jd12(time, scale):
+    """
+    Gets ``jd1`` and ``jd2`` from a time object in a particular scale.
+
+    Parameters
+    ----------
+    time : `~astropy.time.Time`
+        The time to get the jds for
+    scale : str
+        The time scale to get the jds for
+
+    Returns
+    -------
+    jd1 : float
+    jd2 : float
+    """
+    if time.scale == scale:
+        newtime = time
+    else:
+        try:
+            newtime = getattr(time, scale)
+        except iers.IERSRangeError as e:
+            _warn_iers(e)
+            newtime = time
+
+    return newtime.jd1, newtime.jd2
+
+
+def norm(p):
+    """
+    Normalise a p-vector.
+    """
+    return p/np.sqrt(np.einsum('...i,...i', p, p))[..., np.newaxis]
+
+
+def get_cip(jd1, jd2):
+    """
+    Find the X, Y coordinates of the CIP and the CIO locator, s.
+
+    Parameters
+    ----------
+    jd1 : float or `np.ndarray`
+        First part of two part Julian date (TDB)
+    jd2 : float or `np.ndarray`
+        Second part of two part Julian date (TDB)
+
+    Returns
+    -------
+    x : float or `np.ndarray`
+        x coordinate of the CIP
+    y : float or `np.ndarray`
+        y coordinate of the CIP
+    s : float or `np.ndarray`
+        CIO locator, s
+    """
+    # classical NPB matrix, IAU 2006/2000A
+    rpnb = erfa.pnm06a(jd1, jd2)
+    # CIP X, Y coordinates from array
+    x, y = erfa.bpn2xy(rpnb)
+    # CIO locator, s
+    s = erfa.s06(jd1, jd2, x, y)
+    return x, y, s
+
+
+def aticq(ri, di, astrom):
+    """
+    A slightly modified version of the ERFA function ``eraAticq``.
+
+    ``eraAticq`` performs the transformations between two coordinate systems,
+    with the details of the transformation being encoded into the ``astrom`` array.
+
+    The companion function ``eraAtciqz`` is meant to be its inverse. However, this
+    is not true for directions close to the Solar centre, since the light deflection
+    calculations are numerically unstable and therefore not reversible.
+
+    This version sidesteps that problem by artificially reducing the light deflection
+    for directions which are within 90 arcseconds of the Sun's position. This is the
+    same approach used by the ERFA functions above, except that they use a threshold of
+    9 arcseconds.
+
+    Parameters
+    ----------
+    ri : float or `~numpy.ndarray`
+        right ascension, radians
+    di : float or `~numpy.ndarray`
+        declination, radians
+    astrom : eraASTROM array
+        ERFA astrometry context, as produced by, e.g. ``eraApci13`` or ``eraApcs13``
+
+    Returns
+    -------
+    rc : float or `~numpy.ndarray`
+    dc : float or `~numpy.ndarray`
+    """
+    # RA, Dec to cartesian unit vectors
+    pos = erfa.s2c(ri, di)
+
+    # Bias-precession-nutation, giving GCRS proper direction.
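+    # (editorial note: erfa.trxp multiplies the *transpose* of the r-matrix
+    # into the p-vector, i.e. it applies the inverse of the bpn rotation here)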
+ ppr = erfa.trxp(astrom['bpn'], pos) + + # Aberration, giving GCRS natural direction + d = np.zeros_like(ppr) + for j in range(2): + before = norm(ppr-d) + after = erfa.ab(before, astrom['v'], astrom['em'], astrom['bm1']) + d = after - before + pnat = norm(ppr-d) + + # Light deflection by the Sun, giving BCRS coordinate direction + d = np.zeros_like(pnat) + for j in range(5): + before = norm(pnat-d) + after = erfa.ld(1.0, before, before, astrom['eh'], astrom['em'], 5e-8) + d = after - before + pco = norm(pnat-d) + + # ICRS astrometric RA, Dec + rc, dc = erfa.c2s(pco) + return erfa.anp(rc), dc + + +def atciqz(rc, dc, astrom): + """ + A slightly modified version of the ERFA function ``eraAtciqz``. + + ``eraAtciqz`` performs the transformations between two coordinate systems, + with the details of the transformation being encoded into the ``astrom`` array. + + The companion function ``eraAticq`` is meant to be its inverse. However, this + is not true for directions close to the Solar centre, since the light deflection + calculations are numerically unstable and therefore not reversible. + + This version sidesteps that problem by artificially reducing the light deflection + for directions which are within 90 arcseconds of the Sun's position. This is the + same approach used by the ERFA functions above, except that they use a threshold of + 9 arcseconds. + + Parameters + ---------- + rc : float or `~numpy.ndarray` + right ascension, radians + dc : float or `~numpy.ndarray` + declination, radians + astrom : eraASTROM array + ERFA astrometry context, as produced by, e.g. ``eraApci13`` or ``eraApcs13`` + + Returns + -------- + ri : float or `~numpy.ndarray` + di : float or `~numpy.ndarray` + """ + # BCRS coordinate direction (unit vector). + pco = erfa.s2c(rc, dc) + + # Light deflection by the Sun, giving BCRS natural direction. + pnat = erfa.ld(1.0, pco, pco, astrom['eh'], astrom['em'], 5e-8) + + # Aberration, giving GCRS proper direction. + ppr = erfa.ab(pnat, astrom['v'], astrom['em'], astrom['bm1']) + + # Bias-precession-nutation, giving CIRS proper direction. + # Has no effect if matrix is identity matrix, in which case gives GCRS ppr. + pi = erfa.rxp(astrom['bpn'], ppr) + + # CIRS (GCRS) RA, Dec + ri, di = erfa.c2s(pi) + return erfa.anp(ri), di + + +def prepare_earth_position_vel(time): + """ + Get barycentric position and velocity, and heliocentric position of Earth + + Parameters + ----------- + time : `~astropy.time.Time` + time at which to calculate position and velocity of Earth + + Returns + -------- + earth_pv : `np.ndarray` + Barycentric position and velocity of Earth, in au and au/day + earth_helio : `np.ndarray` + Heliocentric position of Earth in au + """ + # this goes here to avoid circular import errors + from ..solar_system import (get_body_barycentric, get_body_barycentric_posvel) + # get barycentric position and velocity of earth + earth_pv = get_body_barycentric_posvel('earth', time) + + # get heliocentric position of earth, preparing it for passing to erfa. + sun = get_body_barycentric('sun', time) + earth_heliocentric = (earth_pv[0] - + sun).get_xyz(xyz_axis=-1).to_value(u.au) + + # Also prepare earth_pv for passing to erfa, which wants xyz in last + # dimension, and pos/vel in one-but-last. + # (Note could use np.stack once our minimum numpy version is >=1.10.) 
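+    # (editorial note: the .to(u.au) / .to(u.au/u.d) conversions below matter
+    # because ERFA's apci/apcs expect the barycentric Earth position-velocity
+    # in au and au/day)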
+    earth_pv = np.concatenate((earth_pv[0].get_xyz(xyz_axis=-1).to(u.au)
+                               [..., np.newaxis, :].value,
+                               earth_pv[1].get_xyz(xyz_axis=-1).to(u.au/u.d)
+                               [..., np.newaxis, :].value), axis=-2)
+    return earth_pv, earth_heliocentric
diff --git a/astropy/coordinates/calculation.py b/astropy/coordinates/calculation.py
new file mode 100644
index 0000000..b7e1c84
--- /dev/null
+++ b/astropy/coordinates/calculation.py
@@ -0,0 +1,141 @@
+# -*- coding: utf-8 -*-
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
+
+# Standard library
+from datetime import datetime
+from xml.dom.minidom import parse
+import re
+import textwrap
+
+# Third-party
+from .. import time as atime
+from ..utils.console import color_print, _color_text
+from ..extern.six.moves.urllib.request import urlopen
+from . import get_sun
+
+__all__ = []
+
+
+class HumanError(ValueError): pass
+
+
+class CelestialError(ValueError): pass
+
+
+def get_sign(dt):
+    """
+    Return the zodiac sign for the month and day of the given datetime.
+    """
+    if ((int(dt.month) == 12 and int(dt.day) >= 22) or (int(dt.month) == 1 and int(dt.day) <= 19)):
+        zodiac_sign = "capricorn"
+    elif ((int(dt.month) == 1 and int(dt.day) >= 20) or (int(dt.month) == 2 and int(dt.day) <= 17)):
+        zodiac_sign = "aquarius"
+    elif ((int(dt.month) == 2 and int(dt.day) >= 18) or (int(dt.month) == 3 and int(dt.day) <= 19)):
+        zodiac_sign = "pisces"
+    elif ((int(dt.month) == 3 and int(dt.day) >= 20) or (int(dt.month) == 4 and int(dt.day) <= 19)):
+        zodiac_sign = "aries"
+    elif ((int(dt.month) == 4 and int(dt.day) >= 20) or (int(dt.month) == 5 and int(dt.day) <= 20)):
+        zodiac_sign = "taurus"
+    elif ((int(dt.month) == 5 and int(dt.day) >= 21) or (int(dt.month) == 6 and int(dt.day) <= 20)):
+        zodiac_sign = "gemini"
+    elif ((int(dt.month) == 6 and int(dt.day) >= 21) or (int(dt.month) == 7 and int(dt.day) <= 22)):
+        zodiac_sign = "cancer"
+    elif ((int(dt.month) == 7 and int(dt.day) >= 23) or (int(dt.month) == 8 and int(dt.day) <= 22)):
+        zodiac_sign = "leo"
+    elif ((int(dt.month) == 8 and int(dt.day) >= 23) or (int(dt.month) == 9 and int(dt.day) <= 22)):
+        zodiac_sign = "virgo"
+    elif ((int(dt.month) == 9 and int(dt.day) >= 23) or (int(dt.month) == 10 and int(dt.day) <= 22)):
+        zodiac_sign = "libra"
+    elif ((int(dt.month) == 10 and int(dt.day) >= 23) or (int(dt.month) == 11 and int(dt.day) <= 21)):
+        zodiac_sign = "scorpio"
+    elif ((int(dt.month) == 11 and int(dt.day) >= 22) or (int(dt.month) == 12 and int(dt.day) <= 21)):
+        zodiac_sign = "sagittarius"
+
+    return zodiac_sign
+
+
+_VALID_SIGNS = ["capricorn", "aquarius", "pisces", "aries", "taurus", "gemini",
+                "cancer", "leo", "virgo", "libra", "scorpio", "sagittarius"]
+# Some of the constellation names map to different astrological "sign names".
+# Astrologers really need to talk to the IAU...
+_CONST_TO_SIGNS = {'capricornus': 'capricorn', 'scorpius': 'scorpio'}
+
+
+def horoscope(birthday, corrected=True):
+    """
+    Enter your birthday as an `astropy.time.Time` object and
+    receive a mystical horoscope about things to come.
+
+    Parameters
+    ----------
+    birthday : `astropy.time.Time` or `datetime.datetime`
+        Your birthday as a `datetime.datetime` or `astropy.time.Time` object.
+    corrected : bool
+        Whether to account for the precession of the Earth instead of using the
+        ancient Greek dates for the signs. After all, you do want your *real*
+        horoscope, not a cheap inaccurate approximation, right?
+
+    Returns
+    -------
+    Infinite wisdom, condensed into astrologically precise prose.
+
+    Notes
+    -----
+    This function was implemented on April 1.
Take note of that date. + """ + + special_words = { + '([sS]tar[s^ ]*)': 'yellow', + '([yY]ou[^ ]*)': 'magenta', + '([pP]lay[^ ]*)': 'blue', + '([hH]eart)': 'red', + '([fF]ate)': 'lightgreen', + } + + birthday = atime.Time(birthday) + today = datetime.now() + if corrected: + zodiac_sign = get_sun(birthday).get_constellation().lower() + zodiac_sign = _CONST_TO_SIGNS.get(zodiac_sign, zodiac_sign) + if zodiac_sign not in _VALID_SIGNS: + raise HumanError('On your birthday the sun was in {}, which is not ' + 'a sign of the zodiac. You must not exist. Or ' + 'maybe you can settle for ' + 'corrected=False.'.format(zodiac_sign.title())) + else: + zodiac_sign = get_sign(birthday.to_datetime()) + url = "http://www.findyourfate.com/rss/dailyhoroscope-feed.php?sign={sign}&id=45" + + f = urlopen(url.format(sign=zodiac_sign.capitalize())) + try: # urlopen in py2 is not a decorator + doc = parse(f) + item = doc.getElementsByTagName('item')[0] + desc = item.getElementsByTagName('description')[0].childNodes[0].nodeValue + except Exception: + raise CelestialError("Invalid response from celestial gods (failed to load horoscope).") + finally: + f.close() + + print("*"*79) + color_print("Horoscope for {} on {}:".format(zodiac_sign.capitalize(), today.strftime("%Y-%m-%d")), + 'green') + print("*"*79) + for block in textwrap.wrap(desc, 79): + split_block = block.split() + for i, word in enumerate(split_block): + for re_word in special_words.keys(): + match = re.search(re_word, word) + if match is None: + continue + split_block[i] = _color_text(match.groups()[0], special_words[re_word]) + print(" ".join(split_block)) + + +def inject_horoscope(): + import astropy + astropy._yourfuture = horoscope + + +inject_horoscope() diff --git a/astropy/coordinates/data/constellation_data_roman87.dat b/astropy/coordinates/data/constellation_data_roman87.dat new file mode 100644 index 0000000..aa6c56f --- /dev/null +++ b/astropy/coordinates/data/constellation_data_roman87.dat @@ -0,0 +1,358 @@ +# This data file is from Roman et al. 
1987: http://cdsarc.u-strasbg.fr/viz-bin/Cat?VI/42 + 0.0000 24.0000 88.0000 UMi + 8.0000 14.5000 86.5000 UMi + 21.0000 23.0000 86.1667 UMi + 18.0000 21.0000 86.0000 UMi + 0.0000 8.0000 85.0000 Cep + 9.1667 10.6667 82.0000 Cam + 0.0000 5.0000 80.0000 Cep + 10.6667 14.5000 80.0000 Cam + 17.5000 18.0000 80.0000 UMi + 20.1667 21.0000 80.0000 Dra + 0.0000 3.5083 77.0000 Cep + 11.5000 13.5833 77.0000 Cam + 16.5333 17.5000 75.0000 UMi + 20.1667 20.6667 75.0000 Cep + 7.9667 9.1667 73.5000 Cam + 9.1667 11.3333 73.5000 Dra + 13.0000 16.5333 70.0000 UMi + 3.1000 3.4167 68.0000 Cas + 20.4167 20.6667 67.0000 Dra + 11.3333 12.0000 66.5000 Dra + 0.0000 0.3333 66.0000 Cep + 14.0000 15.6667 66.0000 UMi + 23.5833 24.0000 66.0000 Cep + 12.0000 13.5000 64.0000 Dra + 13.5000 14.4167 63.0000 Dra + 23.1667 23.5833 63.0000 Cep + 6.1000 7.0000 62.0000 Cam + 20.0000 20.4167 61.5000 Dra + 20.5367 20.6000 60.9167 Cep + 7.0000 7.9667 60.0000 Cam + 7.9667 8.4167 60.0000 UMa + 19.7667 20.0000 59.5000 Dra + 20.0000 20.5367 59.5000 Cep + 22.8667 23.1667 59.0833 Cep + 0.0000 2.4333 58.5000 Cas + 19.4167 19.7667 58.0000 Dra + 1.7000 1.9083 57.5000 Cas + 2.4333 3.1000 57.0000 Cas + 3.1000 3.1667 57.0000 Cam + 22.3167 22.8667 56.2500 Cep + 5.0000 6.1000 56.0000 Cam + 14.0333 14.4167 55.5000 UMa + 14.4167 19.4167 55.5000 Dra + 3.1667 3.3333 55.0000 Cam + 22.1333 22.3167 55.0000 Cep + 20.6000 21.9667 54.8333 Cep + 0.0000 1.7000 54.0000 Cas + 6.1000 6.5000 54.0000 Lyn + 12.0833 13.5000 53.0000 UMa + 15.2500 15.7500 53.0000 Dra + 21.9667 22.1333 52.7500 Cep + 3.3333 5.0000 52.5000 Cam + 22.8667 23.3333 52.5000 Cas + 15.7500 17.0000 51.5000 Dra + 2.0417 2.5167 50.5000 Per + 17.0000 18.2333 50.5000 Dra + 0.0000 1.3667 50.0000 Cas + 1.3667 1.6667 50.0000 Per + 6.5000 6.8000 50.0000 Lyn + 23.3333 24.0000 50.0000 Cas + 13.5000 14.0333 48.5000 UMa + 0.0000 1.1167 48.0000 Cas + 23.5833 24.0000 48.0000 Cas + 18.1750 18.2333 47.5000 Her + 18.2333 19.0833 47.5000 Dra + 19.0833 19.1667 47.5000 Cyg + 1.6667 2.0417 47.0000 Per + 8.4167 9.1667 47.0000 UMa + 0.1667 0.8667 46.0000 Cas + 12.0000 12.0833 45.0000 UMa + 6.8000 7.3667 44.5000 Lyn + 21.9083 21.9667 44.0000 Cyg + 21.8750 21.9083 43.7500 Cyg + 19.1667 19.4000 43.5000 Cyg + 9.1667 10.1667 42.0000 UMa + 10.1667 10.7833 40.0000 UMa + 15.4333 15.7500 40.0000 Boo + 15.7500 16.3333 40.0000 Her + 9.2500 9.5833 39.7500 Lyn + 0.0000 2.5167 36.7500 And + 2.5167 2.5667 36.7500 Per + 19.3583 19.4000 36.5000 Lyr + 4.5000 4.6917 36.0000 Per + 21.7333 21.8750 36.0000 Cyg + 21.8750 22.0000 36.0000 Lac + 6.5333 7.3667 35.5000 Aur + 7.3667 7.7500 35.5000 Lyn + 0.0000 2.0000 35.0000 And + 22.0000 22.8167 35.0000 Lac + 22.8167 22.8667 34.5000 Lac + 22.8667 23.5000 34.5000 And + 2.5667 2.7167 34.0000 Per + 10.7833 11.0000 34.0000 UMa + 12.0000 12.3333 34.0000 CVn + 7.7500 9.2500 33.5000 Lyn + 9.2500 9.8833 33.5000 LMi + 0.7167 1.4083 33.0000 And + 15.1833 15.4333 33.0000 Boo + 23.5000 23.7500 32.0833 And + 12.3333 13.2500 32.0000 CVn + 23.7500 24.0000 31.3333 And + 13.9583 14.0333 30.7500 CVn + 2.4167 2.7167 30.6667 Tri + 2.7167 4.5000 30.6667 Per + 4.5000 4.7500 30.0000 Aur + 18.1750 19.3583 30.0000 Lyr + 11.0000 12.0000 29.0000 UMa + 19.6667 20.9167 29.0000 Cyg + 4.7500 5.8833 28.5000 Aur + 9.8833 10.5000 28.5000 LMi + 13.2500 13.9583 28.5000 CVn + 0.0000 0.0667 28.0000 And + 1.4083 1.6667 28.0000 Tri + 5.8833 6.5333 28.0000 Aur + 7.8833 8.0000 28.0000 Gem + 20.9167 21.7333 28.0000 Cyg + 19.2583 19.6667 27.5000 Cyg + 1.9167 2.4167 27.2500 Tri + 16.1667 16.3333 27.0000 CrB + 15.0833 15.1833 26.0000 Boo + 
15.1833 16.1667 26.0000 CrB + 18.3667 18.8667 26.0000 Lyr + 10.7500 11.0000 25.5000 LMi + 18.8667 19.2583 25.5000 Lyr + 1.6667 1.9167 25.0000 Tri + 0.7167 0.8500 23.7500 Psc + 10.5000 10.7500 23.5000 LMi + 21.2500 21.4167 23.5000 Vul + 5.7000 5.8833 22.8333 Tau + 0.0667 0.1417 22.0000 And + 15.9167 16.0333 22.0000 Ser + 5.8833 6.2167 21.5000 Gem + 19.8333 20.2500 21.2500 Vul + 18.8667 19.2500 21.0833 Vul + 0.1417 0.8500 21.0000 And + 20.2500 20.5667 20.5000 Vul + 7.8083 7.8833 20.0000 Gem + 20.5667 21.2500 19.5000 Vul + 19.2500 19.8333 19.1667 Vul + 3.2833 3.3667 19.0000 Ari + 18.8667 19.0000 18.5000 Sge + 5.7000 5.7667 18.0000 Ori + 6.2167 6.3083 17.5000 Gem + 19.0000 19.8333 16.1667 Sge + 4.9667 5.3333 16.0000 Tau + 15.9167 16.0833 16.0000 Her + 19.8333 20.2500 15.7500 Sge + 4.6167 4.9667 15.5000 Tau + 5.3333 5.6000 15.5000 Tau + 12.8333 13.5000 15.0000 Com + 17.2500 18.2500 14.3333 Her + 11.8667 12.8333 14.0000 Com + 7.5000 7.8083 13.5000 Gem + 16.7500 17.2500 12.8333 Her + 0.0000 0.1417 12.5000 Peg + 5.6000 5.7667 12.5000 Tau + 7.0000 7.5000 12.5000 Gem + 21.1167 21.3333 12.5000 Peg + 6.3083 6.9333 12.0000 Gem + 18.2500 18.8667 12.0000 Her + 20.8750 21.0500 11.8333 Del + 21.0500 21.1167 11.8333 Peg + 11.5167 11.8667 11.0000 Leo + 6.2417 6.3083 10.0000 Ori + 6.9333 7.0000 10.0000 Gem + 7.8083 7.9250 10.0000 Cnc + 23.8333 24.0000 10.0000 Peg + 1.6667 3.2833 9.9167 Ari + 20.1417 20.3000 8.5000 Del + 13.5000 15.0833 8.0000 Boo + 22.7500 23.8333 7.5000 Peg + 7.9250 9.2500 7.0000 Cnc + 9.2500 10.7500 7.0000 Leo + 18.2500 18.6622 6.2500 Oph + 18.6622 18.8667 6.2500 Aql + 20.8333 20.8750 6.0000 Del + 7.0000 7.0167 5.5000 CMi + 18.2500 18.4250 4.5000 Ser + 16.0833 16.7500 4.0000 Her + 18.2500 18.4250 3.0000 Oph + 21.4667 21.6667 2.7500 Peg + 0.0000 2.0000 2.0000 Psc + 18.5833 18.8667 2.0000 Ser + 20.3000 20.8333 2.0000 Del + 20.8333 21.3333 2.0000 Equ + 21.3333 21.4667 2.0000 Peg + 22.0000 22.7500 2.0000 Peg + 21.6667 22.0000 1.7500 Peg + 7.0167 7.2000 1.5000 CMi + 3.5833 4.6167 0.0000 Tau + 4.6167 4.6667 0.0000 Ori + 7.2000 8.0833 0.0000 CMi + 14.6667 15.0833 0.0000 Vir + 17.8333 18.2500 0.0000 Oph + 2.6500 3.2833 -01.7500 Cet + 3.2833 3.5833 -01.7500 Tau + 15.0833 16.2667 -03.2500 Ser + 4.6667 5.0833 -04.0000 Ori + 5.8333 6.2417 -04.0000 Ori + 17.8333 17.9667 -04.0000 Ser + 18.2500 18.5833 -04.0000 Ser + 18.5833 18.8667 -04.0000 Aql + 22.7500 23.8333 -04.0000 Psc + 10.7500 11.5167 -06.0000 Leo + 11.5167 11.8333 -06.0000 Vir + 0.0000 00.3333 -07.0000 Psc + 23.8333 24.0000 -07.0000 Psc + 14.2500 14.6667 -08.0000 Vir + 15.9167 16.2667 -08.0000 Oph + 20.0000 20.5333 -09.0000 Aql + 21.3333 21.8667 -09.0000 Aqr + 17.1667 17.9667 -10.0000 Oph + 5.8333 8.0833 -11.0000 Mon + 4.9167 5.0833 -11.0000 Eri + 5.0833 5.8333 -11.0000 Ori + 8.0833 8.3667 -11.0000 Hya + 9.5833 10.7500 -11.0000 Sex + 11.8333 12.8333 -11.0000 Vir + 17.5833 17.6667 -11.6667 Oph + 18.8667 20.0000 -12.0333 Aql + 4.8333 4.9167 -14.5000 Eri + 20.5333 21.3333 -15.0000 Aqr + 17.1667 18.2500 -16.0000 Ser + 18.2500 18.8667 -16.0000 Sct + 8.3667 8.5833 -17.0000 Hya + 16.2667 16.3750 -18.2500 Oph + 8.5833 9.0833 -19.0000 Hya + 10.7500 10.8333 -19.0000 Crt + 16.2667 16.3750 -19.2500 Sco + 15.6667 15.9167 -20.0000 Lib + 12.5833 12.8333 -22.0000 Crv + 12.8333 14.2500 -22.0000 Vir + 9.0833 9.7500 -24.0000 Hya + 1.6667 2.6500 -24.3833 Cet + 2.6500 3.7500 -24.3833 Eri + 10.8333 11.8333 -24.5000 Crt + 11.8333 12.5833 -24.5000 Crv + 14.2500 14.9167 -24.5000 Lib + 16.2667 16.7500 -24.5833 Oph + 0.0000 1.6667 -25.5000 Cet + 21.3333 21.8667 -25.5000 
Cap + 21.8667 23.8333 -25.5000 Aqr + 23.8333 24.0000 -25.5000 Cet + 9.7500 10.2500 -26.5000 Hya + 4.7000 4.8333 -27.2500 Eri + 4.8333 6.1167 -27.2500 Lep + 20.0000 21.3333 -28.0000 Cap + 10.2500 10.5833 -29.1667 Hya + 12.5833 14.9167 -29.5000 Hya + 14.9167 15.6667 -29.5000 Lib + 15.6667 16.0000 -29.5000 Sco + 4.5833 4.7000 -30.0000 Eri + 16.7500 17.6000 -30.0000 Oph + 17.6000 17.8333 -30.0000 Sgr + 10.5833 10.8333 -31.1667 Hya + 6.1167 7.3667 -33.0000 CMa + 12.2500 12.5833 -33.0000 Hya + 10.8333 12.2500 -35.0000 Hya + 3.5000 3.7500 -36.0000 For + 8.3667 9.3667 -36.7500 Pyx + 4.2667 4.5833 -37.0000 Eri + 17.8333 19.1667 -37.0000 Sgr + 21.3333 23.0000 -37.0000 PsA + 23.0000 23.3333 -37.0000 Scl + 3.0000 3.5000 -39.5833 For + 9.3667 11.0000 -39.7500 Ant + 0.0000 1.6667 -40.0000 Scl + 1.6667 3.0000 -40.0000 For + 3.8667 4.2667 -40.0000 Eri + 23.3333 24.0000 -40.0000 Scl + 14.1667 14.9167 -42.0000 Cen + 15.6667 16.0000 -42.0000 Lup + 16.0000 16.4208 -42.0000 Sco + 4.8333 5.0000 -43.0000 Cae + 5.0000 6.5833 -43.0000 Col + 8.0000 8.3667 -43.0000 Pup + 3.4167 3.8667 -44.0000 Eri + 16.4208 17.8333 -45.5000 Sco + 17.8333 19.1667 -45.5000 CrA + 19.1667 20.3333 -45.5000 Sgr + 20.3333 21.3333 -45.5000 Mic + 3.0000 3.4167 -46.0000 Eri + 4.5000 4.8333 -46.5000 Cae + 15.3333 15.6667 -48.0000 Lup + 0.0000 2.3333 -48.1667 Phe + 2.6667 3.0000 -49.0000 Eri + 4.0833 4.2667 -49.0000 Hor + 4.2667 4.5000 -49.0000 Cae + 21.3333 22.0000 -50.0000 Gru + 6.0000 8.0000 -50.7500 Pup + 8.0000 8.1667 -50.7500 Vel + 2.4167 2.6667 -51.0000 Eri + 3.8333 4.0833 -51.0000 Hor + 0.0000 1.8333 -51.5000 Phe + 6.0000 6.1667 -52.5000 Car + 8.1667 8.4500 -53.0000 Vel + 3.5000 3.8333 -53.1667 Hor + 3.8333 4.0000 -53.1667 Dor + 0.0000 1.5833 -53.5000 Phe + 2.1667 2.4167 -54.0000 Eri + 4.5000 5.0000 -54.0000 Pic + 15.0500 15.3333 -54.0000 Lup + 8.4500 8.8333 -54.5000 Vel + 6.1667 6.5000 -55.0000 Car + 11.8333 12.8333 -55.0000 Cen + 14.1667 15.0500 -55.0000 Lup + 15.0500 15.3333 -55.0000 Nor + 4.0000 4.3333 -56.5000 Dor + 8.8333 11.0000 -56.5000 Vel + 11.0000 11.2500 -56.5000 Cen + 17.5000 18.0000 -57.0000 Ara + 18.0000 20.3333 -57.0000 Tel + 22.0000 23.3333 -57.0000 Gru + 3.2000 3.5000 -57.5000 Hor + 5.0000 5.5000 -57.5000 Pic + 6.5000 6.8333 -58.0000 Car + 0.0000 1.3333 -58.5000 Phe + 1.3333 2.1667 -58.5000 Eri + 23.3333 24.0000 -58.5000 Phe + 4.3333 4.5833 -59.0000 Dor + 15.3333 16.4208 -60.0000 Nor + 20.3333 21.3333 -60.0000 Ind + 5.5000 6.0000 -61.0000 Pic + 15.1667 15.3333 -61.0000 Cir + 16.4208 16.5833 -61.0000 Ara + 14.9167 15.1667 -63.5833 Cir + 16.5833 16.7500 -63.5833 Ara + 6.0000 6.8333 -64.0000 Pic + 6.8333 9.0333 -64.0000 Car + 11.2500 11.8333 -64.0000 Cen + 11.8333 12.8333 -64.0000 Cru + 12.8333 14.5333 -64.0000 Cen + 13.5000 13.6667 -65.0000 Cir + 16.7500 16.8333 -65.0000 Ara + 2.1667 3.2000 -67.5000 Hor + 3.2000 4.5833 -67.5000 Ret + 14.7500 14.9167 -67.5000 Cir + 16.8333 17.5000 -67.5000 Ara + 17.5000 18.0000 -67.5000 Pav + 22.0000 23.3333 -67.5000 Tuc + 4.5833 6.5833 -70.0000 Dor + 13.6667 14.7500 -70.0000 Cir + 14.7500 17.0000 -70.0000 TrA + 0.0000 1.3333 -75.0000 Tuc + 3.5000 4.5833 -75.0000 Hyi + 6.5833 9.0333 -75.0000 Vol + 9.0333 11.2500 -75.0000 Car + 11.2500 13.6667 -75.0000 Mus + 18.0000 21.3333 -75.0000 Pav + 21.3333 23.3333 -75.0000 Ind + 23.3333 24.0000 -75.0000 Tuc + 0.7500 1.3333 -76.0000 Tuc + 0.0000 3.5000 -82.5000 Hyi + 7.6667 13.6667 -82.5000 Cha + 13.6667 18.0000 -82.5000 Aps + 3.5000 7.6667 -85.0000 Men + 0.0000 24.0000 -90.0000 Oct diff --git a/astropy/coordinates/data/constellation_names.dat 
b/astropy/coordinates/data/constellation_names.dat new file mode 100644 index 0000000..a6ba4e5 --- /dev/null +++ b/astropy/coordinates/data/constellation_names.dat @@ -0,0 +1,89 @@ +# This list gives the official IAU constellation names via vizier: http://vizier.u-strasbg.fr/vizier/VizieR/constellations.htx +And Andromeda +Ant Antlia +Aps Apus +Aqr Aquarius +Aql Aquila +Ara Ara +Ari Aries +Aur Auriga +Boo Boötes +Cae Caelum +Cam Camelopardalis +Cnc Cancer +CVn Canes Venatici +CMa Canis Major +CMi Canis Minor +Cap Capricornus +Car Carina +Cas Cassiopeia +Cen Centaurus +Cep Cepheus +Cet Cetus +Cha Chamaeleon +Cir Circinus +Col Columba +Com Coma Berenices +CrA Corona Australis +CrB Corona Borealis +Crv Corvus +Crt Crater +Cru Crux +Cyg Cygnus +Del Delphinus +Dor Dorado +Dra Draco +Equ Equuleus +Eri Eridanus +For Fornax +Gem Gemini +Gru Grus +Her Hercules +Hor Horologium +Hya Hydra +Hyi Hydrus +Ind Indus +Lac Lacerta +Leo Leo +LMi Leo Minor +Lep Lepus +Lib Libra +Lup Lupus +Lyn Lynx +Lyr Lyra +Men Mensa +Mic Microscopium +Mon Monoceros +Mus Musca +Nor Norma +Oct Octans +Oph Ophiuchus +Ori Orion +Pav Pavo +Peg Pegasus +Per Perseus +Phe Phoenix +Pic Pictor +Psc Pisces +PsA Piscis Austrinus +Pup Puppis +Pyx Pyxis +Ret Reticulum +Sge Sagitta +Sgr Sagittarius +Sco Scorpius +Scl Sculptor +Sct Scutum +Ser Serpens +Sex Sextans +Tau Taurus +Tel Telescopium +Tri Triangulum +TrA Triangulum Australe +Tuc Tucana +UMa Ursa Major +UMi Ursa Minor +Vel Vela +Vir Virgo +Vol Volans +Vul Vulpecula \ No newline at end of file diff --git a/astropy/coordinates/data/sites.json b/astropy/coordinates/data/sites.json new file mode 100644 index 0000000..cbcb8ff --- /dev/null +++ b/astropy/coordinates/data/sites.json @@ -0,0 +1,15 @@ +{ + "greenwich": { + "source": "Ordnance Survey via http://gpsinformation.net/main/greenwich.htm and UNESCO", + "elevation": 46, + "name": "Royal Observatory Greenwich", + "longitude_unit": "degree", + "latitude_unit": "degree", + "latitude": 51.477811, + "elevation_unit": "meter", + "longitude": -0.001475, + "aliases": [ + "example_site" + ] + } +} diff --git a/astropy/coordinates/distances.py b/astropy/coordinates/distances.py new file mode 100644 index 0000000..2c537c0 --- /dev/null +++ b/astropy/coordinates/distances.py @@ -0,0 +1,206 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +""" +This module contains the classes and utility functions for distance and +cartesian coordinates. +""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import numpy as np + +from .. import units as u + +__all__ = ['Distance'] + + +__doctest_requires__ = {'*': ['scipy.integrate']} + + +class Distance(u.SpecificTypeQuantity): + """ + A one-dimensional distance. + + This can be initialized in one of four ways: + + * A distance ``value`` (array or float) and a ``unit`` + * A `~astropy.units.Quantity` object + * A redshift and (optionally) a cosmology. + * Providing a distance modulus + + Parameters + ---------- + value : scalar or `~astropy.units.Quantity`. + The value of this distance. + unit : `~astropy.units.UnitBase` + The units for this distance, *if* ``value`` is not a + `~astropy.units.Quantity`. Must have dimensions of distance. + z : float + A redshift for this distance. It will be converted to a distance + by computing the luminosity distance for this redshift given the + cosmology specified by ``cosmology``. Must be given as a keyword + argument.
+ cosmology : ``Cosmology`` or `None` + A cosmology that will be used to compute the distance from ``z``. + If `None`, the current cosmology will be used (see + `astropy.cosmology` for details). + distmod : float or `~astropy.units.Quantity` + The distance modulus for this distance. Note that if ``unit`` is not + provided, a guess will be made at the unit between AU, pc, kpc, and Mpc. + dtype : `~numpy.dtype`, optional + See `~astropy.units.Quantity`. + copy : bool, optional + See `~astropy.units.Quantity`. + order : {'C', 'F', 'A'}, optional + See `~astropy.units.Quantity`. + subok : bool, optional + See `~astropy.units.Quantity`. + ndmin : int, optional + See `~astropy.units.Quantity`. + allow_negative : bool, optional + Whether to allow negative distances (which are possible in some + cosmologies). Default: ``False``. + + Raises + ------ + `~astropy.units.UnitsError` + If the ``unit`` is not a distance. + ValueError + If the value specified is less than 0 and ``allow_negative=False``. + + If ``z`` is provided with a ``unit`` or ``cosmology`` is provided + when ``z`` is *not* given, or ``value`` is given as well as ``z``. + + + Examples + -------- + >>> from astropy import units as u + >>> from astropy import cosmology + >>> from astropy.cosmology import WMAP5, WMAP7 + >>> cosmology.set_current(WMAP7) + >>> d1 = Distance(10, u.Mpc) + >>> d2 = Distance(40, unit=u.au) + >>> d3 = Distance(value=5, unit=u.kpc) + >>> d4 = Distance(z=0.23) + >>> d5 = Distance(z=0.23, cosmology=WMAP5) + >>> d6 = Distance(distmod=24.47) + >>> d7 = Distance(Distance(10 * u.Mpc)) + """ + + _equivalent_unit = u.m + _include_easy_conversion_members = True + + def __new__(cls, value=None, unit=None, z=None, cosmology=None, + distmod=None, dtype=None, copy=True, order=None, + subok=False, ndmin=0, allow_negative=False): + + if z is not None: + if value is not None or distmod is not None: + raise ValueError('Should give only one of `value`, `z` ' + 'or `distmod` in Distance constructor.') + + if cosmology is None: + from ..cosmology import default_cosmology + cosmology = default_cosmology.get() + + value = cosmology.luminosity_distance(z) + # Continue on to take account of unit and other arguments + # but a copy is already made, so no longer necessary + copy = False + + else: + if cosmology is not None: + raise ValueError('A `cosmology` was given but `z` was not ' + 'provided in Distance constructor') + + if distmod is not None: + if value is not None: + raise ValueError('Should give only one of `value`, `z` ' + 'or `distmod` in Distance constructor.') + + value = cls._distmod_to_pc(distmod) + if unit is None: + # if the unit is not specified, guess based on the mean of + # the log of the distance + meanlogval = np.log10(value.value).mean() + if meanlogval > 6: + unit = u.Mpc + elif meanlogval > 3: + unit = u.kpc + elif meanlogval < -3: # ~200 AU + unit = u.AU + else: + unit = u.pc + + # Continue on to take account of unit and other arguments + # but a copy is already made, so no longer necessary + copy = False + + elif value is None: + raise ValueError('None of `value`, `z`, or `distmod` were ' + 'given to Distance constructor') + + # now we have arguments like for a Quantity, so let it do the work + distance = super(Distance, cls).__new__( + cls, value, unit, dtype=dtype, copy=copy, order=order, + subok=subok, ndmin=ndmin) + + if not allow_negative and np.any(distance.value < 0): + raise ValueError("Distance must be >= 0.
Use the argument " + "'allow_negative=True' to allow negative values.") + + return distance + + @property + def z(self): + """Short for ``self.compute_z()``""" + return self.compute_z() + + def compute_z(self, cosmology=None): + """ + The redshift for this distance assuming its physical distance is + a luminosity distance. + + Parameters + ---------- + cosmology : ``Cosmology`` or `None` + The cosmology to assume for this calculation, or `None` to use the + current cosmology (see `astropy.cosmology` for details). + + Returns + ------- + z : float + The redshift of this distance given the provided ``cosmology``. + """ + + if cosmology is None: + from ..cosmology import default_cosmology + cosmology = default_cosmology.get() + + from ..cosmology import z_at_value + return z_at_value(cosmology.luminosity_distance, self, ztol=1.e-10) + + @property + def distmod(self): + """The distance modulus as a `~astropy.units.Quantity`""" + val = 5. * np.log10(self.to_value(u.pc)) - 5. + return u.Quantity(val, u.mag, copy=False) + + @classmethod + def _distmod_to_pc(cls, dm): + dm = u.Quantity(dm, u.mag) + return cls(10 ** ((dm.value + 5) / 5.), u.pc, copy=False) + + +def _convert_to_and_validate_length_unit(unit, allow_dimensionless=False): + """ + raises UnitsError if not a length unit + """ + try: + unit = u.Unit(unit) + assert (unit.is_equivalent(u.kpc) or + allow_dimensionless and unit == u.dimensionless_unscaled) + except (TypeError, AssertionError): + raise u.UnitsError('Unit "{0}" is not a length type'.format(unit)) + + return unit diff --git a/astropy/coordinates/earth.py b/astropy/coordinates/earth.py new file mode 100644 index 0000000..bdd941d --- /dev/null +++ b/astropy/coordinates/earth.py @@ -0,0 +1,743 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import absolute_import, division, print_function + +from warnings import warn +import collections +import socket +import json + +import numpy as np +from .. import units as u +from .. import constants as consts +from ..units.quantity import QuantityInfoBase +from ..extern import six +from ..extern.six.moves import urllib +from ..utils.exceptions import AstropyUserWarning +from ..utils.compat.numpycompat import NUMPY_LT_1_12 +from ..utils.compat.numpy import broadcast_to +from .angles import Longitude, Latitude +from .representation import CartesianRepresentation, CartesianDifferential +from .errors import UnknownSiteException +from ..utils import data, deprecated + +try: + # Not guaranteed available at setup time. + from .. import _erfa as erfa +except ImportError: + if not _ASTROPY_SETUP_: + raise + +__all__ = ['EarthLocation'] + +GeodeticLocation = collections.namedtuple('GeodeticLocation', ['lon', 'lat', 'height']) + +# Available ellipsoids (defined in erfam.h, with numbers exposed in erfa). +ELLIPSOIDS = ('WGS84', 'GRS80', 'WGS72') + +OMEGA_EARTH = u.Quantity(7.292115855306589e-5, 1./u.s) +""" +Rotational velocity of Earth. In UT1 seconds, this would be 2 pi / (24 * 3600), +but we need the value in SI seconds. +See Explanatory Supplement to the Astronomical Almanac, ed. P. Kenneth Seidelmann (1992), +University Science Books. 
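+(As a numerical cross-check: one revolution at this rate takes 2 pi / 7.292115855306589e-5, about 86164.1 SI seconds, i.e. roughly one sidereal day rather than the 86400 s solar day.)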
+""" + + +def _check_ellipsoid(ellipsoid=None, default='WGS84'): + if ellipsoid is None: + ellipsoid = default + if ellipsoid not in ELLIPSOIDS: + raise ValueError('Ellipsoid {0} not among known ones ({1})' + .format(ellipsoid, ELLIPSOIDS)) + return ellipsoid + + +def _get_json_result(url, err_str): + # need to do this here to prevent a series of complicated circular imports + from .name_resolve import NameResolveError + try: + # Retrieve JSON response from Google maps API + resp = urllib.request.urlopen(url, timeout=data.conf.remote_timeout) + resp_data = json.loads(resp.read().decode('utf8')) + + except urllib.error.URLError as e: + # This catches a timeout error, see: + # http://stackoverflow.com/questions/2712524/handling-urllib2s-timeout-python + if isinstance(e.reason, socket.timeout): + raise NameResolveError(err_str.format(msg="connection timed out")) + else: + raise NameResolveError(err_str.format(msg=e.reason)) + + except socket.timeout: + # There are some cases where urllib2 does not catch socket.timeout + # especially while receiving response data on an already previously + # working request + raise NameResolveError(err_str.format(msg="connection timed out")) + + results = resp_data.get('results', []) + + if not results: + raise NameResolveError(err_str.format(msg="no results returned")) + + if resp_data.get('status', None) != 'OK': + raise NameResolveError(err_str.format(msg="unknown failure with Google maps API")) + + return results + + +class EarthLocationInfo(QuantityInfoBase): + """ + Container for meta information like name, description, format. This is + required when the object is used as a mixin column within a table, but can + be used as a general way to store meta information. + """ + _represent_as_dict_attrs = ('x', 'y', 'z', 'ellipsoid') + + def _construct_from_dict(self, map): + # Need to pop ellipsoid off and update post-instantiation. This is + # on the to-fix list in #4261. + ellipsoid = map.pop('ellipsoid') + out = self._parent_cls(**map) + out.ellipsoid = ellipsoid + return out + + def new_like(self, cols, length, metadata_conflicts='warn', name=None): + """ + Return a new EarthLocation instance which is consistent with the + input ``cols`` and has ``length`` rows. + + This is intended for creating an empty column object whose elements can + be set in-place for table operations like join or vstack. + + Parameters + ---------- + cols : list + List of input columns + length : int + Length of the output column object + metadata_conflicts : str ('warn'|'error'|'silent') + How to handle metadata conflicts + name : str + Output column name + + Returns + ------- + col : EarthLocation (or subclass) + Empty instance of this class consistent with ``cols`` + """ + # Very similar to QuantityInfo.new_like, but the creation of the + # map is different enough that this needs its own rouinte. + # Get merged info attributes shape, dtype, format, description. + attrs = self.merge_cols_attributes(cols, metadata_conflicts, name, + ('meta', 'format', 'description')) + # The above raises an error if the dtypes do not match, but returns + # just the string representation, which is not useful, so remove. + attrs.pop('dtype') + # Make empty EarthLocation using the dtype and unit of the last column. + # Use zeros so we do not get problems for possible conversion to + # geodetic coordinates. 
+ shape = (length,) + attrs.pop('shape') + data = u.Quantity(np.zeros(shape=shape, dtype=cols[0].dtype), + unit=cols[0].unit, copy=False) + # Get arguments needed to reconstruct class + map = {key: (data[key] if key in 'xyz' else getattr(cols[-1], key)) + for key in self._represent_as_dict_attrs} + out = self._construct_from_dict(map) + # Set remaining info attributes + for attr, value in attrs.items(): + setattr(out.info, attr, value) + + return out + + +class EarthLocation(u.Quantity): + """ + Location on the Earth. + + Initialization is first attempted assuming geocentric (x, y, z) coordinates + are given; if that fails, another attempt is made assuming geodetic + coordinates (longitude, latitude, height above a reference ellipsoid). + When using the geodetic forms, Longitudes are measured increasing to the + east, so west longitudes are negative. Internally, the coordinates are + stored as geocentric. + + To ensure a specific type of coordinates is used, use the corresponding + class methods (`from_geocentric` and `from_geodetic`) or initialize the + arguments with names (``x``, ``y``, ``z`` for geocentric; ``lon``, ``lat``, + ``height`` for geodetic). See the class methods for details. + + + Notes + ----- + This class fits into the coordinates transformation framework in that it + encodes a position on the `~astropy.coordinates.ITRS` frame. To get a + proper `~astropy.coordinates.ITRS` object from this object, use the ``itrs`` + property. + """ + + _ellipsoid = 'WGS84' + _location_dtype = np.dtype({'names': ['x', 'y', 'z'], + 'formats': [np.float64]*3}) + _array_dtype = np.dtype((np.float64, (3,))) + + info = EarthLocationInfo() + + def __new__(cls, *args, **kwargs): + # TODO: needs copy argument and better dealing with inputs. + if (len(args) == 1 and len(kwargs) == 0 and + isinstance(args[0], EarthLocation)): + return args[0].copy() + try: + self = cls.from_geocentric(*args, **kwargs) + except (u.UnitsError, TypeError) as exc_geocentric: + try: + self = cls.from_geodetic(*args, **kwargs) + except Exception as exc_geodetic: + raise TypeError('Coordinates could not be parsed as either ' + 'geocentric or geodetic, with respective ' + 'exceptions "{0}" and "{1}"' + .format(exc_geocentric, exc_geodetic)) + return self + + @classmethod + def from_geocentric(cls, x, y, z, unit=None): + """ + Location on Earth, initialized from geocentric coordinates. + + Parameters + ---------- + x, y, z : `~astropy.units.Quantity` or array-like + Cartesian coordinates. If not quantities, ``unit`` should be given. + unit : `~astropy.units.UnitBase` object or None + Physical unit of the coordinate values. If ``x``, ``y``, and/or + ``z`` are quantities, they will be converted to this unit. + + Raises + ------ + astropy.units.UnitsError + If the units on ``x``, ``y``, and ``z`` do not match or an invalid + unit is given. + ValueError + If the shapes of ``x``, ``y``, and ``z`` do not match. + TypeError + If ``x`` is not a `~astropy.units.Quantity` and no unit is given. 
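+ + Examples + -------- + A minimal usage sketch (the numbers here are illustrative, not a real + site): + + >>> from astropy import units as u + >>> from astropy.coordinates import EarthLocation + >>> loc = EarthLocation.from_geocentric(6378.1, 0., 0., unit=u.km)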
+ """ + if unit is None: + try: + unit = x.unit + except AttributeError: + raise TypeError("Geocentric coordinates should be Quantities " + "unless an explicit unit is given.") + else: + unit = u.Unit(unit) + + if unit.physical_type != 'length': + raise u.UnitsError("Geocentric coordinates should be in " + "units of length.") + + try: + x = u.Quantity(x, unit, copy=False) + y = u.Quantity(y, unit, copy=False) + z = u.Quantity(z, unit, copy=False) + except u.UnitsError: + raise u.UnitsError("Geocentric coordinate units should all be " + "consistent.") + + x, y, z = np.broadcast_arrays(x, y, z) + struc = np.empty(x.shape, cls._location_dtype) + struc['x'], struc['y'], struc['z'] = x, y, z + return super(EarthLocation, cls).__new__(cls, struc, unit, copy=False) + + @classmethod + def from_geodetic(cls, lon, lat, height=0., ellipsoid=None): + """ + Location on Earth, initialized from geodetic coordinates. + + Parameters + ---------- + lon : `~astropy.coordinates.Longitude` or float + Earth East longitude. Can be anything that initialises an + `~astropy.coordinates.Angle` object (if float, in degrees). + lat : `~astropy.coordinates.Latitude` or float + Earth latitude. Can be anything that initialises an + `~astropy.coordinates.Latitude` object (if float, in degrees). + height : `~astropy.units.Quantity` or float, optional + Height above reference ellipsoid (if float, in meters; default: 0). + ellipsoid : str, optional + Name of the reference ellipsoid to use (default: 'WGS84'). + Available ellipsoids are: 'WGS84', 'GRS80', 'WGS72'. + + Raises + ------ + astropy.units.UnitsError + If the units on ``lon`` and ``lat`` are inconsistent with angular + ones, or that on ``height`` with a length. + ValueError + If ``lon``, ``lat``, and ``height`` do not have the same shape, or + if ``ellipsoid`` is not recognized as among the ones implemented. + + Notes + ----- + For the conversion to geocentric coordinates, the ERFA routine + ``gd2gc`` is used. See https://github.com/liberfa/erfa + """ + ellipsoid = _check_ellipsoid(ellipsoid, default=cls._ellipsoid) + lon = Longitude(lon, u.degree, wrap_angle=180*u.degree, copy=False) + lat = Latitude(lat, u.degree, copy=False) + # don't convert to m by default, so we can use the height unit below. + if not isinstance(height, u.Quantity): + height = u.Quantity(height, u.m, copy=False) + # convert to float in units required for erfa routine, and ensure + # all broadcast to same shape, and are at least 1-dimensional. + _lon, _lat, _height = np.broadcast_arrays(lon.to_value(u.radian), + lat.to_value(u.radian), + height.to_value(u.m)) + # get geocentric coordinates. Have to give one-dimensional array. + xyz = erfa.gd2gc(getattr(erfa, ellipsoid), _lon.ravel(), + _lat.ravel(), _height.ravel()) + self = xyz.view(cls._location_dtype, cls).reshape(_lon.shape) + self._unit = u.meter + self._ellipsoid = ellipsoid + return self.to(height.unit) + + @classmethod + def of_site(cls, site_name): + """ + Return an object of this class for a known observatory/site by name. + + This is intended as a quick convenience function to get basic site + information, not a fully-featured exhaustive registry of observatories + and all their properties. + + .. note:: + When this function is called, it will attempt to download site + information from the astropy data server. If you would like a site + to be added, issue a pull request to the + `astropy-data repository `_ . 
If a site cannot be found in the registry (i.e., an internet + connection is not available), it will fall back on a built-in list. + In the future, this bundled list might include a version-controlled + list of canonical observatories extracted from the online version, + but it currently only contains the Greenwich Royal Observatory as an + example case. + + + Parameters + ---------- + site_name : str + Name of the observatory (case-insensitive). + + Returns + ------- + site : This class (a `~astropy.coordinates.EarthLocation` or subclass) + The location of the observatory. + + See Also + -------- + get_site_names : the list of sites that this function can access + """ + registry = cls._get_site_registry() + try: + el = registry[site_name] + except UnknownSiteException as e: + raise UnknownSiteException(e.site, 'EarthLocation.get_site_names', close_names=e.close_names) + + if cls is el.__class__: + return el + else: + newel = cls.from_geodetic(*el.to_geodetic()) + newel.info.name = el.info.name + return newel + + @classmethod + def of_address(cls, address, get_height=False): + """ + Return an object of this class for a given address by querying the Google + maps geocoding API. + + This is intended as a quick convenience function to get fast access to + locations. In the background, this just issues a query to the Google maps + geocoding API. It is not meant to be abused! Google uses IP-based query + limiting and will ban your IP if you send more than a few thousand queries + per hour [1]_. + + .. warning:: + If the query returns more than one location (e.g., searching on + ``address='springfield'``), this function will use the **first** returned + location. + + Parameters + ---------- + address : str + The address to get the location for. As per the Google maps API, this + can be a fully specified street address (e.g., 123 Main St., New York, + NY) or a city name (e.g., Danbury, CT), etc. + get_height : bool (optional) + Use the retrieved location to perform a second query to the Google maps + elevation API to retrieve the height of the input address [2]_. + + Returns + ------- + location : This class (a `~astropy.coordinates.EarthLocation` or subclass) + The location of the input address. + + References + ---------- + .. [1] https://developers.google.com/maps/documentation/geocoding/intro + .. [2] https://developers.google.com/maps/documentation/elevation/intro + + """ + + pars = urllib.parse.urlencode({'address': address}) + geo_url = "https://maps.googleapis.com/maps/api/geocode/json?{0}".format(pars) + + # get longitude and latitude location + err_str = ("Unable to retrieve coordinates for address '{address}'; {{msg}}" + .format(address=address)) + geo_result = _get_json_result(geo_url, err_str=err_str) + loc = geo_result[0]['geometry']['location'] + + if get_height: + pars = {'locations': '{lat:.8f},{lng:.8f}'.format(lat=loc['lat'], + lng=loc['lng'])} + pars = urllib.parse.urlencode(pars) + ele_url = "https://maps.googleapis.com/maps/api/elevation/json?{0}".format(pars) + + err_str = ("Unable to retrieve elevation for address '{address}'; {{msg}}" + .format(address=address)) + ele_result = _get_json_result(ele_url, err_str=err_str) + height = ele_result[0]['elevation']*u.meter + + else: + height = 0. + + return cls.from_geodetic(lon=loc['lng']*u.degree, + lat=loc['lat']*u.degree, + height=height) + + @classmethod + def get_site_names(cls): + """ + Get list of names of observatories for use with + `~astropy.coordinates.EarthLocation.of_site`. + + ..
note:: + When this function is called, it will first attempt to + download site information from the astropy data server. If it + cannot (i.e., an internet connection is not available), it will fall + back on the list included with astropy (which is a limited and dated + set of sites). If you think a site should be added, issue a pull + request to the + `astropy-data repository <https://github.com/astropy/astropy-data>`_ . + + + Returns + ------- + names : list of str + List of valid observatory names + + See Also + -------- + of_site : Gets the actual location object for one of the sites names + this returns. + """ + return cls._get_site_registry().names + + @classmethod + def _get_site_registry(cls, force_download=False, force_builtin=False): + """ + Gets the site registry. The first time this either downloads or loads + from the data file packaged with astropy. Subsequent calls will use the + cached version unless explicitly overridden. + + Parameters + ---------- + force_download : bool or str + If not False, force replacement of the cached registry with a + downloaded version. If a str, that will be used as the URL to + download from (if just True, the default URL will be used). + force_builtin : bool + If True, load from the data file bundled with astropy and set the + cache to that. + + Returns + ------- + reg : astropy.coordinates.sites.SiteRegistry + """ + if force_builtin and force_download: + raise ValueError('Cannot have both force_builtin and force_download True') + + if force_builtin: + reg = cls._site_registry = get_builtin_sites() + else: + reg = getattr(cls, '_site_registry', None) + if force_download or not reg: + try: + if isinstance(force_download, six.string_types): + reg = get_downloaded_sites(force_download) + else: + reg = get_downloaded_sites() + except (six.moves.urllib.error.URLError, IOError): + # In Python 2.7 the IOError raised by @remote_data stays as + # is, while in Python 3.6 the IOError gets converted to a + # URLError, so we catch IOError above too, but this can be + # removed once we don't support Python 2.7 anymore. + if force_download: + raise + msg = ('Could not access the online site list. Falling ' + 'back on the built-in version, which is rather ' + 'limited. If you want to retry the download, do ' + '{0}._get_site_registry(force_download=True)') + warn(AstropyUserWarning(msg.format(cls.__name__))) + reg = get_builtin_sites() + cls._site_registry = reg + + return reg + + @property + def ellipsoid(self): + """The default ellipsoid used to convert to geodetic coordinates.""" + return self._ellipsoid + + @ellipsoid.setter + def ellipsoid(self, ellipsoid): + self._ellipsoid = _check_ellipsoid(ellipsoid) + + @property + def geodetic(self): + """Convert to geodetic coordinates for the default ellipsoid.""" + return self.to_geodetic() + + def to_geodetic(self, ellipsoid=None): + """Convert to geodetic coordinates. + + Parameters + ---------- + ellipsoid : str, optional + Reference ellipsoid to use. Default is the one the coordinates + were initialized with. Available are: 'WGS84', 'GRS80', 'WGS72' + + Returns + ------- + (lon, lat, height) : tuple + The tuple contains instances of `~astropy.coordinates.Longitude`, + `~astropy.coordinates.Latitude`, and `~astropy.units.Quantity` + + Raises + ------ + ValueError + if ``ellipsoid`` is not recognized as among the ones implemented. + + Notes + ----- + For the conversion to geodetic coordinates, the ERFA routine + ``gc2gd`` is used.
See https://github.com/liberfa/erfa + """ + ellipsoid = _check_ellipsoid(ellipsoid, default=self.ellipsoid) + self_array = self.to(u.meter).view(self._array_dtype, np.ndarray) + lon, lat, height = erfa.gc2gd(getattr(erfa, ellipsoid), self_array) + return GeodeticLocation( + Longitude(lon * u.radian, u.degree, + wrap_angle=180.*u.degree, copy=False), + Latitude(lat * u.radian, u.degree, copy=False), + u.Quantity(height * u.meter, self.unit, copy=False)) + + @property + @deprecated('2.0', alternative='`lon`', obj_type='property') + def longitude(self): + """Longitude of the location, for the default ellipsoid.""" + return self.geodetic[0] + + @property + def lon(self): + """Longitude of the location, for the default ellipsoid.""" + return self.geodetic[0] + + @property + @deprecated('2.0', alternative='`lat`', obj_type='property') + def latitude(self): + """Latitude of the location, for the default ellipsoid.""" + return self.geodetic[1] + + @property + def lat(self): + """Latitude of the location, for the default ellipsoid.""" + return self.geodetic[1] + + @property + def height(self): + """Height of the location, for the default ellipsoid.""" + return self.geodetic[2] + + # mostly for symmetry with geodetic and to_geodetic. + @property + def geocentric(self): + """Convert to a tuple with X, Y, and Z as quantities""" + return self.to_geocentric() + + def to_geocentric(self): + """Convert to a tuple with X, Y, and Z as quantities""" + return (self.x, self.y, self.z) + + def get_itrs(self, obstime=None): + """ + Generates an `~astropy.coordinates.ITRS` object with the location of + this object at the requested ``obstime``. + + Parameters + ---------- + obstime : `~astropy.time.Time` or None + The ``obstime`` to apply to the new `~astropy.coordinates.ITRS`, or + if None, the default ``obstime`` will be used. + + Returns + ------- + itrs : `~astropy.coordinates.ITRS` + The new object in the ITRS frame + """ + # Broadcast for a single position at multiple times, but don't attempt + # to be more general here. + if obstime and self.size == 1 and obstime.size > 1: + self = broadcast_to(self, obstime.shape, subok=True) + + # do this here to prevent a series of complicated circular imports + from .builtin_frames import ITRS + return ITRS(x=self.x, y=self.y, z=self.z, obstime=obstime) + + itrs = property(get_itrs, doc="""An `~astropy.coordinates.ITRS` object + for the location of this object at the + default ``obstime``.""") + + def _get_gcrs(self, obstime): + """GCRS position with velocity at ``obstime`` as a GCRS coordinate. + + Parameters + ---------- + obstime : `~astropy.time.Time` + The ``obstime`` to calculate the GCRS position/velocity at. + + Returns + -------- + gcrs : `~astropy.coordinates.GCRS` instance + With velocity included. + """ + # do this here to prevent a series of complicated circular imports + from .builtin_frames import GCRS + + itrs = self.get_itrs(obstime) + # Assume the observatory itself is fixed on the ground. + # We do a direct assignment rather than an update to avoid validation + # and creation of a new object. + zeros = broadcast_to(0. * u.km / u.s, (3,) + itrs.shape, subok=True) + itrs.data.differentials['s'] = CartesianDifferential(zeros) + return itrs.transform_to(GCRS(obstime=obstime)) + + def get_gcrs_posvel(self, obstime): + """ + Calculate the GCRS position and velocity of this object at the + requested ``obstime``. + + Parameters + ---------- + obstime : `~astropy.time.Time` + The ``obstime`` to calculate the GCRS position/velocity at.
+ + Returns + -------- + obsgeoloc : `~astropy.coordinates.CartesianRepresentation` + The GCRS position of the object + obsgeovel : `~astropy.coordinates.CartesianRepresentation` + The GCRS velocity of the object + """ + # GCRS position + gcrs_data = self._get_gcrs(obstime).data + obsgeopos = gcrs_data.without_differentials() + obsgeovel = gcrs_data.differentials['s'].to_cartesian() + return obsgeopos, obsgeovel + + def _gravitational_redshift(self, obstime): + """Return the gravitational redshift at this EarthLocation. + + Calculates the gravitational redshift, of order 3 m/s, due to the Sun, + Jupiter, the Moon, and the Earth itself. + + Parameters + ---------- + obstime : `~astropy.time.Time` + The ``obstime`` to calculate the redshift at. + + Returns + -------- + redshift : `~astropy.units.Quantity` + Gravitational redshift in velocity units at given obstime. + """ + # needs to be here to avoid circular imports + from .solar_system import get_body_barycentric + names = ('sun', 'jupiter', 'moon', 'earth') + GM_moon = consts.G * 7.34767309e22*u.kg + masses = (consts.GM_sun, consts.GM_jup, GM_moon, consts.GM_earth) + positions = [get_body_barycentric(name, obstime) for name in names] + # Calculate distances to objects other than earth. + distances = [(pos - positions[-1]).norm() for pos in positions[:-1]] + # Append distance from Earth's center for Earth's contribution. + distances.append(CartesianRepresentation(self.geocentric).norm()) + # Get redshifts due to all objects. + redshifts = [-GM / consts.c / distance for (GM, distance) in + zip(masses, distances)] + return sum(redshifts) + + @property + def x(self): + """The X component of the geocentric coordinates.""" + return self['x'] + + @property + def y(self): + """The Y component of the geocentric coordinates.""" + return self['y'] + + @property + def z(self): + """The Z component of the geocentric coordinates.""" + return self['z'] + + def __getitem__(self, item): + result = super(EarthLocation, self).__getitem__(item) + if result.dtype is self.dtype: + return result.view(self.__class__) + else: + return result.view(u.Quantity) + + def __array_finalize__(self, obj): + super(EarthLocation, self).__array_finalize__(obj) + if hasattr(obj, '_ellipsoid'): + self._ellipsoid = obj._ellipsoid + + def __len__(self): + if self.shape == (): + raise IndexError('0-d EarthLocation arrays cannot be indexed') + else: + return super(EarthLocation, self).__len__() + + def _to_value(self, unit, equivalencies=[]): + """Helper method for to and to_value.""" + # Conversion to another unit in both ``to`` and ``to_value`` goes + # via this routine. To make the regular quantity routines work, we + # temporarily turn the structured array into a regular one. + array_view = self.view(self._array_dtype, np.ndarray) + if equivalencies == []: + equivalencies = self._equivalencies + new_array = self.unit.to(unit, array_view, equivalencies=equivalencies) + return new_array.view(self.dtype).reshape(self.shape) + + if NUMPY_LT_1_12: + def __repr__(self): + # Use the numpy >=1.12 way to format structured arrays. 
from .representation import _array2string + prefixstr = '<' + self.__class__.__name__ + ' ' + arrstr = _array2string(self.view(np.ndarray), prefix=prefixstr) + return '{0}{1}{2:s}>'.format(prefixstr, arrstr, self._unitstr) + + +# need to do this here at the bottom to avoid circular dependencies +from .sites import get_builtin_sites, get_downloaded_sites diff --git a/astropy/coordinates/earth_orientation.py b/astropy/coordinates/earth_orientation.py new file mode 100644 index 0000000..1ac9539 --- /dev/null +++ b/astropy/coordinates/earth_orientation.py @@ -0,0 +1,413 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +""" +This module contains standard functions for earth orientation, such as +precession and nutation. + +This module is (currently) not intended to be part of the public API, but +is instead primarily for internal use in `coordinates` +""" + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import numpy as np + +from ..time import Time +from .. import units as u +from .matrix_utilities import rotation_matrix, matrix_product, matrix_transpose + + +jd1950 = Time('B1950', scale='tai').jd +jd2000 = Time('J2000', scale='utc').jd +_asecperrad = u.radian.to(u.arcsec) + + +def eccentricity(jd): + """ + Eccentricity of the Earth's orbit at the requested Julian Date. + + Parameters + ---------- + jd : scalar or array-like + Julian date at which to compute the eccentricity + + Returns + ------- + eccentricity : scalar or array + The eccentricity (or array of eccentricities) + + References + ---------- + * Explanatory Supplement to the Astronomical Almanac: P. Kenneth + Seidelmann (ed), University Science Books (1992). + """ + T = (jd - jd1950) / 36525.0 + + p = (-0.000000126, - 0.00004193, 0.01673011) + + return np.polyval(p, T) + + +def mean_lon_of_perigee(jd): + """ + Computes the mean longitude of perigee of the Earth's orbit at the + requested Julian Date. + + Parameters + ---------- + jd : scalar or array-like + Julian date at which to compute the mean longitude of perigee + + Returns + ------- + mean_lon_of_perigee : scalar or array + Mean longitude of perigee in degrees (or array of mean longitudes) + + References + ---------- + * Explanatory Supplement to the Astronomical Almanac: P. Kenneth + Seidelmann (ed), University Science Books (1992). + """ + T = (jd - jd1950) / 36525.0 + + p = (0.012, 1.65, 6190.67, 1015489.951) + + return np.polyval(p, T) / 3600. + + +def obliquity(jd, algorithm=2006): + """ + Computes the obliquity of the Earth at the requested Julian Date. + + Parameters + ---------- + jd : scalar or array-like + Julian date at which to compute the obliquity + algorithm : int + Year of algorithm based on IAU adoption. Can be 2006, 2000 or 1980. The + 2006 algorithm is mentioned in Circular 179, but the canonical reference + for the IAU adoption is apparently Hilton et al. 06. The 2000 algorithm + is composed of the 1980 algorithm with a precession-rate correction due + to the 2000 precession models, and a description of the 1980 algorithm + can be found in the Explanatory Supplement to the Astronomical Almanac. + + Returns + ------- + obliquity : scalar or array + Mean obliquity in degrees (or array of obliquities) + + References + ---------- + * Hilton, J. et al., 2006, Celest.Mech.Dyn.Astron. 94, 351 + * USNO Circular 179 + * Explanatory Supplement to the Astronomical Almanac: P. Kenneth + Seidelmann (ed), University Science Books (1992).
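+ + Examples + -------- + A quick sanity check (illustrative): at J2000 the time argument ``T`` + vanishes and the 2006 polynomial reduces to its constant term, + 84381.406 arcsec, i.e. about 23.4393 degrees: + + >>> round(obliquity(2451545.0), 4) + 23.4393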
+ """ + T = (jd - jd2000) / 36525.0 + + if algorithm == 2006: + p = (-0.0000000434, -0.000000576, 0.00200340, -0.0001831, -46.836769, 84381.406) + corr = 0 + elif algorithm == 2000: + p = (0.001813, -0.00059, -46.8150, 84381.448) + corr = -0.02524 * T + elif algorithm == 1980: + p = (0.001813, -0.00059, -46.8150, 84381.448) + corr = 0 + else: + raise ValueError('invalid algorithm year for computing obliquity') + + return (np.polyval(p, T) + corr) / 3600. + + +# TODO: replace this with SOFA equivalent +def precession_matrix_Capitaine(fromepoch, toepoch): + """ + Computes the precession matrix from one Julian epoch to another. + The exact method is based on Capitaine et al. 2003, which should + match the IAU 2006 standard. + + Parameters + ---------- + fromepoch : `~astropy.time.Time` + The epoch to precess from. + toepoch : `~astropy.time.Time` + The epoch to precess to. + + Returns + ------- + pmatrix : 3x3 array + Precession matrix to get from ``fromepoch`` to ``toepoch`` + + References + ---------- + USNO Circular 179 + """ + mat_fromto2000 = matrix_transpose( + _precess_from_J2000_Capitaine(fromepoch.jyear)) + mat_2000toto = _precess_from_J2000_Capitaine(toepoch.jyear) + + return np.dot(mat_2000toto, mat_fromto2000) + + +def _precess_from_J2000_Capitaine(epoch): + """ + Computes the precession matrix from J2000 to the given Julian Epoch. + Expression from from Capitaine et al. 2003 as expressed in the USNO + Circular 179. This should match the IAU 2006 standard from SOFA. + + Parameters + ---------- + epoch : scalar + The epoch as a Julian year number (e.g. J2000 is 2000.0) + + """ + T = (epoch - 2000.0) / 100.0 + # from USNO circular + pzeta = (-0.0000003173, -0.000005971, 0.01801828, 0.2988499, 2306.083227, 2.650545) + pz = (-0.0000002904, -0.000028596, 0.01826837, 1.0927348, 2306.077181, -2.650545) + ptheta = (-0.0000001274, -0.000007089, -0.04182264, -0.4294934, 2004.191903, 0) + zeta = np.polyval(pzeta, T) / 3600.0 + z = np.polyval(pz, T) / 3600.0 + theta = np.polyval(ptheta, T) / 3600.0 + + return matrix_product(rotation_matrix(-z, 'z'), + rotation_matrix(theta, 'y'), + rotation_matrix(-zeta, 'z')) + + +def _precession_matrix_besselian(epoch1, epoch2): + """ + Computes the precession matrix from one Besselian epoch to another using + Newcomb's method. + + ``epoch1`` and ``epoch2`` are in Besselian year numbers. + """ + # tropical years + t1 = (epoch1 - 1850.0) / 1000.0 + t2 = (epoch2 - 1850.0) / 1000.0 + dt = t2 - t1 + + zeta1 = 23035.545 + t1 * 139.720 + 0.060 * t1 * t1 + zeta2 = 30.240 - 0.27 * t1 + zeta3 = 17.995 + pzeta = (zeta3, zeta2, zeta1, 0) + zeta = np.polyval(pzeta, dt) / 3600 + + z1 = 23035.545 + t1 * 139.720 + 0.060 * t1 * t1 + z2 = 109.480 + 0.39 * t1 + z3 = 18.325 + pz = (z3, z2, z1, 0) + z = np.polyval(pz, dt) / 3600 + + theta1 = 20051.12 - 85.29 * t1 - 0.37 * t1 * t1 + theta2 = -42.65 - 0.37 * t1 + theta3 = -41.8 + ptheta = (theta3, theta2, theta1, 0) + theta = np.polyval(ptheta, dt) / 3600 + + return matrix_product(rotation_matrix(-z, 'z'), + rotation_matrix(theta, 'y'), + rotation_matrix(-zeta, 'z')) + + +def _load_nutation_data(datastr, seriestype): + """ + Loads nutation series from data stored in string form. 
+ + Seriestype can be 'lunisolar' or 'planetary' + """ + + if seriestype == 'lunisolar': + dtypes = [('nl', int), + ('nlp', int), + ('nF', int), + ('nD', int), + ('nOm', int), + ('ps', float), + ('pst', float), + ('pc', float), + ('ec', float), + ('ect', float), + ('es', float)] + elif seriestype == 'planetary': + dtypes = [('nl', int), + ('nF', int), + ('nD', int), + ('nOm', int), + ('nme', int), + ('nve', int), + ('nea', int), + ('nma', int), + ('nju', int), + ('nsa', int), + ('nur', int), + ('nne', int), + ('npa', int), + ('sp', int), + ('cp', int), + ('se', int), + ('ce', int)] + else: + raise ValueError('requested invalid nutation series type') + + lines = [l for l in datastr.split('\n') + if not l.startswith('#') if not l.strip() == ''] + + lists = [[] for _ in dtypes] + for l in lines: + for i, e in enumerate(l.split(' ')): + lists[i].append(dtypes[i][1](e)) + return np.rec.fromarrays(lists, names=[e[0] for e in dtypes]) + + +_nut_data_00b = """ +#l lprime F D Omega longitude_sin longitude_sin*t longitude_cos obliquity_cos obliquity_cos*t,obliquity_sin + +0 0 0 0 1 -172064161.0 -174666.0 33386.0 92052331.0 9086.0 15377.0 +0 0 2 -2 2 -13170906.0 -1675.0 -13696.0 5730336.0 -3015.0 -4587.0 +0 0 2 0 2 -2276413.0 -234.0 2796.0 978459.0 -485.0 1374.0 +0 0 0 0 2 2074554.0 207.0 -698.0 -897492.0 470.0 -291.0 +0 1 0 0 0 1475877.0 -3633.0 11817.0 73871.0 -184.0 -1924.0 +0 1 2 -2 2 -516821.0 1226.0 -524.0 224386.0 -677.0 -174.0 +1 0 0 0 0 711159.0 73.0 -872.0 -6750.0 0.0 358.0 +0 0 2 0 1 -387298.0 -367.0 380.0 200728.0 18.0 318.0 +1 0 2 0 2 -301461.0 -36.0 816.0 129025.0 -63.0 367.0 +0 -1 2 -2 2 215829.0 -494.0 111.0 -95929.0 299.0 132.0 +0 0 2 -2 1 128227.0 137.0 181.0 -68982.0 -9.0 39.0 +-1 0 2 0 2 123457.0 11.0 19.0 -53311.0 32.0 -4.0 +-1 0 0 2 0 156994.0 10.0 -168.0 -1235.0 0.0 82.0 +1 0 0 0 1 63110.0 63.0 27.0 -33228.0 0.0 -9.0 +-1 0 0 0 1 -57976.0 -63.0 -189.0 31429.0 0.0 -75.0 +-1 0 2 2 2 -59641.0 -11.0 149.0 25543.0 -11.0 66.0 +1 0 2 0 1 -51613.0 -42.0 129.0 26366.0 0.0 78.0 +-2 0 2 0 1 45893.0 50.0 31.0 -24236.0 -10.0 20.0 +0 0 0 2 0 63384.0 11.0 -150.0 -1220.0 0.0 29.0 +0 0 2 2 2 -38571.0 -1.0 158.0 16452.0 -11.0 68.0 +0 -2 2 -2 2 32481.0 0.0 0.0 -13870.0 0.0 0.0 +-2 0 0 2 0 -47722.0 0.0 -18.0 477.0 0.0 -25.0 +2 0 2 0 2 -31046.0 -1.0 131.0 13238.0 -11.0 59.0 +1 0 2 -2 2 28593.0 0.0 -1.0 -12338.0 10.0 -3.0 +-1 0 2 0 1 20441.0 21.0 10.0 -10758.0 0.0 -3.0 +2 0 0 0 0 29243.0 0.0 -74.0 -609.0 0.0 13.0 +0 0 2 0 0 25887.0 0.0 -66.0 -550.0 0.0 11.0 +0 1 0 0 1 -14053.0 -25.0 79.0 8551.0 -2.0 -45.0 +-1 0 0 2 1 15164.0 10.0 11.0 -8001.0 0.0 -1.0 +0 2 2 -2 2 -15794.0 72.0 -16.0 6850.0 -42.0 -5.0 +0 0 -2 2 0 21783.0 0.0 13.0 -167.0 0.0 13.0 +1 0 0 -2 1 -12873.0 -10.0 -37.0 6953.0 0.0 -14.0 +0 -1 0 0 1 -12654.0 11.0 63.0 6415.0 0.0 26.0 +-1 0 2 2 1 -10204.0 0.0 25.0 5222.0 0.0 15.0 +0 2 0 0 0 16707.0 -85.0 -10.0 168.0 -1.0 10.0 +1 0 2 2 2 -7691.0 0.0 44.0 3268.0 0.0 19.0 +-2 0 2 0 0 -11024.0 0.0 -14.0 104.0 0.0 2.0 +0 1 2 0 2 7566.0 -21.0 -11.0 -3250.0 0.0 -5.0 +0 0 2 2 1 -6637.0 -11.0 25.0 3353.0 0.0 14.0 +0 -1 2 0 2 -7141.0 21.0 8.0 3070.0 0.0 4.0 +0 0 0 2 1 -6302.0 -11.0 2.0 3272.0 0.0 4.0 +1 0 2 -2 1 5800.0 10.0 2.0 -3045.0 0.0 -1.0 +2 0 2 -2 2 6443.0 0.0 -7.0 -2768.0 0.0 -4.0 +-2 0 0 2 1 -5774.0 -11.0 -15.0 3041.0 0.0 -5.0 +2 0 2 0 1 -5350.0 0.0 21.0 2695.0 0.0 12.0 +0 -1 2 -2 1 -4752.0 -11.0 -3.0 2719.0 0.0 -3.0 +0 0 0 -2 1 -4940.0 -11.0 -21.0 2720.0 0.0 -9.0 +-1 -1 0 2 0 7350.0 0.0 -8.0 -51.0 0.0 4.0 +2 0 0 -2 1 4065.0 0.0 6.0 -2206.0 0.0 1.0 +1 0 0 2 0 6579.0 0.0 -24.0 -199.0 0.0 2.0 +0 1 
2 -2 1 3579.0 0.0 5.0 -1900.0 0.0 1.0 +1 -1 0 0 0 4725.0 0.0 -6.0 -41.0 0.0 3.0 +-2 0 2 0 2 -3075.0 0.0 -2.0 1313.0 0.0 -1.0 +3 0 2 0 2 -2904.0 0.0 15.0 1233.0 0.0 7.0 +0 -1 0 2 0 4348.0 0.0 -10.0 -81.0 0.0 2.0 +1 -1 2 0 2 -2878.0 0.0 8.0 1232.0 0.0 4.0 +0 0 0 1 0 -4230.0 0.0 5.0 -20.0 0.0 -2.0 +-1 -1 2 2 2 -2819.0 0.0 7.0 1207.0 0.0 3.0 +-1 0 2 0 0 -4056.0 0.0 5.0 40.0 0.0 -2.0 +0 -1 2 2 2 -2647.0 0.0 11.0 1129.0 0.0 5.0 +-2 0 0 0 1 -2294.0 0.0 -10.0 1266.0 0.0 -4.0 +1 1 2 0 2 2481.0 0.0 -7.0 -1062.0 0.0 -3.0 +2 0 0 0 1 2179.0 0.0 -2.0 -1129.0 0.0 -2.0 +-1 1 0 1 0 3276.0 0.0 1.0 -9.0 0.0 0.0 +1 1 0 0 0 -3389.0 0.0 5.0 35.0 0.0 -2.0 +1 0 2 0 0 3339.0 0.0 -13.0 -107.0 0.0 1.0 +-1 0 2 -2 1 -1987.0 0.0 -6.0 1073.0 0.0 -2.0 +1 0 0 0 2 -1981.0 0.0 0.0 854.0 0.0 0.0 +-1 0 0 1 0 4026.0 0.0 -353.0 -553.0 0.0 -139.0 +0 0 2 1 2 1660.0 0.0 -5.0 -710.0 0.0 -2.0 +-1 0 2 4 2 -1521.0 0.0 9.0 647.0 0.0 4.0 +-1 1 0 1 1 1314.0 0.0 0.0 -700.0 0.0 0.0 +0 -2 2 -2 1 -1283.0 0.0 0.0 672.0 0.0 0.0 +1 0 2 2 1 -1331.0 0.0 8.0 663.0 0.0 4.0 +-2 0 2 2 2 1383.0 0.0 -2.0 -594.0 0.0 -2.0 +-1 0 0 0 2 1405.0 0.0 4.0 -610.0 0.0 2.0 +1 1 2 -2 2 1290.0 0.0 0.0 -556.0 0.0 0.0 +"""[1:-1] +_nut_data_00b = _load_nutation_data(_nut_data_00b, 'lunisolar') + +# TODO: replace w/SOFA equivalent + + +def nutation_components2000B(jd): + """ + Computes nutation components following the IAU 2000B specification + + Parameters + ---------- + jd : scalar + epoch at which to compute the nutation components as a JD + + Returns + ------- + eps : float + epsilon in radians + dpsi : float + dpsi in radians + deps : float + depsilon in radians + """ + epsa = np.radians(obliquity(jd, 2000)) + t = (jd - jd2000) / 36525 + + # Fundamental (Delaunay) arguments from Simon et al. (1994) via SOFA + # Mean anomaly of moon + el = ((485868.249036 + 1717915923.2178 * t) % 1296000) / _asecperrad + # Mean anomaly of sun + elp = ((1287104.79305 + 129596581.0481 * t) % 1296000) / _asecperrad + # Mean argument of the latitude of Moon + F = ((335779.526232 + 1739527262.8478 * t) % 1296000) / _asecperrad + # Mean elongation of the Moon from Sun + D = ((1072260.70369 + 1602961601.2090 * t) % 1296000) / _asecperrad + # Mean longitude of the ascending node of Moon + Om = ((450160.398036 + -6962890.5431 * t) % 1296000) / _asecperrad + + # compute nutation series using array loaded from data directory + dat = _nut_data_00b + arg = dat.nl * el + dat.nlp * elp + dat.nF * F + dat.nD * D + dat.nOm * Om + sarg = np.sin(arg) + carg = np.cos(arg) + + p1u_asecperrad = _asecperrad * 1e7 # 0.1 microarcsec per rad + dpsils = np.sum((dat.ps + dat.pst * t) * sarg + dat.pc * carg) / p1u_asecperrad + depsls = np.sum((dat.ec + dat.ect * t) * carg + dat.es * sarg) / p1u_asecperrad + # fixed offset in place of planetary terms + m_asecperrad = _asecperrad * 1e3 # milliarcsec per rad + dpsipl = -0.135 / m_asecperrad + depspl = 0.388 / m_asecperrad + + return epsa, dpsils + dpsipl, depsls + depspl # all in radians + + +def nutation_matrix(epoch): + """ + Nutation matrix generated from nutation components.
+ + Matrix converts from mean coordinate to true coordinate as + r_true = M * r_mean + """ + # TODO: implement higher precision 2006/2000A model if requested/needed + epsa, dpsi, deps = nutation_components2000B(epoch.jd) # all in radians + + return matrix_product(rotation_matrix(-(epsa + deps), 'x', False), + rotation_matrix(-dpsi, 'z', False), + rotation_matrix(epsa, 'x', False)) diff --git a/astropy/coordinates/errors.py b/astropy/coordinates/errors.py new file mode 100644 index 0000000..0285ffe --- /dev/null +++ b/astropy/coordinates/errors.py @@ -0,0 +1,177 @@ +# -*- coding: utf-8 -*- +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +''' This module defines custom errors and exceptions used in astropy.coordinates. +''' +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +from ..utils.exceptions import AstropyWarning + +__all__ = ['RangeError', 'BoundsError', 'IllegalHourError', + 'IllegalMinuteError', 'IllegalSecondError', 'ConvertError', + 'IllegalHourWarning', 'IllegalMinuteWarning', 'IllegalSecondWarning', + 'UnknownSiteException'] + + +class RangeError(ValueError): + """ + Raised when some part of an angle is out of its valid range. + """ + + +class BoundsError(RangeError): + """ + Raised when an angle is outside of its user-specified bounds. + """ + + +class IllegalHourError(RangeError): + """ + Raised when an hour value is not in the range [0,24). + + Parameters + ---------- + hour : int, float + + Examples + -------- + + .. code-block:: python + + if not 0 <= hr < 24: + raise IllegalHourError(hour) + """ + def __init__(self, hour): + self.hour = hour + + def __str__(self): + return "An invalid value for 'hours' was found ('{0}'); must be in the range [0,24).".format(self.hour) + + +class IllegalHourWarning(AstropyWarning): + """ + Raised when an hour value is 24. + + Parameters + ---------- + hour : int, float + """ + def __init__(self, hour, alternativeactionstr=None): + self.hour = hour + self.alternativeactionstr = alternativeactionstr + + def __str__(self): + message = "'hour' was found to be '{0}', which is not in range (-24, 24).".format(self.hour) + if self.alternativeactionstr is not None: + message += ' ' + self.alternativeactionstr + return message + + +class IllegalMinuteError(RangeError): + """ + Raised when a minute value is not in the range [0,60). + + Parameters + ---------- + minute : int, float + + Examples + -------- + + .. code-block:: python + + if not 0 <= min < 60: + raise IllegalMinuteError(minute) + + """ + def __init__(self, minute): + self.minute = minute + + def __str__(self): + return "An invalid value for 'minute' was found ('{0}'); should be in the range [0,60).".format(self.minute) + + +class IllegalMinuteWarning(AstropyWarning): + """ + Raised when a minute value is 60. + + Parameters + ---------- + minute : int, float + """ + def __init__(self, minute, alternativeactionstr=None): + self.minute = minute + self.alternativeactionstr = alternativeactionstr + + def __str__(self): + message = "'minute' was found to be '{0}', which is not in range [0,60).".format(self.minute) + if self.alternativeactionstr is not None: + message += ' ' + self.alternativeactionstr + return message + + +class IllegalSecondError(RangeError): + """ + Raised when a second value (time) is not in the range [0,60). + + Parameters + ---------- + second : int, float + + Examples + -------- + + ..
+    .. code-block:: python
+
+        if not 0 <= sec < 60:
+            raise IllegalSecondError(second)
+    """
+    def __init__(self, second):
+        self.second = second
+
+    def __str__(self):
+        return "An invalid value for 'second' was found ('{0}'); should be in the range [0,60).".format(self.second)
+
+
+class IllegalSecondWarning(AstropyWarning):
+    """
+    Raised when a second value is 60.
+
+    Parameters
+    ----------
+    second : int, float
+    """
+    def __init__(self, second, alternativeactionstr=None):
+        self.second = second
+        self.alternativeactionstr = alternativeactionstr
+
+    def __str__(self):
+        message = "'second' was found to be '{0}', which is not in range [0,60).".format(self.second)
+        if self.alternativeactionstr is not None:
+            message += ' ' + self.alternativeactionstr
+        return message
+
+
+# TODO: consider if this should be moved to `units`?
+class UnitsError(ValueError):
+    """
+    Raised if units are missing or invalid.
+    """
+
+
+class ConvertError(Exception):
+    """
+    Raised if a coordinate system cannot be converted to another.
+    """
+
+
+class UnknownSiteException(KeyError):
+    def __init__(self, site, attribute, close_names=None):
+        message = "Site '{0}' not in database. Use {1} to see available sites.".format(site, attribute)
+        if close_names:
+            message += " Did you mean one of: '{0}'?".format("', '".join(close_names))
+        self.site = site
+        self.attribute = attribute
+        self.close_names = close_names
+        super(UnknownSiteException, self).__init__(message)
diff --git a/astropy/coordinates/funcs.py b/astropy/coordinates/funcs.py
new file mode 100644
index 0000000..68de2ec
--- /dev/null
+++ b/astropy/coordinates/funcs.py
@@ -0,0 +1,283 @@
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+
+"""
+This module contains convenience functions for coordinate-related functionality.
+
+This is generally just wrapping around the object-oriented coordinates
+framework, but it is useful for some users who are used to more functional
+interfaces.
+"""
+
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
+
+import numpy as np
+
+from .. import units as u
+from ..constants import c
+from .. import _erfa as erfa
+from ..io import ascii
+from ..utils import isiterable, data
+from .sky_coordinate import SkyCoord
+from .builtin_frames import GCRS, PrecessedGeocentric
+from .representation import SphericalRepresentation, CartesianRepresentation
+from .builtin_frames.utils import get_jd12
+
+__all__ = ['cartesian_to_spherical', 'spherical_to_cartesian', 'get_sun',
+           'concatenate', 'get_constellation']
+
+
+def cartesian_to_spherical(x, y, z):
+    """
+    Converts 3D rectangular cartesian coordinates to spherical polar
+    coordinates.
+
+    Note that the resulting angles are latitude/longitude or
+    elevation/azimuthal form. I.e., the origin is along the equator
+    rather than at the north pole.
+
+    .. note::
+        This function simply wraps functionality provided by the
+        `~astropy.coordinates.CartesianRepresentation` and
+        `~astropy.coordinates.SphericalRepresentation` classes. In general,
+        for both performance and readability, we suggest using these classes
+        directly. But for situations where a quick one-off conversion makes
+        sense, this function is provided.
+
+    Parameters
+    ----------
+    x : scalar, array-like, or `~astropy.units.Quantity`
+        The first cartesian coordinate.
+    y : scalar, array-like, or `~astropy.units.Quantity`
+        The second cartesian coordinate.
+    z : scalar, array-like, or `~astropy.units.Quantity`
+        The third cartesian coordinate.
+
+    Returns
+    -------
+    r : `~astropy.units.Quantity`
+        The radial coordinate (in the same units as the inputs).
+    lat : `~astropy.units.Quantity`
+        The latitude in radians
+    lon : `~astropy.units.Quantity`
+        The longitude in radians
+    """
+    if not hasattr(x, 'unit'):
+        x = x * u.dimensionless_unscaled
+    if not hasattr(y, 'unit'):
+        y = y * u.dimensionless_unscaled
+    if not hasattr(z, 'unit'):
+        z = z * u.dimensionless_unscaled
+
+    cart = CartesianRepresentation(x, y, z)
+    sph = cart.represent_as(SphericalRepresentation)
+
+    return sph.distance, sph.lat, sph.lon
+
+
+def spherical_to_cartesian(r, lat, lon):
+    """
+    Converts spherical polar coordinates to rectangular cartesian
+    coordinates.
+
+    Note that the input angles should be in latitude/longitude or
+    elevation/azimuthal form. I.e., the origin is along the equator
+    rather than at the north pole.
+
+    .. note::
+        This is a low-level function used internally in
+        `astropy.coordinates`. It is provided for users if they really
+        want to use it, but it is recommended that you use the
+        `astropy.coordinates` coordinate systems.
+
+    Parameters
+    ----------
+    r : scalar, array-like, or `~astropy.units.Quantity`
+        The radial coordinate (dimensionless if no unit is given).
+    lat : scalar, array-like, or `~astropy.units.Quantity`
+        The latitude (in radians if array or scalar)
+    lon : scalar, array-like, or `~astropy.units.Quantity`
+        The longitude (in radians if array or scalar)
+
+    Returns
+    -------
+    x : float or array
+        The first cartesian coordinate.
+    y : float or array
+        The second cartesian coordinate.
+    z : float or array
+        The third cartesian coordinate.
+
+
+    """
+    if not hasattr(r, 'unit'):
+        r = r * u.dimensionless_unscaled
+    if not hasattr(lat, 'unit'):
+        lat = lat * u.radian
+    if not hasattr(lon, 'unit'):
+        lon = lon * u.radian
+
+    sph = SphericalRepresentation(distance=r, lat=lat, lon=lon)
+    cart = sph.represent_as(CartesianRepresentation)
+
+    return cart.x, cart.y, cart.z
+
+
+def get_sun(time):
+    """
+    Determines the location of the sun at a given time (or times, if the input
+    is an array `~astropy.time.Time` object), in geocentric coordinates.
+
+    Parameters
+    ----------
+    time : `~astropy.time.Time`
+        The time(s) at which to compute the location of the sun.
+
+    Returns
+    -------
+    newsc : `~astropy.coordinates.SkyCoord`
+        The location of the sun as a `~astropy.coordinates.SkyCoord` in the
+        `~astropy.coordinates.GCRS` frame.
+
+
+    Notes
+    -----
+    The algorithm for determining the sun/earth relative position is based
+    on the simplified version of VSOP2000 that is part of ERFA. Compared to
+    JPL's ephemeris, it should be good to about 4 km (in the Sun-Earth
+    vector) from 1900-2100 C.E., 8 km for the 1800-2200 span, and perhaps
+    250 km over the 1000-3000 span.
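+
+    Examples
+    --------
+    A minimal usage sketch (illustrative only; the resulting coordinate
+    values are not shown here)::
+
+        >>> from astropy.time import Time
+        >>> sun = get_sun(Time('2000-01-01'))  # doctest: +SKIP
+        >>> sun.ra, sun.dec  # doctest: +SKIP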
+
+    """
+    earth_pv_helio, earth_pv_bary = erfa.epv00(*get_jd12(time, 'tdb'))
+
+    # We have to manually do aberration because we're outputting directly into
+    # GCRS
+    earth_p = earth_pv_helio[..., 0, :]
+    earth_v = earth_pv_bary[..., 1, :]
+
+    # convert barycentric velocity to units of c, but keep as array for passing into erfa
+    earth_v /= c.to_value(u.au/u.d)
+
+    dsun = np.sqrt(np.sum(earth_p**2, axis=-1))
+    invlorentz = (1-np.sum(earth_v**2, axis=-1))**0.5
+    properdir = erfa.ab(earth_p/dsun.reshape(dsun.shape + (1,)),
+                        -earth_v, dsun, invlorentz)
+
+    cartrep = CartesianRepresentation(x=-dsun*properdir[..., 0] * u.AU,
+                                      y=-dsun*properdir[..., 1] * u.AU,
+                                      z=-dsun*properdir[..., 2] * u.AU)
+    return SkyCoord(cartrep, frame=GCRS(obstime=time))
+
+
+def concatenate(coords):
+    """
+    Combine multiple coordinate objects into a single
+    `~astropy.coordinates.SkyCoord`.
+
+    "Coordinate objects" here mean frame objects with data,
+    `~astropy.coordinates.SkyCoord`, or representation objects. Currently,
+    they must all be in the same frame, but in a future version this may be
+    relaxed to allow inhomogeneous sequences of objects.
+
+    Parameters
+    ----------
+    coords : sequence of coordinate objects
+        The objects to concatenate
+
+    Returns
+    -------
+    cskycoord : SkyCoord
+        A single sky coordinate with its data set to the concatenation of all
+        the elements in ``coords``
+    """
+    if getattr(coords, 'isscalar', False) or not isiterable(coords):
+        raise TypeError('The argument to concatenate must be iterable')
+    return SkyCoord(coords)
+
+
+# global dictionary that caches repeatedly-needed info for get_constellation
+_constellation_data = {}
+
+
+def get_constellation(coord, short_name=False, constellation_list='iau'):
+    """
+    Determines the constellation(s) containing a given coordinate object.
+
+    Parameters
+    ----------
+    coord : coordinate object
+        The object to determine the constellation of.
+    short_name : bool
+        If True, the returned names are the IAU-sanctioned abbreviated
+        names. Otherwise, full names for the constellations are used.
+    constellation_list : str
+        The set of constellations to use. Currently only ``'iau'`` is
+        supported, meaning the 88 "modern" constellations endorsed by the IAU.
+
+    Returns
+    -------
+    constellation : str or string array
+        If ``coord`` contains a scalar coordinate, returns the name of the
+        constellation. If it is an array coordinate object, it returns an array
+        of names.
+
+    Notes
+    -----
+    To determine which constellation a point on the sky is in, this precesses
+    to B1875, and then uses the Delporte boundaries of the 88 modern
+    constellations, as tabulated by
+    `Roman 1987 `_.
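+
+    Examples
+    --------
+    An illustrative sketch; the coordinates below are approximately those of
+    M31, so the expected result is its host constellation::
+
+        >>> from astropy import units as u
+        >>> from astropy.coordinates import SkyCoord
+        >>> get_constellation(SkyCoord(10.68*u.deg, 41.27*u.deg))  # doctest: +SKIP
+        'Andromeda'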
+    """
+    if constellation_list != 'iau':
+        raise ValueError("only 'iau' is currently supported for constellation_list")
+
+    # read the data files and cache them if they haven't been already
+    if not _constellation_data:
+        cdata = data.get_pkg_data_contents('data/constellation_data_roman87.dat')
+        ctable = ascii.read(cdata, names=['ral', 'rau', 'decl', 'name'])
+        cnames = data.get_pkg_data_contents('data/constellation_names.dat', encoding='UTF8')
+        cnames_short_to_long = dict([(l[:3], l[4:])
+                                     for l in cnames.split('\n')
+                                     if not l.startswith('#')])
+        cnames_long = np.array([cnames_short_to_long[nm] for nm in ctable['name']])
+
+        _constellation_data['ctable'] = ctable
+        _constellation_data['cnames_long'] = cnames_long
+    else:
+        ctable = _constellation_data['ctable']
+        cnames_long = _constellation_data['cnames_long']
+
+    isscalar = coord.isscalar
+
+    # if it is geocentric, we reproduce the frame but with the 1875 equinox,
+    # which is where the constellations are defined
+    constel_coord = coord.transform_to(PrecessedGeocentric(equinox='B1875'))
+    if isscalar:
+        rah = constel_coord.ra.ravel().hour
+        decd = constel_coord.dec.ravel().deg
+    else:
+        rah = constel_coord.ra.hour
+        decd = constel_coord.dec.deg
+
+    constellidx = -np.ones(len(rah), dtype=int)
+
+    notided = constellidx == -1  # should be all
+    for i, row in enumerate(ctable):
+        msk = (row['ral'] < rah) & (rah < row['rau']) & (decd > row['decl'])
+        constellidx[notided & msk] = i
+        notided = constellidx == -1
+        if np.sum(notided) == 0:
+            break
+    else:
+        raise ValueError('Could not find constellation for coordinates {0}'.format(constel_coord[notided]))
+
+    if short_name:
+        names = ctable['name'][constellidx]
+    else:
+        names = cnames_long[constellidx]
+
+    if isscalar:
+        return names[0]
+    else:
+        return names
diff --git a/astropy/coordinates/matching.py b/astropy/coordinates/matching.py
new file mode 100644
index 0000000..b2d218f
--- /dev/null
+++ b/astropy/coordinates/matching.py
@@ -0,0 +1,469 @@
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+
+"""
+This module contains functions for matching coordinate catalogs.
+"""
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
+
+import numpy as np
+
+from ..extern import six
+from .representation import UnitSphericalRepresentation
+from .. import units as u
+from . import Angle
+
+__all__ = ['match_coordinates_3d', 'match_coordinates_sky', 'search_around_3d',
+           'search_around_sky']
+
+
+def match_coordinates_3d(matchcoord, catalogcoord, nthneighbor=1, storekdtree='kdtree_3d'):
+    """
+    Finds the nearest 3-dimensional matches of a coordinate or coordinates in
+    a set of catalog coordinates.
+
+    This finds the 3-dimensional closest neighbor, which is only different
+    from the on-sky distance if ``distance`` is set in either ``matchcoord``
+    or ``catalogcoord``.
+
+    Parameters
+    ----------
+    matchcoord : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
+        The coordinate(s) to match to the catalog.
+    catalogcoord : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
+        The base catalog in which to search for matches. Typically this will
+        be a coordinate object that is an array (i.e.,
+        ``catalogcoord.isscalar == False``)
+    nthneighbor : int, optional
+        Which closest neighbor to search for. Typically ``1`` is desired here,
+        as that is correct for matching one set of coordinates to another.
+ The next likely use case is ``2``, for matching a coordinate catalog + against *itself* (``1`` is inappropriate because each point will find + itself as the closest match). + storekdtree : bool or str, optional + If a string, will store the KD-Tree used for the computation + in the ``catalogcoord``, as in ``catalogcoord.cache`` with the + provided name. This dramatically speeds up subsequent calls with the + same catalog. If False, the KD-Tree is discarded after use. + + Returns + ------- + idx : integer array + Indices into ``catalogcoord`` to get the matched points for each + ``matchcoord``. Shape matches ``matchcoord``. + sep2d : `~astropy.coordinates.Angle` + The on-sky separation between the closest match for each ``matchcoord`` + and the ``matchcoord``. Shape matches ``matchcoord``. + dist3d : `~astropy.units.Quantity` + The 3D distance between the closest match for each ``matchcoord`` and + the ``matchcoord``. Shape matches ``matchcoord``. + + Notes + ----- + This function requires `SciPy `_ to be installed + or it will fail. + """ + if catalogcoord.isscalar or len(catalogcoord) < 1: + raise ValueError('The catalog for coordinate matching cannot be a ' + 'scalar or length-0.') + + kdt = _get_cartesian_kdtree(catalogcoord, storekdtree) + + # make sure coordinate systems match + matchcoord = matchcoord.transform_to(catalogcoord) + + # make sure units match + catunit = catalogcoord.cartesian.x.unit + matchxyz = matchcoord.cartesian.xyz.to(catunit) + + matchflatxyz = matchxyz.reshape((3, np.prod(matchxyz.shape) // 3)) + dist, idx = kdt.query(matchflatxyz.T, nthneighbor) + + if nthneighbor > 1: # query gives 1D arrays if k=1, 2D arrays otherwise + dist = dist[:, -1] + idx = idx[:, -1] + + sep2d = catalogcoord[idx].separation(matchcoord) + return idx.reshape(matchxyz.shape[1:]), sep2d, dist.reshape(matchxyz.shape[1:]) * catunit + + +def match_coordinates_sky(matchcoord, catalogcoord, nthneighbor=1, storekdtree='kdtree_sky'): + """ + Finds the nearest on-sky matches of a coordinate or coordinates in + a set of catalog coordinates. + + This finds the on-sky closest neighbor, which is only different from the + 3-dimensional match if ``distance`` is set in either ``matchcoord`` + or ``catalogcoord``. + + Parameters + ---------- + matchcoord : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` + The coordinate(s) to match to the catalog. + catalogcoord : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` + The base catalog in which to search for matches. Typically this will + be a coordinate object that is an array (i.e., + ``catalogcoord.isscalar == False``) + nthneighbor : int, optional + Which closest neighbor to search for. Typically ``1`` is desired here, + as that is correct for matching one set of coordinates to another. + The next likely use case is ``2``, for matching a coordinate catalog + against *itself* (``1`` is inappropriate because each point will find + itself as the closest match). + storekdtree : bool or str, optional + If a string, will store the KD-Tree used for the computation + in the ``catalogcoord`` in ``catalogcoord.cache`` with the + provided name. This dramatically speeds up subsequent calls with the + same catalog. If False, the KD-Tree is discarded after use. + + Returns + ------- + idx : integer array + Indices into ``catalogcoord`` to get the matched points for each + ``matchcoord``. Shape matches ``matchcoord``. 
+ sep2d : `~astropy.coordinates.Angle` + The on-sky separation between the closest match for each + ``matchcoord`` and the ``matchcoord``. Shape matches ``matchcoord``. + dist3d : `~astropy.units.Quantity` + The 3D distance between the closest match for each ``matchcoord`` and + the ``matchcoord``. Shape matches ``matchcoord``. If either + ``matchcoord`` or ``catalogcoord`` don't have a distance, this is the 3D + distance on the unit sphere, rather than a true distance. + + Notes + ----- + This function requires `SciPy `_ to be installed + or it will fail. + """ + if catalogcoord.isscalar or len(catalogcoord) < 1: + raise ValueError('The catalog for coordinate matching cannot be a ' + 'scalar or length-0.') + + # send to catalog frame + newmatch = matchcoord.transform_to(catalogcoord) + + # strip out distance info + match_urepr = newmatch.data.represent_as(UnitSphericalRepresentation) + newmatch_u = newmatch.realize_frame(match_urepr) + + cat_urepr = catalogcoord.data.represent_as(UnitSphericalRepresentation) + newcat_u = catalogcoord.realize_frame(cat_urepr) + + # Check for a stored KD-tree on the passed-in coordinate. Normally it will + # have a distinct name from the "3D" one, so it's safe to use even though + # it's based on UnitSphericalRepresentation. + storekdtree = catalogcoord.cache.get(storekdtree, storekdtree) + + idx, sep2d, sep3d = match_coordinates_3d(newmatch_u, newcat_u, nthneighbor, storekdtree) + # sep3d is *wrong* above, because the distance information was removed, + # unless one of the catalogs doesn't have a real distance + if not (isinstance(catalogcoord.data, UnitSphericalRepresentation) or + isinstance(newmatch.data, UnitSphericalRepresentation)): + sep3d = catalogcoord[idx].separation_3d(newmatch) + + # update the kdtree on the actual passed-in coordinate + if isinstance(storekdtree, six.string_types): + catalogcoord.cache[storekdtree] = newcat_u.cache[storekdtree] + elif storekdtree is True: + # the old backwards-compatible name + catalogcoord.cache['kdtree'] = newcat_u.cache['kdtree'] + + return idx, sep2d, sep3d + + +def search_around_3d(coords1, coords2, distlimit, storekdtree='kdtree_3d'): + """ + Searches for pairs of points that are at least as close as a specified + distance in 3D space. + + This is intended for use on coordinate objects with arrays of coordinates, + not scalars. For scalar coordinates, it is better to use the + ``separation_3d`` methods. + + Parameters + ---------- + coords1 : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` + The first set of coordinates, which will be searched for matches from + ``coords2`` within ``seplimit``. Cannot be a scalar coordinate. + coords2 : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` + The second set of coordinates, which will be searched for matches from + ``coords1`` within ``seplimit``. Cannot be a scalar coordinate. + distlimit : `~astropy.units.Quantity` with distance units + The physical radius to search within. + storekdtree : bool or str, optional + If a string, will store the KD-Tree used in the search with the name + ``storekdtree`` in ``coords2.cache``. This speeds up subsequent calls + to this function. If False, the KD-Trees are not saved. + + Returns + ------- + idx1 : integer array + Indices into ``coords1`` that matches to the corresponding element of + ``idx2``. Shape matches ``idx2``. + idx2 : integer array + Indices into ``coords2`` that matches to the corresponding element of + ``idx1``. Shape matches ``idx1``. 
+    sep2d : `~astropy.coordinates.Angle`
+        The on-sky separation between the coordinates. Shape matches ``idx1``
+        and ``idx2``.
+    dist3d : `~astropy.units.Quantity`
+        The 3D distance between the coordinates. Shape matches ``idx1`` and
+        ``idx2``. The unit is that of ``coords1``.
+
+    Notes
+    -----
+    This function requires `SciPy `_ (>=0.12.0)
+    to be installed or it will fail.
+
+    If you are using this function to search in a catalog for matches around
+    specific points, the convention is for ``coords2`` to be the catalog, and
+    ``coords1`` are the points to search around. While these operations are
+    mathematically the same if ``coords1`` and ``coords2`` are flipped, some of
+    the optimizations may work better if this convention is obeyed.
+
+    In the current implementation, the return values are always sorted in the
+    same order as the ``coords1`` (so ``idx1`` is in ascending order). This is
+    considered an implementation detail, though, so it could change in a future
+    release.
+    """
+    if not distlimit.isscalar:
+        raise ValueError('distlimit must be a scalar in search_around_3d')
+
+    if coords1.isscalar or coords2.isscalar:
+        raise ValueError('One of the inputs to search_around_3d is a scalar. '
+                         'search_around_3d is intended for use with array '
+                         'coordinates, not scalars. Instead, use '
+                         '``coord1.separation_3d(coord2) < distlimit`` to find '
+                         'the coordinates near a scalar coordinate.')
+
+    if len(coords1) == 0 or len(coords2) == 0:
+        # Empty array input: return empty match
+        return (np.array([], dtype=np.int), np.array([], dtype=np.int),
+                Angle([], u.deg),
+                u.Quantity([], coords1.distance.unit))
+
+    kdt2 = _get_cartesian_kdtree(coords2, storekdtree)
+    cunit = coords2.cartesian.x.unit
+
+    # we convert coord1 to match coord2's frame.  We do it this way
+    # so that if the conversion does happen, the KD tree of coord2 at least gets
+    # saved. (by convention, coord2 is the "catalog" if that makes sense)
+    coords1 = coords1.transform_to(coords2)
+
+    kdt1 = _get_cartesian_kdtree(coords1, storekdtree, forceunit=cunit)
+
+    # this is the cartesian 3D distance limit, converted to the unit of coords2
+    d = distlimit.to_value(cunit)
+
+    idxs1 = []
+    idxs2 = []
+    for i, matches in enumerate(kdt1.query_ball_tree(kdt2, d)):
+        for match in matches:
+            idxs1.append(i)
+            idxs2.append(match)
+    idxs1 = np.array(idxs1, dtype=np.int)
+    idxs2 = np.array(idxs2, dtype=np.int)
+
+    if idxs1.size == 0:
+        d2ds = Angle([], u.deg)
+        d3ds = u.Quantity([], coords1.distance.unit)
+    else:
+        d2ds = coords1[idxs1].separation(coords2[idxs2])
+        d3ds = coords1[idxs1].separation_3d(coords2[idxs2])
+
+    return idxs1, idxs2, d2ds, d3ds
+
+
+def search_around_sky(coords1, coords2, seplimit, storekdtree='kdtree_sky'):
+    """
+    Searches for pairs of points that have an angular separation at least as
+    close as a specified angle.
+
+    This is intended for use on coordinate objects with arrays of coordinates,
+    not scalars. For scalar coordinates, it is better to use the ``separation``
+    methods.
+
+    Parameters
+    ----------
+    coords1 : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
+        The first set of coordinates, which will be searched for matches from
+        ``coords2`` within ``seplimit``. Cannot be a scalar coordinate.
+    coords2 : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
+        The second set of coordinates, which will be searched for matches from
+        ``coords1`` within ``seplimit``. Cannot be a scalar coordinate.
+ seplimit : `~astropy.units.Quantity` with angle units + The on-sky separation to search within. + storekdtree : bool or str, optional + If a string, will store the KD-Tree used in the search with the name + ``storekdtree`` in ``coords2.cache``. This speeds up subsequent calls + to this function. If False, the KD-Trees are not saved. + + Returns + ------- + idx1 : integer array + Indices into ``coords1`` that matches to the corresponding element of + ``idx2``. Shape matches ``idx2``. + idx2 : integer array + Indices into ``coords2`` that matches to the corresponding element of + ``idx1``. Shape matches ``idx1``. + sep2d : `~astropy.coordinates.Angle` + The on-sky separation between the coordinates. Shape matches ``idx1`` + and ``idx2``. + dist3d : `~astropy.units.Quantity` + The 3D distance between the coordinates. Shape matches ``idx1`` + and ``idx2``; the unit is that of ``coords1``. + If either ``coords1`` or ``coords2`` don't have a distance, + this is the 3D distance on the unit sphere, rather than a + physical distance. + + Notes + ----- + This function requires `SciPy `_ (>=0.12.0) + to be installed or it will fail. + + In the current implementation, the return values are always sorted in the + same order as the ``coords1`` (so ``idx1`` is in ascending order). This is + considered an implementation detail, though, so it could change in a future + release. + """ + if not seplimit.isscalar: + raise ValueError('seplimit must be a scalar in search_around_sky') + + if coords1.isscalar or coords2.isscalar: + raise ValueError('One of the inputs to search_around_sky is a scalar. ' + 'search_around_sky is intended for use with array ' + 'coordinates, not scalars. Instead, use ' + '``coord1.separation(coord2) < seplimit`` to find the ' + 'coordinates near a scalar coordinate.') + + if len(coords1) == 0 or len(coords2) == 0: + # Empty array input: return empty match + if coords2.distance.unit == u.dimensionless_unscaled: + distunit = u.dimensionless_unscaled + else: + distunit = coords1.distance.unit + return (np.array([], dtype=np.int), np.array([], dtype=np.int), + Angle([], u.deg), + u.Quantity([], distunit)) + + # we convert coord1 to match coord2's frame. We do it this way + # so that if the conversion does happen, the KD tree of coord2 at least gets + # saved. 
(by convention, coord2 is the "catalog" if that makes sense) + coords1 = coords1.transform_to(coords2) + + # strip out distance info + urepr1 = coords1.data.represent_as(UnitSphericalRepresentation) + ucoords1 = coords1.realize_frame(urepr1) + + kdt1 = _get_cartesian_kdtree(ucoords1, storekdtree) + + if storekdtree and coords2.cache.get(storekdtree): + # just use the stored KD-Tree + kdt2 = coords2.cache[storekdtree] + else: + # strip out distance info + urepr2 = coords2.data.represent_as(UnitSphericalRepresentation) + ucoords2 = coords2.realize_frame(urepr2) + + kdt2 = _get_cartesian_kdtree(ucoords2, storekdtree) + if storekdtree: + # save the KD-Tree in coords2, *not* ucoords2 + coords2.cache['kdtree' if storekdtree is True else storekdtree] = kdt2 + + # this is the *cartesian* 3D distance that corresponds to the given angle + r = (2 * np.sin(Angle(seplimit) / 2.0)).value + + idxs1 = [] + idxs2 = [] + for i, matches in enumerate(kdt1.query_ball_tree(kdt2, r)): + for match in matches: + idxs1.append(i) + idxs2.append(match) + idxs1 = np.array(idxs1, dtype=np.int) + idxs2 = np.array(idxs2, dtype=np.int) + + if idxs1.size == 0: + if coords2.distance.unit == u.dimensionless_unscaled: + distunit = u.dimensionless_unscaled + else: + distunit = coords1.distance.unit + d2ds = Angle([], u.deg) + d3ds = u.Quantity([], distunit) + else: + d2ds = coords1[idxs1].separation(coords2[idxs2]) + try: + d3ds = coords1[idxs1].separation_3d(coords2[idxs2]) + except ValueError: + # they don't have distances, so we just fall back on the cartesian + # distance, computed from d2ds + d3ds = 2 * np.sin(d2ds / 2.0) + + return idxs1, idxs2, d2ds, d3ds + + +def _get_cartesian_kdtree(coord, attrname_or_kdt='kdtree', forceunit=None): + """ + This is a utility function to retrieve (and build/cache, if necessary) + a 3D cartesian KD-Tree from various sorts of astropy coordinate objects. + + Parameters + ---------- + coord : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` + The coordinates to build the KD-Tree for. + attrname_or_kdt : bool or str or KDTree + If a string, will store the KD-Tree used for the computation in the + ``coord``, in ``coord.cache`` with the provided name. If given as a + KD-Tree, it will just be used directly. + forceunit : unit or None + If a unit, the cartesian coordinates will convert to that unit before + being put in the KD-Tree. If None, whatever unit it's already in + will be used + + Returns + ------- + kdt : `~scipy.spatial.cKDTree` or `~scipy.spatial.KDTree` + The KD-Tree representing the 3D cartesian representation of the input + coordinates. 
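+
+    Notes
+    -----
+    A usage sketch (``catalog`` here stands for any array ``SkyCoord``): the
+    first call builds the tree and stores it under the default cache name,
+    and a repeated call returns the cached tree instead of rebuilding it::
+
+        kdt = _get_cartesian_kdtree(catalog)  # built; kept in catalog.cache['kdtree']
+        kdt = _get_cartesian_kdtree(catalog)  # retrieved from catalog.cache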
+    """
+    from warnings import warn
+
+    # without scipy this will immediately fail
+    from scipy import spatial
+    try:
+        KDTree = spatial.cKDTree
+    except Exception:
+        warn('C-based KD tree not found, falling back on (much slower) '
+             'python implementation')
+        KDTree = spatial.KDTree
+
+    if attrname_or_kdt is True:  # backwards compatibility for pre v0.4
+        attrname_or_kdt = 'kdtree'
+
+    # figure out where any cached KDTree might be
+    if isinstance(attrname_or_kdt, six.string_types):
+        kdt = coord.cache.get(attrname_or_kdt, None)
+        if kdt is not None and not isinstance(kdt, KDTree):
+            raise TypeError('The `attrname_or_kdt` "{0}" is not a scipy KD tree!'.format(attrname_or_kdt))
+    elif isinstance(attrname_or_kdt, KDTree):
+        kdt = attrname_or_kdt
+        attrname_or_kdt = None
+    elif not attrname_or_kdt:
+        kdt = None
+    else:
+        raise TypeError('Invalid `attrname_or_kdt` argument for KD-Tree:' +
+                        str(attrname_or_kdt))
+
+    if kdt is None:
+        # need to build the cartesian KD-tree for the catalog
+        if forceunit is None:
+            cartxyz = coord.cartesian.xyz
+        else:
+            cartxyz = coord.cartesian.xyz.to(forceunit)
+        flatxyz = cartxyz.reshape((3, np.prod(cartxyz.shape) // 3))
+        kdt = KDTree(flatxyz.value.T)
+
+    if attrname_or_kdt:
+        # cache the kdtree in `coord`
+        coord.cache[attrname_or_kdt] = kdt
+
+    return kdt
diff --git a/astropy/coordinates/matrix_utilities.py b/astropy/coordinates/matrix_utilities.py
new file mode 100644
index 0000000..daa6612
--- /dev/null
+++ b/astropy/coordinates/matrix_utilities.py
@@ -0,0 +1,130 @@
+# -*- coding: utf-8 -*-
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+
+"""
+This module contains utilities used for constructing rotation matrices.
+"""
+from functools import reduce
+import numpy as np
+from ..utils.compat.numpy import matmul
+
+from .. import units as u
+from .angles import Angle
+from ..extern.six.moves import range
+
+
+def matrix_product(*matrices):
+    """Matrix multiply all arguments together.
+
+    Arguments should have dimension 2 or larger. Larger dimensional objects
+    are interpreted as stacks of matrices residing in the last two dimensions.
+
+    This function mostly exists for readability: using `~numpy.matmul`
+    directly, one would have ``matmul(matmul(m1, m2), m3)``, etc. For even
+    better readability, one might consider using `~numpy.matrix` for the
+    arguments (so that one could write ``m1 * m2 * m3``), but then it is not
+    possible to handle stacks of matrices. Once only python >=3.5 is supported,
+    this function can be replaced by ``m1 @ m2 @ m3``.
+    """
+    return reduce(matmul, matrices)
+
+
+def matrix_transpose(matrix):
+    """Transpose a matrix or stack of matrices by swapping the last two axes.
+
+    This function mostly exists for readability; seeing ``.swapaxes(-2, -1)``
+    it is not that obvious that one does a transpose. Note that one cannot
+    use `~numpy.ndarray.T`, as this transposes all axes and thus does not
+    work for stacks of matrices.
+    """
+    return matrix.swapaxes(-2, -1)
+
+
+def rotation_matrix(angle, axis='z', unit=None):
+    """
+    Generate matrices for rotation by some angle around some axis.
+
+    Parameters
+    ----------
+    angle : convertible to `Angle`
+        The amount of rotation the matrices should represent. Can be an array.
+    axis : str, or array-like
+        Either ``'x'``, ``'y'``, ``'z'``, or a (x,y,z) specifying the axis to
+        rotate about. If ``'x'``, ``'y'``, or ``'z'``, the rotation sense is
+        counterclockwise looking down the + axis (e.g. positive rotations
+        obey left-hand-rule).
+        If given as an array, the last dimension should be 3;
+        it will be broadcast against ``angle``.
+    unit : UnitBase, optional
+        If ``angle`` does not have associated units, they are in this
+        unit. If neither are provided, it is assumed to be degrees.
+
+    Returns
+    -------
+    rmat : `~numpy.ndarray`
+        A unitary rotation matrix.
+    """
+    if unit is None:
+        unit = u.degree
+
+    angle = Angle(angle, unit=unit)
+
+    s = np.sin(angle)
+    c = np.cos(angle)
+
+    # use optimized implementations for x/y/z
+    try:
+        i = 'xyz'.index(axis)
+    except TypeError:
+        axis = np.asarray(axis)
+        axis = axis / np.sqrt((axis * axis).sum(axis=-1, keepdims=True))
+        R = (axis[..., np.newaxis] * axis[..., np.newaxis, :] *
+             (1. - c)[..., np.newaxis, np.newaxis])
+
+        for i in range(0, 3):
+            R[..., i, i] += c
+            a1 = (i + 1) % 3
+            a2 = (i + 2) % 3
+            R[..., a1, a2] += axis[..., i] * s
+            R[..., a2, a1] -= axis[..., i] * s
+
+    else:
+        a1 = (i + 1) % 3
+        a2 = (i + 2) % 3
+        R = np.zeros(angle.shape + (3, 3))
+        R[..., i, i] = 1.
+        R[..., a1, a1] = c
+        R[..., a1, a2] = s
+        R[..., a2, a1] = -s
+        R[..., a2, a2] = c
+
+    return R
+
+
+def angle_axis(matrix):
+    """
+    Angle of rotation and rotation axis for a given rotation matrix.
+
+    Parameters
+    ----------
+    matrix : array-like
+        A 3 x 3 unitary rotation matrix (or stack of matrices).
+
+    Returns
+    -------
+    angle : `Angle`
+        The angle of rotation.
+    axis : array
+        The (normalized) axis of rotation (with last dimension 3).
+    """
+    m = np.asanyarray(matrix)
+    if m.shape[-2:] != (3, 3):
+        raise ValueError('matrix is not 3x3')
+
+    axis = np.zeros(m.shape[:-1])
+    axis[..., 0] = m[..., 2, 1] - m[..., 1, 2]
+    axis[..., 1] = m[..., 0, 2] - m[..., 2, 0]
+    axis[..., 2] = m[..., 1, 0] - m[..., 0, 1]
+    r = np.sqrt((axis * axis).sum(-1, keepdims=True))
+    angle = np.arctan2(r[..., 0],
+                       m[..., 0, 0] + m[..., 1, 1] + m[..., 2, 2] - 1.)
+    return Angle(angle, u.radian), -axis / r
diff --git a/astropy/coordinates/name_resolve.py b/astropy/coordinates/name_resolve.py
new file mode 100644
index 0000000..95630f4
--- /dev/null
+++ b/astropy/coordinates/name_resolve.py
@@ -0,0 +1,173 @@
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+
+"""
+This module contains convenience functions for getting a coordinate object
+for a named object by querying SESAME and getting the first returned result.
+Note that this is intended to be a convenience, and is very simple. If you
+need precise coordinates for an object you should find the appropriate
+reference for that measurement and input the coordinates manually.
+"""
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
+
+# Standard library
+import os
+import re
+import socket
+
+# Astropy
+from ..extern.six.moves import urllib
+from .. import units as u
+from .sky_coordinate import SkyCoord
+from ..utils import data
+from ..utils.state import ScienceState
+
+__all__ = ["get_icrs_coordinates"]
+
+
+class sesame_url(ScienceState):
+    """
+    The URL(s) to Sesame's web-queryable database.
+    """
+    _value = ["http://cdsweb.u-strasbg.fr/cgi-bin/nph-sesame/",
+              "http://vizier.cfa.harvard.edu/viz-bin/nph-sesame/"]
+
+    @classmethod
+    def validate(cls, value):
+        # TODO: Implement me
+        return value
+
+
+class sesame_database(ScienceState):
+    """
+    This specifies the default database that SESAME will query when
+    using the name resolve mechanism in the coordinates
+    subpackage. Default is to search all databases, but this can be
+    'all', 'simbad', 'ned', or 'vizier'.
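+
+    Examples
+    --------
+    A usage sketch following the usual `~astropy.utils.state.ScienceState`
+    pattern, where ``set`` acts as a context manager::
+
+        from astropy.coordinates import name_resolve
+
+        with name_resolve.sesame_database.set('simbad'):
+            pass  # name lookups inside this block query only SIMBAD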
+    """
+    _value = 'all'
+
+    @classmethod
+    def validate(cls, value):
+        if value not in ['all', 'simbad', 'ned', 'vizier']:
+            raise ValueError("Unknown database '{0}'".format(value))
+        return value
+
+
+class NameResolveError(Exception):
+    pass
+
+
+def _parse_response(resp_data):
+    """
+    Given a string response from SESAME, parse out the coordinates by looking
+    for a line starting with a J, meaning ICRS J2000 coordinates.
+
+    Parameters
+    ----------
+    resp_data : str
+        The string HTTP response from SESAME.
+
+    Returns
+    -------
+    ra : str
+        The string Right Ascension parsed from the HTTP response.
+    dec : str
+        The string Declination parsed from the HTTP response.
+    """
+
+    pattr = re.compile(r"%J\s*([0-9\.]+)\s*([\+\-\.0-9]+)")
+    matched = pattr.search(resp_data.decode('utf-8'))
+
+    if matched is None:
+        return None, None
+    else:
+        ra, dec = matched.groups()
+        return ra, dec
+
+
+def get_icrs_coordinates(name):
+    """
+    Retrieve an ICRS object by using an online name resolving service to
+    retrieve coordinates for the specified name. By default, this will
+    search all available databases until a match is found. If you would like
+    to specify the database, use the science state
+    ``astropy.coordinates.name_resolve.sesame_database``. You can also
+    specify a list of servers to use for querying Sesame using the science
+    state ``astropy.coordinates.name_resolve.sesame_url``. This will try
+    each one in order until a valid response is returned. By default, this
+    list includes the main Sesame host and a mirror at vizier. The
+    configuration item `astropy.utils.data.Conf.remote_timeout` controls the
+    number of seconds to wait for a response from the server before giving
+    up.
+
+    Parameters
+    ----------
+    name : str
+        The name of the object to get coordinates for, e.g. ``'M42'``.
+
+    Returns
+    -------
+    coord : `~astropy.coordinates.SkyCoord` object
+        The object's coordinates in the ICRS frame.
+
+    """
+
+    database = sesame_database.get()
+    # The web API just takes the first letter of the database name
+    db = database.upper()[0]
+
+    # Make sure we don't have duplicates in the url list
+    urls = []
+    domains = []
+    for url in sesame_url.get():
+        domain = urllib.parse.urlparse(url).netloc
+
+        # Check for duplicates
+        if domain not in domains:
+            domains.append(domain)
+
+            # Add the query to the end of the url, add to url list
+            fmt_url = os.path.join(url, "{db}?{name}")
+            fmt_url = fmt_url.format(name=urllib.parse.quote(name), db=db)
+            urls.append(fmt_url)
+
+    exceptions = []
+    for url in urls:
+        try:
+            # Retrieve ascii name resolve data from CDS
+            resp = urllib.request.urlopen(url, timeout=data.conf.remote_timeout)
+            resp_data = resp.read()
+            break
+        except urllib.error.URLError as e:
+            exceptions.append(e)
+            continue
+        except socket.timeout as e:
+            # There are some cases where urllib2 does not catch socket.timeout
+            # especially while receiving response data on a request that had
+            # previously been working
+            exceptions.append(e)
+            continue
+
+    # All Sesame URLs failed...
+    else:
+        messages = ["{url}: {e.reason}".format(url=url, e=e)
+                    for url, e in zip(urls, exceptions)]
+        raise NameResolveError("All Sesame queries failed. Unable to "
+                               "retrieve coordinates.
See errors per URL " + "below: \n {}".format("\n".join(messages))) + + ra, dec = _parse_response(resp_data) + + if ra is None and dec is None: + if db == "A": + err = "Unable to find coordinates for name '{0}'".format(name) + else: + err = "Unable to find coordinates for name '{0}' in database {1}"\ + .format(name, database) + + raise NameResolveError(err) + + # Return SkyCoord object + sc = SkyCoord(ra=ra, dec=dec, unit=(u.degree, u.degree), frame='icrs') + return sc diff --git a/astropy/coordinates/orbital_elements.py b/astropy/coordinates/orbital_elements.py new file mode 100644 index 0000000..f48626c --- /dev/null +++ b/astropy/coordinates/orbital_elements.py @@ -0,0 +1,247 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" +This module contains convenience functions implementing some of the +algorithms contained within Jean Meeus, 'Astronomical Algorithms', +second edition, 1998, Willmann-Bell. +""" + +from __future__ import (absolute_import, division, print_function, + unicode_literals) +import numpy as np +from numpy.polynomial.polynomial import polyval + +from .. import units as u +from .. import _erfa as erfa +from . import ICRS, SkyCoord, GeocentricTrueEcliptic +from .builtin_frames.utils import get_jd12 + +__all__ = ["calc_moon"] + +# Meeus 1998: table 47.A +# D M M' F l r +_MOON_L_R = ( + (0, 0, 1, 0, 6288774, -20905355), + (2, 0, -1, 0, 1274027, -3699111), + (2, 0, 0, 0, 658314, -2955968), + (0, 0, 2, 0, 213618, -569925), + (0, 1, 0, 0, -185116, 48888), + (0, 0, 0, 2, -114332, -3149), + (2, 0, -2, 0, 58793, 246158), + (2, -1, -1, 0, 57066, -152138), + (2, 0, 1, 0, 53322, -170733), + (2, -1, 0, 0, 45758, -204586), + (0, 1, -1, 0, -40923, -129620), + (1, 0, 0, 0, -34720, 108743), + (0, 1, 1, 0, -30383, 104755), + (2, 0, 0, -2, 15327, 10321), + (0, 0, 1, 2, -12528, 0), + (0, 0, 1, -2, 10980, 79661), + (4, 0, -1, 0, 10675, -34782), + (0, 0, 3, 0, 10034, -23210), + (4, 0, -2, 0, 8548, -21636), + (2, 1, -1, 0, -7888, 24208), + (2, 1, 0, 0, -6766, 30824), + (1, 0, -1, 0, -5163, -8379), + (1, 1, 0, 0, 4987, -16675), + (2, -1, 1, 0, 4036, -12831), + (2, 0, 2, 0, 3994, -10445), + (4, 0, 0, 0, 3861, -11650), + (2, 0, -3, 0, 3665, 14403), + (0, 1, -2, 0, -2689, -7003), + (2, 0, -1, 2, -2602, 0), + (2, -1, -2, 0, 2390, 10056), + (1, 0, 1, 0, -2348, 6322), + (2, -2, 0, 0, 2236, -9884), + (0, 1, 2, 0, -2120, 5751), + (0, 2, 0, 0, -2069, 0), + (2, -2, -1, 0, 2048, -4950), + (2, 0, 1, -2, -1773, 4130), + (2, 0, 0, 2, -1595, 0), + (4, -1, -1, 0, 1215, -3958), + (0, 0, 2, 2, -1110, 0), + (3, 0, -1, 0, -892, 3258), + (2, 1, 1, 0, -810, 2616), + (4, -1, -2, 0, 759, -1897), + (0, 2, -1, 0, -713, -2117), + (2, 2, -1, 0, -700, 2354), + (2, 1, -2, 0, 691, 0), + (2, -1, 0, -2, 596, 0), + (4, 0, 1, 0, 549, -1423), + (0, 0, 4, 0, 537, -1117), + (4, -1, 0, 0, 520, -1571), + (1, 0, -2, 0, -487, -1739), + (2, 1, 0, -2, -399, 0), + (0, 0, 2, -2, -381, -4421), + (1, 1, 1, 0, 351, 0), + (3, 0, -2, 0, -340, 0), + (4, 0, -3, 0, 330, 0), + (2, -1, 2, 0, 327, 0), + (0, 2, 1, 0, -323, 1165), + (1, 1, -1, 0, 299, 0), + (2, 0, 3, 0, 294, 0), + (2, 0, -1, -2, 0, 8752) +) + +# Meeus 1998: table 47.B +# D M M' F b +_MOON_B = ( + (0, 0, 0, 1, 5128122), + (0, 0, 1, 1, 280602), + (0, 0, 1, -1, 277693), + (2, 0, 0, -1, 173237), + (2, 0, -1, 1, 55413), + (2, 0, -1, -1, 46271), + (2, 0, 0, 1, 32573), + (0, 0, 2, 1, 17198), + (2, 0, 1, -1, 9266), + (0, 0, 2, -1, 8822), + (2, -1, 0, -1, 8216), + (2, 0, -2, -1, 4324), + (2, 0, 1, 1, 4200), + (2, 1, 0, -1, -3359), + (2, -1, -1, 1, 2463), + (2, -1, 0, 1, 
2211), + (2, -1, -1, -1, 2065), + (0, 1, -1, -1, -1870), + (4, 0, -1, -1, 1828), + (0, 1, 0, 1, -1794), + (0, 0, 0, 3, -1749), + (0, 1, -1, 1, -1565), + (1, 0, 0, 1, -1491), + (0, 1, 1, 1, -1475), + (0, 1, 1, -1, -1410), + (0, 1, 0, -1, -1344), + (1, 0, 0, -1, -1335), + (0, 0, 3, 1, 1107), + (4, 0, 0, -1, 1021), + (4, 0, -1, 1, 833), + # second column + (0, 0, 1, -3, 777), + (4, 0, -2, 1, 671), + (2, 0, 0, -3, 607), + (2, 0, 2, -1, 596), + (2, -1, 1, -1, 491), + (2, 0, -2, 1, -451), + (0, 0, 3, -1, 439), + (2, 0, 2, 1, 422), + (2, 0, -3, -1, 421), + (2, 1, -1, 1, -366), + (2, 1, 0, 1, -351), + (4, 0, 0, 1, 331), + (2, -1, 1, 1, 315), + (2, -2, 0, -1, 302), + (0, 0, 1, 3, -283), + (2, 1, 1, -1, -229), + (1, 1, 0, -1, 223), + (1, 1, 0, 1, 223), + (0, 1, -2, -1, -220), + (2, 1, -1, -1, -220), + (1, 0, 1, 1, -185), + (2, -1, -2, -1, 181), + (0, 1, 2, 1, -177), + (4, 0, -2, -1, 176), + (4, -1, -1, -1, 166), + (1, 0, 1, -1, -164), + (4, 0, 1, -1, 132), + (1, 0, -1, -1, -119), + (4, -1, 0, -1, 115), + (2, -2, 0, 1, 107) +) + +""" +Coefficients of polynomials for various terms: + +Lc : Mean longitude of Moon, w.r.t mean Equinox of date +D : Mean elongation of the Moon +M: Sun's mean anomaly +Mc : Moon's mean anomaly +F : Moon's argument of latitude (mean distance of Moon from its ascending node). +""" +_coLc = (2.18316448e+02, 4.81267881e+05, -1.57860000e-03, + 1.85583502e-06, -1.53388349e-08) +_coD = (2.97850192e+02, 4.45267111e+05, -1.88190000e-03, + 1.83194472e-06, -8.84447000e-09) +_coM = (3.57529109e+02, 3.59990503e+04, -1.53600000e-04, + 4.08329931e-08) +_coMc = (1.34963396e+02, 4.77198868e+05, 8.74140000e-03, + 1.43474081e-05, -6.79717238e-08) +_coF = (9.32720950e+01, 4.83202018e+05, -3.65390000e-03, + -2.83607487e-07, 1.15833246e-09) +_coA1 = (119.75, 131.849) +_coA2 = (53.09, 479264.290) +_coA3 = (313.45, 481266.484) +_coE = (1.0, -0.002516, -0.0000074) + + +def calc_moon(t): + """ + Lunar position model ELP2000-82 of (Chapront-Touze' and Chapront, 1983, 124, 50) + + This is the simplified version of Jean Meeus, Astronomical Algorithms, + second edition, 1998, Willmann-Bell. Meeus claims approximate accuracy of 10" + in longitude and 4" in latitude, with no specified time range. + + Tests against JPL ephemerides show accuracy of 10 arcseconds and 50 km over the + date range CE 1950-2050. + + Parameters + ----------- + t : `~astropy.time.Time` + Time of observation. + + Returns + -------- + skycoord : `~astropy.coordinates.SkyCoord` + ICRS Coordinate for the body + """ + # number of centuries since J2000.0. + # This should strictly speaking be in Ephemeris Time, but TDB or TT + # will introduce error smaller than intrinsic accuracy of algorithm. + T = (t.tdb.jyear-2000.0)/100. 
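+    # (e.g. an input time of 2050-01-01 TDB gives T of about 0.5, i.e. half a
+    # Julian century past J2000.0)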
+ + # constants that are needed for all calculations + Lc = u.Quantity(polyval(T, _coLc), u.deg) + D = u.Quantity(polyval(T, _coD), u.deg) + M = u.Quantity(polyval(T, _coM), u.deg) + Mc = u.Quantity(polyval(T, _coMc), u.deg) + F = u.Quantity(polyval(T, _coF), u.deg) + + A1 = u.Quantity(polyval(T, _coA1), u.deg) + A2 = u.Quantity(polyval(T, _coA2), u.deg) + A3 = u.Quantity(polyval(T, _coA3), u.deg) + E = polyval(T, _coE) + + suml = sumr = 0.0 + for DNum, MNum, McNum, FNum, LFac, RFac in _MOON_L_R: + corr = E ** abs(MNum) + suml += LFac*corr*np.sin(D*DNum+M*MNum+Mc*McNum+F*FNum) + sumr += RFac*corr*np.cos(D*DNum+M*MNum+Mc*McNum+F*FNum) + + sumb = 0.0 + for DNum, MNum, McNum, FNum, BFac in _MOON_B: + corr = E ** abs(MNum) + sumb += BFac*corr*np.sin(D*DNum+M*MNum+Mc*McNum+F*FNum) + + suml += (3958*np.sin(A1) + 1962*np.sin(Lc-F) + 318*np.sin(A2)) + sumb += (-2235*np.sin(Lc) + 382*np.sin(A3) + 175*np.sin(A1-F) + + 175*np.sin(A1+F) + 127*np.sin(Lc-Mc) - 115*np.sin(Lc+Mc)) + + # ensure units + suml = suml*u.microdegree + sumb = sumb*u.microdegree + + # nutation of longitude + jd1, jd2 = get_jd12(t, 'tt') + nut, _ = erfa.nut06a(jd1, jd2) + nut = nut*u.rad + + # calculate ecliptic coordinates + lon = Lc + suml + nut + lat = sumb + dist = (385000.56+sumr/1000)*u.km + + # Meeus algorithm gives GeocentricTrueEcliptic coordinates + ecliptic_coo = GeocentricTrueEcliptic(lon, lat, distance=dist, + equinox=t) + + return SkyCoord(ecliptic_coo.transform_to(ICRS)) diff --git a/astropy/coordinates/representation.py b/astropy/coordinates/representation.py new file mode 100644 index 0000000..ed52dfe --- /dev/null +++ b/astropy/coordinates/representation.py @@ -0,0 +1,2804 @@ +""" +In this module, we define the coordinate representation classes, which are +used to represent low-level cartesian, spherical, cylindrical, and other +coordinates. +""" + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import abc +import functools +import operator +from collections import OrderedDict +import inspect + +import numpy as np +import astropy.units as u + +from .angles import Angle, Longitude, Latitude +from .distances import Distance +from ..extern import six +from ..utils import ShapedLikeNDArray, classproperty + +from ..utils.misc import InheritDocstrings +from ..utils.compat import NUMPY_LT_1_12, NUMPY_LT_1_14 +from ..utils.compat.numpy import broadcast_arrays, broadcast_to + + +__all__ = ["BaseRepresentationOrDifferential", "BaseRepresentation", + "CartesianRepresentation", "SphericalRepresentation", + "UnitSphericalRepresentation", "RadialRepresentation", + "PhysicsSphericalRepresentation", "CylindricalRepresentation", + "BaseDifferential", "CartesianDifferential", + "BaseSphericalDifferential", "BaseSphericalCosLatDifferential", + "SphericalDifferential", "SphericalCosLatDifferential", + "UnitSphericalDifferential", "UnitSphericalCosLatDifferential", + "RadialDifferential", "CylindricalDifferential", + "PhysicsSphericalDifferential"] + +# Module-level dict mapping representation string alias names to classes. +# This is populated by the metaclass init so all representation and differential +# classes get registered automatically. +REPRESENTATION_CLASSES = {} +DIFFERENTIAL_CLASSES = {} + + +def _array2string(values, prefix=''): + # Mimic numpy >=1.12 array2string, in which structured arrays are + # typeset taking into account all printoptions. 
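+    # (A "structured" array here has one field per component, e.g. a dtype
+    # like [('x', 'f8'), ('y', 'f8'), ('z', 'f8')]; the ``_values`` property
+    # defined below builds exactly such arrays.)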
+    kwargs = {'separator': ', ', 'prefix': prefix}
+    if NUMPY_LT_1_12:  # pragma: no cover
+        # Mimic StructureFormat from numpy >=1.12 assuming float-only data.
+        from numpy.core.arrayprint import FloatFormat
+        opts = np.get_printoptions()
+        format_functions = [FloatFormat(np.atleast_1d(values[component]).ravel(),
+                                        precision=opts['precision'],
+                                        suppress_small=opts['suppress'])
+                            for component in values.dtype.names]
+
+        def fmt(x):
+            return '({})'.format(', '.join(format_function(field)
+                                           for field, format_function in
+                                           zip(x, format_functions)))
+        # Before 1.12, structured arrays were set as "numpystr",
+        # so that is the formatter we need to replace.
+        kwargs['formatter'] = {'numpystr': fmt}
+        kwargs['style'] = fmt
+
+    else:
+        kwargs['formatter'] = {}
+        if NUMPY_LT_1_14:  # in 1.14, style is no longer used (and deprecated)
+            kwargs['style'] = repr
+
+    return np.array2string(values, **kwargs)
+
+
+def _combine_xyz(x, y, z, xyz_axis=0):
+    """
+    Combine components ``x``, ``y``, ``z`` into a single Quantity array.
+
+    Parameters
+    ----------
+    x, y, z : `~astropy.units.Quantity`
+        The individual x, y, and z components.
+    xyz_axis : int, optional
+        The axis in the final array along which the x, y, z components
+        should be stored (default: 0).
+
+    Returns
+    -------
+    xyz : `~astropy.units.Quantity`
+        With dimension 3 along ``xyz_axis``, i.e., using the default of ``0``,
+        the shape will be ``(3,) + x.shape``.
+    """
+    # Add new axis in x, y, z so one can concatenate them around it.
+    # NOTE: just use np.stack once our minimum numpy version is 1.10.
+    result_ndim = x.ndim + 1
+    if not -result_ndim <= xyz_axis < result_ndim:
+        raise IndexError('xyz_axis {0} out of bounds [-{1}, {1})'
+                         .format(xyz_axis, result_ndim))
+
+    if xyz_axis < 0:
+        xyz_axis += result_ndim
+
+    # Get x, y, z to the same units (this is very fast for identical units)
+    # since np.concatenate cannot deal with Quantity.
+    cls = x.__class__
+    y = cls(y, x.unit, copy=False)
+    z = cls(z, x.unit, copy=False)
+
+    sh = x.shape
+    sh = sh[:xyz_axis] + (1,) + sh[xyz_axis:]
+    xyz_value = np.concatenate([c.reshape(sh).value for c in (x, y, z)],
+                               axis=xyz_axis)
+    return cls(xyz_value, unit=x.unit, copy=False)
+
+
+class BaseRepresentationOrDifferential(ShapedLikeNDArray):
+    """3D coordinate representations and differentials.
+
+    Parameters
+    ----------
+    comp1, comp2, comp3 : `~astropy.units.Quantity` or subclass
+        The components of the 3D point or differential. The names are the
+        keys and the subclasses the values of the ``attr_classes`` attribute.
+    copy : bool, optional
+        If `True` (default), arrays will be copied rather than referenced.
+    """
+
+    # Ensure multiplication/division with ndarray or Quantity doesn't lead to
+    # object arrays.
+    __array_priority__ = 50000
+
+    def __init__(self, *args, **kwargs):
+        # make argument a list, so we can pop them off.
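+        # (e.g. CartesianRepresentation accepts (x, y, z) or (x=x, y=y, z=z);
+        # each component is popped positionally if available, otherwise it is
+        # looked up by name among the keyword arguments)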
+ args = list(args) + components = self.components + attrs = [] + for component in components: + try: + attrs.append(args.pop(0) if args else kwargs.pop(component)) + except KeyError: + raise TypeError('__init__() missing 1 required positional ' + 'argument: {0!r}'.format(component)) + + copy = args.pop(0) if args else kwargs.pop('copy', True) + + if args: + raise TypeError('unexpected arguments: {0}'.format(args)) + + if kwargs: + for component in components: + if component in kwargs: + raise TypeError("__init__() got multiple values for " + "argument {0!r}".format(component)) + + raise TypeError('unexpected keyword arguments: {0}'.format(kwargs)) + + # Pass attributes through the required initializing classes. + attrs = [self.attr_classes[component](attr, copy=copy) + for component, attr in zip(components, attrs)] + try: + attrs = broadcast_arrays(*attrs, subok=True) + except ValueError: + if len(components) <= 2: + c_str = ' and '.join(components) + else: + c_str = ', '.join(components[:2]) + ', and ' + components[2] + raise ValueError("Input parameters {0} cannot be broadcast" + .format(c_str)) + # Set private attributes for the attributes. (If not defined explicitly + # on the class, the metaclass will define properties to access these.) + for component, attr in zip(components, attrs): + setattr(self, '_' + component, attr) + + @classmethod + def get_name(cls): + """Name of the representation or differential. + + In lower case, with any trailing 'representation' or 'differential' + removed. (E.g., 'spherical' for + `~astropy.coordinates.SphericalRepresentation` or + `~astropy.coordinates.SphericalDifferential`.) + """ + name = cls.__name__.lower() + + if name.endswith('representation'): + name = name[:-14] + elif name.endswith('differential'): + name = name[:-12] + + return name + + # The two methods that any subclass has to define. + # Should be replaced by abstractclassmethod once we support only PY3 + @abc.abstractmethod + def from_cartesian(self, other): + """Create a representation of this class from a supplied Cartesian one. + + Parameters + ---------- + other : `CartesianRepresentation` + The representation to turn into this class + + Returns + ------- + representation : object of this class + A new representation of this class's type. + """ + # Note: the above docstring gets overridden for differentials. + raise NotImplementedError() + + @abc.abstractmethod + def to_cartesian(self): + """Convert the representation to its Cartesian form. + + Note that any differentials get dropped. + + Returns + ------- + cartrepr : `CartesianRepresentation` + The representation in Cartesian form. + """ + # Note: the above docstring gets overridden for differentials. + raise NotImplementedError() + + @property + def components(self): + """A tuple with the in-order names of the coordinate components.""" + return tuple(self.attr_classes) + + def _apply(self, method, *args, **kwargs): + """Create a new representation or differential with ``method`` applied + to the component data. + + In typical usage, the method is any of the shape-changing methods for + `~numpy.ndarray` (``reshape``, ``swapaxes``, etc.), as well as those + picking particular elements (``__getitem__``, ``take``, etc.), which + are all defined in `~astropy.utils.misc.ShapedLikeNDArray`. It will be + applied to the underlying arrays (e.g., ``x``, ``y``, and ``z`` for + `~astropy.coordinates.CartesianRepresentation`), with the results used + to create a new instance. 
+ + Internally, it is also used to apply functions to the components + (in particular, `~numpy.broadcast_to`). + + Parameters + ---------- + method : str or callable + If str, it is the name of a method that is applied to the internal + ``components``. If callable, the function is applied. + args : tuple + Any positional arguments for ``method``. + kwargs : dict + Any keyword arguments for ``method``. + """ + if callable(method): + apply_method = lambda array: method(array, *args, **kwargs) + else: + apply_method = operator.methodcaller(method, *args, **kwargs) + + return self.__class__(*[apply_method(getattr(self, component)) + for component in self.components], copy=False) + + @property + def shape(self): + """The shape of the instance and underlying arrays. + + Like `~numpy.ndarray.shape`, can be set to a new shape by assigning a + tuple. Note that if different instances share some but not all + underlying data, setting the shape of one instance can make the other + instance unusable. Hence, it is strongly recommended to get new, + reshaped instances with the ``reshape`` method. + + Raises + ------ + AttributeError + If the shape of any of the components cannot be changed without the + arrays being copied. For these cases, use the ``reshape`` method + (which copies any arrays that cannot be reshaped in-place). + """ + return getattr(self, self.components[0]).shape + + @shape.setter + def shape(self, shape): + # We keep track of arrays that were already reshaped since we may have + # to return those to their original shape if a later shape-setting + # fails. (This can happen since coordinates are broadcast together.) + reshaped = [] + oldshape = self.shape + for component in self.components: + val = getattr(self, component) + if val.size > 1: + try: + val.shape = shape + except AttributeError: + for val2 in reshaped: + val2.shape = oldshape + raise + else: + reshaped.append(val) + + # Required to support multiplication and division, and defined by the base + # representation and differential classes. + @abc.abstractmethod + def _scale_operation(self, op, *args): + raise NotImplementedError() + + def __mul__(self, other): + return self._scale_operation(operator.mul, other) + + def __rmul__(self, other): + return self.__mul__(other) + + def __truediv__(self, other): + return self._scale_operation(operator.truediv, other) + + def __div__(self, other): # pragma: py2 + return self._scale_operation(operator.truediv, other) + + def __neg__(self): + return self._scale_operation(operator.neg) + + # Follow numpy convention and make an independent copy. + def __pos__(self): + return self.copy() + + # Required to support addition and subtraction, and defined by the base + # representation and differential classes. + @abc.abstractmethod + def _combine_operation(self, op, other, reverse=False): + raise NotImplementedError() + + def __add__(self, other): + return self._combine_operation(operator.add, other) + + def __radd__(self, other): + return self._combine_operation(operator.add, other, reverse=True) + + def __sub__(self, other): + return self._combine_operation(operator.sub, other) + + def __rsub__(self, other): + return self._combine_operation(operator.sub, other, reverse=True) + + # The following are used for repr and str + @property + def _values(self): + """Turn the coordinates into a record array with the coordinate values. + + The record array fields will have the component names. + """ + # The "str(c)" is needed for PY2; it can be removed for astropy 3.0. 
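+        # (e.g. for a CartesianRepresentation this yields a record array with
+        # fields 'x', 'y' and 'z' holding the bare values, units stripped)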
+ coo_items = [(str(c), getattr(self, c)) for c in self.components] + result = np.empty(self.shape, [(c, coo.dtype) for c, coo in coo_items]) + for c, coo in coo_items: + result[c] = coo.value + return result + + @property + def _units(self): + """Return a dictionary with the units of the coordinate components.""" + return dict([(component, getattr(self, component).unit) + for component in self.components]) + + @property + def _unitstr(self): + units_set = set(self._units.values()) + if len(units_set) == 1: + unitstr = units_set.pop().to_string() + else: + unitstr = '({0})'.format( + ', '.join([self._units[component].to_string() + for component in self.components])) + return unitstr + + def __str__(self): + return '{0} {1:s}'.format(_array2string(self._values), self._unitstr) + + def __repr__(self): + prefixstr = ' ' + arrstr = _array2string(self._values, prefix=prefixstr) + + diffstr = '' + if getattr(self, 'differentials', None): + diffstr = '\n (has differentials w.r.t.: {0})'.format( + ', '.join([repr(key) for key in self.differentials.keys()])) + + unitstr = ('in ' + self._unitstr) if self._unitstr else '[dimensionless]' + return '<{0} ({1}) {2:s}\n{3}{4}{5}>'.format( + self.__class__.__name__, ', '.join(self.components), + unitstr, prefixstr, arrstr, diffstr) + + +def _make_getter(component): + """Make an attribute getter for use in a property. + + Parameters + ---------- + component : str + The name of the component that should be accessed. This assumes the + actual value is stored in an attribute of that name prefixed by '_'. + """ + # This has to be done in a function to ensure the reference to component + # is not lost/redirected. + component = '_' + component + + def get_component(self): + return getattr(self, component) + return get_component + + +# Need to also subclass ABCMeta rather than type, so that this meta class can +# be combined with a ShapedLikeNDArray subclass (which is an ABC). Without it: +# "TypeError: metaclass conflict: the metaclass of a derived class must be a +# (non-strict) subclass of the metaclasses of all its bases" +class MetaBaseRepresentation(InheritDocstrings, abc.ABCMeta): + def __init__(cls, name, bases, dct): + super(MetaBaseRepresentation, cls).__init__(name, bases, dct) + + # Register representation name (except for BaseRepresentation) + if cls.__name__ == 'BaseRepresentation': + return + + if 'attr_classes' not in dct: + raise NotImplementedError('Representations must have an ' + '"attr_classes" class attribute.') + + repr_name = cls.get_name() + + if repr_name in REPRESENTATION_CLASSES: + raise ValueError("Representation class {0} already defined" + .format(repr_name)) + + REPRESENTATION_CLASSES[repr_name] = cls + + # define getters for any component that does not yet have one. + for component in cls.attr_classes: + if not hasattr(cls, component): + setattr(cls, component, + property(_make_getter(component), + doc=("The '{0}' component of the points(s)." + .format(component)))) + + +@six.add_metaclass(MetaBaseRepresentation) +class BaseRepresentation(BaseRepresentationOrDifferential): + """Base for representing a point in a 3D coordinate system. + + Parameters + ---------- + comp1, comp2, comp3 : `~astropy.units.Quantity` or subclass + The components of the 3D points. The names are the keys and the + subclasses the values of the ``attr_classes`` attribute. + differentials : dict, `BaseDifferential`, optional + Any differential classes that should be associated with this + representation. 
The input must either be a single `BaseDifferential` + subclass instance, or a dictionary with keys set to a string + representation of the SI unit with which the differential (derivative) + is taken. For example, for a velocity differential on a positional + representation, the key would be ``'s'`` for seconds, indicating that + the derivative is a time derivative. + copy : bool, optional + If `True` (default), arrays will be copied rather than referenced. + + Notes + ----- + All representation classes should subclass this base representation class, + and define an ``attr_classes`` attribute, an `~collections.OrderedDict` + which maps component names to the class that creates them. They must also + define a ``to_cartesian`` method and a ``from_cartesian`` class method. By + default, transformations are done via the cartesian system, but classes + that want to define a smarter transformation path can overload the + ``represent_as`` method. If one wants to use an associated differential + class, one should also define ``unit_vectors`` and ``scale_factors`` + methods (see those methods for details). Finally, classes can also define a + ``recommended_units`` dictionary, which maps component names to the units + they are best presented to users in (this is used only in representations + of coordinates, and may be overridden by frame classes). + """ + + recommended_units = {} # subclasses can override + + def __init__(self, *args, **kwargs): + # Handle any differentials passed in. + differentials = kwargs.pop('differentials', None) + super(BaseRepresentation, self).__init__(*args, **kwargs) + self._differentials = self._validate_differentials(differentials) + + def _validate_differentials(self, differentials): + """ + Validate that the provided differentials are appropriate for this + representation and recast/reshape as necessary and then return. + + Note that this does *not* set the differentials on + ``self._differentials``, but rather leaves that for the caller. + """ + + # Now handle the actual validation of any specified differential classes + if differentials is None: + differentials = dict() + + elif isinstance(differentials, BaseDifferential): + # We can't handle auto-determining the key for this combo + if (isinstance(differentials, RadialDifferential) and + isinstance(self, UnitSphericalRepresentation)): + raise ValueError("To attach a RadialDifferential to a " + "UnitSphericalRepresentation, you must supply " + "a dictionary with an appropriate key.") + + key = differentials._get_deriv_key(self) + differentials = {key: differentials} + + for key in differentials: + try: + diff = differentials[key] + except TypeError: + raise TypeError("'differentials' argument must be a " + "dictionary-like object") + + diff._check_base(self) + + if (isinstance(diff, RadialDifferential) and + isinstance(self, UnitSphericalRepresentation)): + # We trust the passing of a key for a RadialDifferential + # attached to a UnitSphericalRepresentation because it will not + # have a paired component name (UnitSphericalRepresentation has + # no .distance) to automatically determine the expected key + pass + + else: + expected_key = diff._get_deriv_key(self) + if key != expected_key: + raise ValueError("For differential object '{0}', expected " + "unit key = '{1}' but received key = '{2}'" + .format(repr(diff), expected_key, key)) + + # For now, we are very rigid: differentials must have the same shape + # as the representation. 
This makes it easier to handle __getitem__ + # and any other shape-changing operations on representations that + # have associated differentials + if diff.shape != self.shape: + # TODO: message of IncompatibleShapeError is not customizable, + # so use a valueerror instead? + raise ValueError("Shape of differentials must be the same " + "as the shape of the representation ({0} vs " + "{1})".format(diff.shape, self.shape)) + + return differentials + + def _raise_if_has_differentials(self, op_name): + """ + Used to raise a consistent exception for any operation that is not + supported when a representation has differentials attached. + """ + if self.differentials: + raise TypeError("Operation '{0}' is not supported when " + "differentials are attached to a {1}." + .format(op_name, self.__class__.__name__)) + + @property + def _compatible_differentials(self): + return [DIFFERENTIAL_CLASSES[self.get_name()]] + + @property + def differentials(self): + """A dictionary of differential class instances. + + The keys of this dictionary must be a string representation of the SI + unit with which the differential (derivative) is taken. For example, for + a velocity differential on a positional representation, the key would be + ``'s'`` for seconds, indicating that the derivative is a time + derivative. + """ + return self._differentials + + # We do not make unit_vectors and scale_factors abstract methods, since + # they are only necessary if one also defines an associated Differential. + # Also, doing so would break pre-differential representation subclasses. + def unit_vectors(self): + r"""Cartesian unit vectors in the direction of each component. + + Given unit vectors :math:`\hat{e}_c` and scale factors :math:`f_c`, + a change in one component of :math:`\delta c` corresponds to a change + in representation of :math:`\delta c \times f_c \times \hat{e}_c`. + + Returns + ------- + unit_vectors : dict of `CartesianRepresentation` + The keys are the component names. + """ + raise NotImplementedError("{} has not implemented unit vectors" + .format(type(self))) + + def scale_factors(self): + r"""Scale factors for each component's direction. + + Given unit vectors :math:`\hat{e}_c` and scale factors :math:`f_c`, + a change in one component of :math:`\delta c` corresponds to a change + in representation of :math:`\delta c \times f_c \times \hat{e}_c`. + + Returns + ------- + scale_factors : dict of `~astropy.units.Quantity` + The keys are the component names. + """ + raise NotImplementedError("{} has not implemented scale factors." + .format(type(self))) + + def _re_represent_differentials(self, new_rep, differential_class): + """Re-represent the differentials to the specified classes. + + This returns a new dictionary with the same keys but with the + attached differentials converted to the new differential classes. + """ + if differential_class is None: + return dict() + + if not self.differentials and differential_class: + raise ValueError("No differentials associated with this " + "representation!") + + elif (len(self.differentials) == 1 and + inspect.isclass(differential_class) and + issubclass(differential_class, BaseDifferential)): + # TODO: is there a better way to do this? 
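+ # A bare differential class was passed in; key it by the single existing
+ # differential so the validation below treats the single-class and
+ # dictionary forms uniformly.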
+ differential_class = {
+ list(self.differentials.keys())[0]: differential_class
+ }
+
+ elif set(differential_class.keys()) != set(self.differentials.keys()):
+ raise ValueError("Desired differential classes must be passed in "
+ "as a dictionary with keys equal to a string "
+ "representation of the unit of the derivative "
+ "for each differential stored with this "
+ "representation object ({0})"
+ .format(self.differentials))
+
+ new_diffs = dict()
+ for k in self.differentials:
+ diff = self.differentials[k]
+ try:
+ new_diffs[k] = diff.represent_as(differential_class[k],
+ base=self)
+ except Exception:
+ if (differential_class[k] not in
+ new_rep._compatible_differentials):
+ raise TypeError("Desired differential class {0} is not "
+ "compatible with the desired "
+ "representation class {1}"
+ .format(differential_class[k],
+ new_rep.__class__))
+ else:
+ raise
+
+ return new_diffs
+
+ def represent_as(self, other_class, differential_class=None):
+ """Convert coordinates to another representation.
+
+ If the instance is of the requested class, it is returned unmodified.
+ By default, conversion is done via cartesian coordinates.
+
+ Parameters
+ ----------
+ other_class : `~astropy.coordinates.BaseRepresentation` subclass
+ The type of representation to turn the coordinates into.
+ differential_class : dict of `~astropy.coordinates.BaseDifferential`, optional
+ Classes in which the differentials should be represented.
+ Can be a single class if only a single differential is attached,
+ otherwise it should be a `dict` keyed by the same keys as the
+ differentials.
+ """
+ if other_class is self.__class__ and not differential_class:
+ return self.without_differentials()
+
+ else:
+ if isinstance(other_class, six.string_types):
+ raise ValueError("Input to a representation's represent_as "
+ "must be a class, not a string. For "
+ "strings, use frame objects")
+
+ # The default is to convert via cartesian coordinates
+ new_rep = other_class.from_cartesian(self.to_cartesian())
+
+ new_rep._differentials = self._re_represent_differentials(
+ new_rep, differential_class)
+
+ return new_rep
+
+ def with_differentials(self, differentials):
+ """
+ Create a new representation with the same positions as this
+ representation, but with these new differentials.
+
+ Differential keys that already exist in this object's differential dict
+ are overwritten.
+
+ Parameters
+ ----------
+ differentials : Sequence of `~astropy.coordinates.BaseDifferential`
+ The differentials for the new representation to have.
+
+ Returns
+ -------
+ newrepr
+ A copy of this representation, but with the ``differentials`` as
+ its differentials.
+ """
+ if not differentials:
+ return self
+
+ args = [getattr(self, component) for component in self.components]
+
+ # We shallow copy the differentials dictionary so we don't update the
+ # current object's dictionary when adding new keys
+ new_rep = self.__class__(*args, differentials=self.differentials.copy(),
+ copy=False)
+ new_rep._differentials.update(
+ new_rep._validate_differentials(differentials))
+
+ return new_rep
+
+ def without_differentials(self):
+ """Return a copy of the representation without attached differentials.
+
+ Returns
+ -------
+ newrepr
+ A shallow copy of this representation, without any differentials.
+ If no differentials were present, no copy is made.
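+
+ Examples
+ --------
+ A minimal sketch (values are arbitrary):
+
+ >>> import astropy.units as u
+ >>> from astropy.coordinates import (CartesianRepresentation,
+ ... CartesianDifferential)
+ >>> diff = CartesianDifferential(1, 2, 3, unit=u.km / u.s)
+ >>> rep = CartesianRepresentation(1, 2, 3, unit=u.kpc,
+ ... differentials=diff)
+ >>> rep.without_differentials().differentials
+ {}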
+ """ + + if not self._differentials: + return self + + args = [getattr(self, component) for component in self.components] + return self.__class__(*args, copy=False) + + @classmethod + def from_representation(cls, representation): + """Create a new instance of this representation from another one. + + Parameters + ---------- + representation : `~astropy.coordinates.BaseRepresentation` instance + The presentation that should be converted to this class. + """ + return representation.represent_as(cls) + + def _apply(self, method, *args, **kwargs): + """Create a new representation with ``method`` applied to the component + data. + + This is not a simple inherit from ``BaseRepresentationOrDifferential`` + because we need to call ``._apply()`` on any associated differential + classes. + + See docstring for `BaseRepresentationOrDifferential._apply`. + + Parameters + ---------- + method : str or callable + If str, it is the name of a method that is applied to the internal + ``components``. If callable, the function is applied. + args : tuple + Any positional arguments for ``method``. + kwargs : dict + Any keyword arguments for ``method``. + + """ + rep = super(BaseRepresentation, self)._apply(method, *args, **kwargs) + + rep._differentials = dict( + [(k, diff._apply(method, *args, **kwargs)) + for k, diff in self._differentials.items()]) + return rep + + def _scale_operation(self, op, *args): + """Scale all non-angular components, leaving angular ones unchanged. + + Parameters + ---------- + op : `~operator` callable + Operator to apply (e.g., `~operator.mul`, `~operator.neg`, etc. + *args + Any arguments required for the operator (typically, what is to + be multiplied with, divided by). + """ + + self._raise_if_has_differentials(op.__name__) + + results = [] + for component, cls in self.attr_classes.items(): + value = getattr(self, component) + if issubclass(cls, Angle): + results.append(value) + else: + results.append(op(value, *args)) + + # try/except catches anything that cannot initialize the class, such + # as operations that returned NotImplemented or a representation + # instead of a quantity (as would happen for, e.g., rep * rep). + try: + return self.__class__(*results) + except Exception: + return NotImplemented + + def _combine_operation(self, op, other, reverse=False): + """Combine two representation. + + By default, operate on the cartesian representations of both. + + Parameters + ---------- + op : `~operator` callable + Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc. + other : `~astropy.coordinates.BaseRepresentation` instance + The other representation. + reverse : bool + Whether the operands should be reversed (e.g., as we got here via + ``self.__rsub__`` because ``self`` is a subclass of ``other``). 
+ """ + self._raise_if_has_differentials(op.__name__) + + result = self.to_cartesian()._combine_operation(op, other, reverse) + if result is NotImplemented: + return NotImplemented + else: + return self.from_cartesian(result) + + # We need to override this setter to support differentials + @BaseRepresentationOrDifferential.shape.setter + def shape(self, shape): + orig_shape = self.shape + + # See: https://stackoverflow.com/questions/3336767/ for an example + BaseRepresentationOrDifferential.shape.fset(self, shape) + + # also try to perform shape-setting on any associated differentials + try: + for k in self.differentials: + self.differentials[k].shape = shape + except Exception: + BaseRepresentationOrDifferential.shape.fset(self, orig_shape) + for k in self.differentials: + self.differentials[k].shape = orig_shape + + raise + + def norm(self): + """Vector norm. + + The norm is the standard Frobenius norm, i.e., the square root of the + sum of the squares of all components with non-angular units. + + Note that any associated differentials will be dropped during this + operation. + + Returns + ------- + norm : `astropy.units.Quantity` + Vector norm, with the same shape as the representation. + """ + return np.sqrt(functools.reduce( + operator.add, (getattr(self, component)**2 + for component, cls in self.attr_classes.items() + if not issubclass(cls, Angle)))) + + def mean(self, *args, **kwargs): + """Vector mean. + + Averaging is done by converting the representation to cartesian, and + taking the mean of the x, y, and z components. The result is converted + back to the same representation as the input. + + Refer to `~numpy.mean` for full documentation of the arguments, noting + that ``axis`` is the entry in the ``shape`` of the representation, and + that the ``out`` argument cannot be used. + + Returns + ------- + mean : representation + Vector mean, in the same representation as that of the input. + """ + self._raise_if_has_differentials('mean') + return self.from_cartesian(self.to_cartesian().mean(*args, **kwargs)) + + def sum(self, *args, **kwargs): + """Vector sum. + + Adding is done by converting the representation to cartesian, and + summing the x, y, and z components. The result is converted back to the + same representation as the input. + + Refer to `~numpy.sum` for full documentation of the arguments, noting + that ``axis`` is the entry in the ``shape`` of the representation, and + that the ``out`` argument cannot be used. + + Returns + ------- + sum : representation + Vector sum, in the same representation as that of the input. + """ + self._raise_if_has_differentials('sum') + return self.from_cartesian(self.to_cartesian().sum(*args, **kwargs)) + + def dot(self, other): + """Dot product of two representations. + + The calculation is done by converting both ``self`` and ``other`` + to `~astropy.coordinates.CartesianRepresentation`. + + Note that any associated differentials will be dropped during this + operation. + + Parameters + ---------- + other : `~astropy.coordinates.BaseRepresentation` + The representation to take the dot product with. + + Returns + ------- + dot_product : `~astropy.units.Quantity` + The sum of the product of the x, y, and z components of the + cartesian representations of ``self`` and ``other``. + """ + return self.to_cartesian().dot(other) + + def cross(self, other): + """Vector cross product of two representations. 
+
+ The calculation is done by converting both ``self`` and ``other``
+ to `~astropy.coordinates.CartesianRepresentation`, and converting the
+ result back to the type of representation of ``self``.
+
+ Parameters
+ ----------
+ other : representation
+ The representation to take the cross product with.
+
+ Returns
+ -------
+ cross_product : representation
+ With vectors perpendicular to both ``self`` and ``other``, in the
+ same type of representation as ``self``.
+ """
+ self._raise_if_has_differentials('cross')
+ return self.from_cartesian(self.to_cartesian().cross(other))
+
+
+class CartesianRepresentation(BaseRepresentation):
+ """
+ Representation of points in 3D cartesian coordinates.
+
+ Parameters
+ ----------
+ x, y, z : `~astropy.units.Quantity` or array
+ The x, y, and z coordinates of the point(s). If ``x``, ``y``, and ``z``
+ have different shapes, they should be broadcastable. If not quantity,
+ ``unit`` should be set. If only ``x`` is given, it is assumed that it
+ contains an array with the 3 coordinates stored along ``xyz_axis``.
+ unit : `~astropy.units.Unit` or str
+ If given, the coordinates will be converted to this unit (or taken to
+ be in this unit if not given).
+ xyz_axis : int, optional
+ The axis along which the coordinates are stored when a single array is
+ provided rather than distinct ``x``, ``y``, and ``z`` (default: 0).
+
+ differentials : dict, `CartesianDifferential`, optional
+ Any differential classes that should be associated with this
+ representation. The input must either be a single
+ `CartesianDifferential` instance, or a dictionary of
+ `CartesianDifferential` s with keys set to a string representation of
+ the SI unit with which the differential (derivative) is taken. For
+ example, for a velocity differential on a positional representation, the
+ key would be ``'s'`` for seconds, indicating that the derivative is a
+ time derivative.
+
+ copy : bool, optional
+ If `True` (default), arrays will be copied rather than referenced.
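+
+ Examples
+ --------
+ A short sketch of typical construction (values are arbitrary):
+
+ >>> import astropy.units as u
+ >>> from astropy.coordinates import CartesianRepresentation
+ >>> rep = CartesianRepresentation([1, 2] * u.pc, [3, 4] * u.pc,
+ ... [5, 6] * u.pc)
+ >>> rep.shape
+ (2,)
+ >>> rep.get_xyz().shape
+ (3, 2)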
+ """ + + attr_classes = OrderedDict([('x', u.Quantity), + ('y', u.Quantity), + ('z', u.Quantity)]) + + def __init__(self, x, y=None, z=None, unit=None, xyz_axis=None, + differentials=None, copy=True): + + if y is None and z is None: + if xyz_axis is not None and xyz_axis != 0: + x = np.rollaxis(x, xyz_axis, 0) + x, y, z = x + elif xyz_axis is not None: + raise ValueError("xyz_axis should only be set if x, y, and z are " + "in a single array passed in through x, " + "i.e., y and z should not be not given.") + elif (y is None and z is not None) or (y is not None and z is None): + raise ValueError("x, y, and z are required to instantiate {0}" + .format(self.__class__.__name__)) + + if unit is not None: + x = u.Quantity(x, unit, copy=copy, subok=True) + y = u.Quantity(y, unit, copy=copy, subok=True) + z = u.Quantity(z, unit, copy=copy, subok=True) + copy = False + + super(CartesianRepresentation, self).__init__(x, y, z, copy=copy, + differentials=differentials) + if not (self._x.unit.physical_type == + self._y.unit.physical_type == self._z.unit.physical_type): + raise u.UnitsError("x, y, and z should have matching physical types") + + def unit_vectors(self): + l = broadcast_to(1.*u.one, self.shape, subok=True) + o = broadcast_to(0.*u.one, self.shape, subok=True) + return OrderedDict( + (('x', CartesianRepresentation(l, o, o, copy=False)), + ('y', CartesianRepresentation(o, l, o, copy=False)), + ('z', CartesianRepresentation(o, o, l, copy=False)))) + + def scale_factors(self): + l = broadcast_to(1.*u.one, self.shape, subok=True) + return OrderedDict((('x', l), ('y', l), ('z', l))) + + def get_xyz(self, xyz_axis=0): + """Return a vector array of the x, y, and z coordinates. + + Parameters + ---------- + xyz_axis : int, optional + The axis in the final array along which the x, y, z components + should be stored (default: 0). + + Returns + ------- + xyz : `~astropy.units.Quantity` + With dimension 3 along ``xyz_axis``. + """ + return _combine_xyz(self._x, self._y, self._z, xyz_axis=xyz_axis) + + xyz = property(get_xyz) + + @classmethod + def from_cartesian(cls, other): + return other + + def to_cartesian(self): + return self + + def transform(self, matrix): + """ + Transform the cartesian coordinates using a 3x3 matrix. + + This returns a new representation and does not modify the original one. + Any differentials attached to this representation will also be + transformed. + + Parameters + ---------- + matrix : `~numpy.ndarray` + A 3x3 transformation matrix, such as a rotation matrix. + + + Examples + -------- + + We can start off by creating a cartesian representation object: + + >>> from astropy import units as u + >>> from astropy.coordinates import CartesianRepresentation + >>> rep = CartesianRepresentation([1, 2] * u.pc, + ... [2, 3] * u.pc, + ... [3, 4] * u.pc) + + We now create a rotation matrix around the z axis: + + >>> from astropy.coordinates.matrix_utilities import rotation_matrix + >>> rotation = rotation_matrix(30 * u.deg, axis='z') + + Finally, we can apply this transformation: + + >>> rep_new = rep.transform(rotation) + >>> rep_new.xyz # doctest: +FLOAT_CMP + + """ + + # Avoid doing gratuitous np.array for things that look like arrays. 
+ try:
+ matrix_shape = matrix.shape
+ except AttributeError:
+ matrix = np.array(matrix)
+ matrix_shape = matrix.shape
+
+ if matrix_shape[-2:] != (3, 3):
+ raise ValueError("tried to do matrix multiplication with an array "
+ "that doesn't end in 3x3")
+
+ # TODO: since this is likely to be a widely used function in coordinate
+ # transforms, it should be optimized (for example in Cython).
+
+ # Get xyz once since it's an expensive operation
+ oldxyz = self.xyz
+ # Note that neither dot nor einsum handles Quantity properly, so we use
+ # the arrays and put the unit back in the end.
+ if self.isscalar and not matrix_shape[:-2]:
+ # a fast path for scalar coordinates.
+ newxyz = matrix.dot(oldxyz.value)
+ else:
+ # Matrix multiply all matrix items and coordinates, broadcasting the
+ # remaining dimensions.
+ newxyz = np.einsum('...ij,j...->i...', matrix, oldxyz.value)
+
+ newxyz = u.Quantity(newxyz, oldxyz.unit, copy=False)
+ # Handle differentials attached to this representation
+ if self.differentials:
+ # TODO: speed this up going via d.d_xyz.
+ new_diffs = dict(
+ (k, d.from_cartesian(d.to_cartesian().transform(matrix)))
+ for k, d in self.differentials.items())
+ else:
+ new_diffs = None
+
+ return self.__class__(*newxyz, copy=False, differentials=new_diffs)
+
+ def _combine_operation(self, op, other, reverse=False):
+ self._raise_if_has_differentials(op.__name__)
+
+ try:
+ other_c = other.to_cartesian()
+ except Exception:
+ return NotImplemented
+
+ first, second = ((self, other_c) if not reverse else
+ (other_c, self))
+ return self.__class__(*(op(getattr(first, component),
+ getattr(second, component))
+ for component in first.components))
+
+ def mean(self, *args, **kwargs):
+ """Vector mean.
+
+ Returns a new CartesianRepresentation instance with the means of the
+ x, y, and z components.
+
+ Refer to `~numpy.mean` for full documentation of the arguments, noting
+ that ``axis`` is the entry in the ``shape`` of the representation, and
+ that the ``out`` argument cannot be used.
+ """
+ self._raise_if_has_differentials('mean')
+ return self._apply('mean', *args, **kwargs)
+
+ def sum(self, *args, **kwargs):
+ """Vector sum.
+
+ Returns a new CartesianRepresentation instance with the sums of the
+ x, y, and z components.
+
+ Refer to `~numpy.sum` for full documentation of the arguments, noting
+ that ``axis`` is the entry in the ``shape`` of the representation, and
+ that the ``out`` argument cannot be used.
+ """
+ self._raise_if_has_differentials('sum')
+ return self._apply('sum', *args, **kwargs)
+
+ def dot(self, other):
+ """Dot product of two representations.
+
+ Note that any associated differentials will be dropped during this
+ operation.
+
+ Parameters
+ ----------
+ other : representation
+ If not already cartesian, it is converted.
+
+ Returns
+ -------
+ dot_product : `~astropy.units.Quantity`
+ The sum of the product of the x, y, and z components of ``self``
+ and ``other``.
+ """
+ try:
+ other_c = other.to_cartesian()
+ except Exception:
+ raise TypeError("can only take the dot product with another "
+ "representation, not a {0} instance."
+ .format(type(other)))
+ return functools.reduce(operator.add,
+ (getattr(self, component) *
+ getattr(other_c, component)
+ for component in self.components))
+
+ def cross(self, other):
+ """Cross product of two representations.
+
+ Parameters
+ ----------
+ other : representation
+ If not already cartesian, it is converted.
+
+ Returns
+ -------
+ cross_product : `~astropy.coordinates.CartesianRepresentation`
+ With vectors perpendicular to both ``self`` and ``other``.
+ """
+ self._raise_if_has_differentials('cross')
+ try:
+ other_c = other.to_cartesian()
+ except Exception:
+ raise TypeError("can only take the cross product with another "
+ "representation, not a {0} instance."
+ .format(type(other)))
+ return self.__class__(self.y * other_c.z - self.z * other_c.y,
+ self.z * other_c.x - self.x * other_c.z,
+ self.x * other_c.y - self.y * other_c.x)
+
+
+class UnitSphericalRepresentation(BaseRepresentation):
+ """
+ Representation of points on a unit sphere.
+
+ Parameters
+ ----------
+ lon, lat : `~astropy.units.Quantity` or str
+ The longitude and latitude of the point(s), in angular units. The
+ latitude should be between -90 and 90 degrees, and the longitude will
+ be wrapped to an angle between 0 and 360 degrees. These can also be
+ instances of `~astropy.coordinates.Angle`,
+ `~astropy.coordinates.Longitude`, or `~astropy.coordinates.Latitude`.
+
+ differentials : dict, `BaseDifferential`, optional
+ Any differential classes that should be associated with this
+ representation. The input must either be a single `BaseDifferential`
+ instance (see `._compatible_differentials` for valid types), or a
+ dictionary of differential instances with keys set to a string
+ representation of the SI unit with which the differential (derivative)
+ is taken. For example, for a velocity differential on a positional
+ representation, the key would be ``'s'`` for seconds, indicating that
+ the derivative is a time derivative.
+
+ copy : bool, optional
+ If `True` (default), arrays will be copied rather than referenced.
+ """
+
+ attr_classes = OrderedDict([('lon', Longitude),
+ ('lat', Latitude)])
+ recommended_units = {'lon': u.deg, 'lat': u.deg}
+
+ @classproperty
+ def _dimensional_representation(cls):
+ return SphericalRepresentation
+
+ def __init__(self, lon, lat, differentials=None, copy=True):
+ super(UnitSphericalRepresentation,
+ self).__init__(lon, lat, differentials=differentials, copy=copy)
+
+ @property
+ def _compatible_differentials(self):
+ return [UnitSphericalDifferential, UnitSphericalCosLatDifferential,
+ SphericalDifferential, SphericalCosLatDifferential,
+ RadialDifferential]
+
+ # Could let the metaclass define these automatically, but good to have
+ # a bit clearer docstrings.
+ @property
+ def lon(self):
+ """
+ The longitude of the point(s).
+ """
+ return self._lon
+
+ @property
+ def lat(self):
+ """
+ The latitude of the point(s).
+ """
+ return self._lat
+
+ def unit_vectors(self):
+ sinlon, coslon = np.sin(self.lon), np.cos(self.lon)
+ sinlat, coslat = np.sin(self.lat), np.cos(self.lat)
+ return OrderedDict(
+ (('lon', CartesianRepresentation(-sinlon, coslon, 0., copy=False)),
+ ('lat', CartesianRepresentation(-sinlat*coslon, -sinlat*sinlon,
+ coslat, copy=False))))
+
+ def scale_factors(self, omit_coslat=False):
+ sf_lat = broadcast_to(1./u.radian, self.shape, subok=True)
+ sf_lon = sf_lat if omit_coslat else np.cos(self.lat) / u.radian
+ return OrderedDict((('lon', sf_lon),
+ ('lat', sf_lat)))
+
+ def to_cartesian(self):
+ """
+ Converts spherical polar coordinates to 3D rectangular cartesian
+ coordinates.
+ """ + x = np.cos(self.lat) * np.cos(self.lon) + y = np.cos(self.lat) * np.sin(self.lon) + z = np.sin(self.lat) + + return CartesianRepresentation(x=x, y=y, z=z, copy=False) + + @classmethod + def from_cartesian(cls, cart): + """ + Converts 3D rectangular cartesian coordinates to spherical polar + coordinates. + """ + + s = np.hypot(cart.x, cart.y) + + lon = np.arctan2(cart.y, cart.x) + lat = np.arctan2(cart.z, s) + + return cls(lon=lon, lat=lat, copy=False) + + def represent_as(self, other_class, differential_class=None): + # Take a short cut if the other class is a spherical representation + + # TODO: this could be optimized to shortcut even if a differential_class + # is passed in, using the ._re_represent_differentials() method + if inspect.isclass(other_class) and not differential_class: + if issubclass(other_class, PhysicsSphericalRepresentation): + return other_class(phi=self.lon, theta=90 * u.deg - self.lat, r=1.0, + copy=False) + elif issubclass(other_class, SphericalRepresentation): + return other_class(lon=self.lon, lat=self.lat, distance=1.0, + copy=False) + + return super(UnitSphericalRepresentation, + self).represent_as(other_class, differential_class) + + def __mul__(self, other): + self._raise_if_has_differentials('multiplication') + return self._dimensional_representation(lon=self.lon, lat=self.lat, + distance=1. * other) + + def __truediv__(self, other): + self._raise_if_has_differentials('division') + return self._dimensional_representation(lon=self.lon, lat=self.lat, + distance=1. / other) + + def __neg__(self): + self._raise_if_has_differentials('negation') + return self.__class__(self.lon + 180. * u.deg, -self.lat, copy=False) + + def norm(self): + """Vector norm. + + The norm is the standard Frobenius norm, i.e., the square root of the + sum of the squares of all components with non-angular units, which is + always unity for vectors on the unit sphere. + + Returns + ------- + norm : `~astropy.units.Quantity` + Dimensionless ones, with the same shape as the representation. + """ + return u.Quantity(np.ones(self.shape), u.dimensionless_unscaled, + copy=False) + + def _combine_operation(self, op, other, reverse=False): + self._raise_if_has_differentials(op.__name__) + + result = self.to_cartesian()._combine_operation(op, other, reverse) + if result is NotImplemented: + return NotImplemented + else: + return self._dimensional_representation.from_cartesian(result) + + def mean(self, *args, **kwargs): + """Vector mean. + + The representation is converted to cartesian, the means of the x, y, + and z components are calculated, and the result is converted to a + `~astropy.coordinates.SphericalRepresentation`. + + Refer to `~numpy.mean` for full documentation of the arguments, noting + that ``axis`` is the entry in the ``shape`` of the representation, and + that the ``out`` argument cannot be used. + """ + self._raise_if_has_differentials('mean') + return self._dimensional_representation.from_cartesian( + self.to_cartesian().mean(*args, **kwargs)) + + def sum(self, *args, **kwargs): + """Vector sum. + + The representation is converted to cartesian, the sums of the x, y, + and z components are calculated, and the result is converted to a + `~astropy.coordinates.SphericalRepresentation`. + + Refer to `~numpy.sum` for full documentation of the arguments, noting + that ``axis`` is the entry in the ``shape`` of the representation, and + that the ``out`` argument cannot be used. 
+ """ + self._raise_if_has_differentials('sum') + return self._dimensional_representation.from_cartesian( + self.to_cartesian().sum(*args, **kwargs)) + + def cross(self, other): + """Cross product of two representations. + + The calculation is done by converting both ``self`` and ``other`` + to `~astropy.coordinates.CartesianRepresentation`, and converting the + result back to `~astropy.coordinates.SphericalRepresentation`. + + Parameters + ---------- + other : representation + The representation to take the cross product with. + + Returns + ------- + cross_product : `~astropy.coordinates.SphericalRepresentation` + With vectors perpendicular to both ``self`` and ``other``. + """ + self._raise_if_has_differentials('cross') + return self._dimensional_representation.from_cartesian( + self.to_cartesian().cross(other)) + + +class RadialRepresentation(BaseRepresentation): + """ + Representation of the distance of points from the origin. + + Note that this is mostly intended as an internal helper representation. + It can do little else but being used as a scale in multiplication. + + Parameters + ---------- + distance : `~astropy.units.Quantity` + The distance of the point(s) from the origin. + + differentials : dict, `BaseDifferential`, optional + Any differential classes that should be associated with this + representation. The input must either be a single `BaseDifferential` + instance (see `._compatible_differentials` for valid types), or a + dictionary of of differential instances with keys set to a string + representation of the SI unit with which the differential (derivative) + is taken. For example, for a velocity differential on a positional + representation, the key would be ``'s'`` for seconds, indicating that + the derivative is a time derivative. + + copy : bool, optional + If `True` (default), arrays will be copied rather than referenced. + """ + + attr_classes = OrderedDict([('distance', u.Quantity)]) + + def __init__(self, distance, differentials=None, copy=True): + super(RadialRepresentation, self).__init__(distance, copy=copy, + differentials=differentials) + + @property + def distance(self): + """ + The distance from the origin to the point(s). + """ + return self._distance + + def unit_vectors(self): + """Cartesian unit vectors are undefined for radial representation.""" + raise NotImplementedError('Cartesian unit vectors are undefined for ' + '{0} instances'.format(self.__class__)) + + def scale_factors(self): + l = broadcast_to(1.*u.one, self.shape, subok=True) + return OrderedDict((('distance', l),)) + + def to_cartesian(self): + """Cannot convert radial representation to cartesian.""" + raise NotImplementedError('cannot convert {0} instance to cartesian.' + .format(self.__class__)) + + @classmethod + def from_cartesian(cls, cart): + """ + Converts 3D rectangular cartesian coordinates to radial coordinate. + """ + return cls(distance=cart.norm(), copy=False) + + def _scale_operation(self, op, *args): + self._raise_if_has_differentials(op.__name__) + return op(self.distance, *args) + + def norm(self): + """Vector norm. + + Just the distance itself. + + Returns + ------- + norm : `~astropy.units.Quantity` + Dimensionless ones, with the same shape as the representation. + """ + return self.distance + + def _combine_operation(self, op, other, reverse=False): + return NotImplemented + + +class SphericalRepresentation(BaseRepresentation): + """ + Representation of points in 3D spherical coordinates. 
+ + Parameters + ---------- + lon, lat : `~astropy.units.Quantity` + The longitude and latitude of the point(s), in angular units. The + latitude should be between -90 and 90 degrees, and the longitude will + be wrapped to an angle between 0 and 360 degrees. These can also be + instances of `~astropy.coordinates.Angle`, + `~astropy.coordinates.Longitude`, or `~astropy.coordinates.Latitude`. + + distance : `~astropy.units.Quantity` + The distance to the point(s). If the distance is a length, it is + passed to the :class:`~astropy.coordinates.Distance` class, otherwise + it is passed to the :class:`~astropy.units.Quantity` class. + + differentials : dict, `BaseDifferential`, optional + Any differential classes that should be associated with this + representation. The input must either be a single `BaseDifferential` + instance (see `._compatible_differentials` for valid types), or a + dictionary of of differential instances with keys set to a string + representation of the SI unit with which the differential (derivative) + is taken. For example, for a velocity differential on a positional + representation, the key would be ``'s'`` for seconds, indicating that + the derivative is a time derivative. + + copy : bool, optional + If `True` (default), arrays will be copied rather than referenced. + """ + + attr_classes = OrderedDict([('lon', Longitude), + ('lat', Latitude), + ('distance', u.Quantity)]) + recommended_units = {'lon': u.deg, 'lat': u.deg} + _unit_representation = UnitSphericalRepresentation + + def __init__(self, lon, lat, distance, differentials=None, copy=True): + super(SphericalRepresentation, + self).__init__(lon, lat, distance, copy=copy, + differentials=differentials) + if self._distance.unit.physical_type == 'length': + self._distance = self._distance.view(Distance) + + @property + def _compatible_differentials(self): + return [UnitSphericalDifferential, UnitSphericalCosLatDifferential, + SphericalDifferential, SphericalCosLatDifferential, + RadialDifferential] + + @property + def lon(self): + """ + The longitude of the point(s). + """ + return self._lon + + @property + def lat(self): + """ + The latitude of the point(s). + """ + return self._lat + + @property + def distance(self): + """ + The distance from the origin to the point(s). 
+ """ + return self._distance + + def unit_vectors(self): + sinlon, coslon = np.sin(self.lon), np.cos(self.lon) + sinlat, coslat = np.sin(self.lat), np.cos(self.lat) + return OrderedDict( + (('lon', CartesianRepresentation(-sinlon, coslon, 0., copy=False)), + ('lat', CartesianRepresentation(-sinlat*coslon, -sinlat*sinlon, + coslat, copy=False)), + ('distance', CartesianRepresentation(coslat*coslon, coslat*sinlon, + sinlat, copy=False)))) + + def scale_factors(self, omit_coslat=False): + sf_lat = self.distance / u.radian + sf_lon = sf_lat if omit_coslat else sf_lat * np.cos(self.lat) + sf_distance = broadcast_to(1.*u.one, self.shape, subok=True) + return OrderedDict((('lon', sf_lon), + ('lat', sf_lat), + ('distance', sf_distance))) + + def represent_as(self, other_class, differential_class=None): + # Take a short cut if the other class is a spherical representation + + # TODO: this could be optimized to shortcut even if a differential_class + # is passed in, using the ._re_represent_differentials() method + if inspect.isclass(other_class) and not differential_class: + if issubclass(other_class, PhysicsSphericalRepresentation): + return other_class(phi=self.lon, theta=90 * u.deg - self.lat, + r=self.distance, copy=False) + elif issubclass(other_class, UnitSphericalRepresentation): + return other_class(lon=self.lon, lat=self.lat, copy=False) + + return super(SphericalRepresentation, + self).represent_as(other_class, differential_class) + + def to_cartesian(self): + """ + Converts spherical polar coordinates to 3D rectangular cartesian + coordinates. + """ + + # We need to convert Distance to Quantity to allow negative values. + if isinstance(self.distance, Distance): + d = self.distance.view(u.Quantity) + else: + d = self.distance + + x = d * np.cos(self.lat) * np.cos(self.lon) + y = d * np.cos(self.lat) * np.sin(self.lon) + z = d * np.sin(self.lat) + + return CartesianRepresentation(x=x, y=y, z=z, copy=False) + + @classmethod + def from_cartesian(cls, cart): + """ + Converts 3D rectangular cartesian coordinates to spherical polar + coordinates. + """ + + s = np.hypot(cart.x, cart.y) + r = np.hypot(s, cart.z) + + lon = np.arctan2(cart.y, cart.x) + lat = np.arctan2(cart.z, s) + + return cls(lon=lon, lat=lat, distance=r, copy=False) + + def norm(self): + """Vector norm. + + The norm is the standard Frobenius norm, i.e., the square root of the + sum of the squares of all components with non-angular units. For + spherical coordinates, this is just the absolute value of the distance. + + Returns + ------- + norm : `astropy.units.Quantity` + Vector norm, with the same shape as the representation. + """ + return np.abs(self.distance) + + +class PhysicsSphericalRepresentation(BaseRepresentation): + """ + Representation of points in 3D spherical coordinates (using the physics + convention of using ``phi`` and ``theta`` for azimuth and inclination + from the pole). + + Parameters + ---------- + phi, theta : `~astropy.units.Quantity` or str + The azimuth and inclination of the point(s), in angular units. The + inclination should be between 0 and 180 degrees, and the azimuth will + be wrapped to an angle between 0 and 360 degrees. These can also be + instances of `~astropy.coordinates.Angle`. If ``copy`` is False, `phi` + will be changed inplace if it is not between 0 and 360 degrees. + + r : `~astropy.units.Quantity` + The distance to the point(s). 
If the distance is a length, it is
+ passed to the :class:`~astropy.coordinates.Distance` class, otherwise
+ it is passed to the :class:`~astropy.units.Quantity` class.
+
+ differentials : dict, `PhysicsSphericalDifferential`, optional
+ Any differential classes that should be associated with this
+ representation. The input must either be a single
+ `PhysicsSphericalDifferential` instance, or a dictionary of
+ differential instances with keys set to a string representation of the
+ SI unit with which the differential (derivative) is taken. For example,
+ for a velocity differential on a positional representation, the key
+ would be ``'s'`` for seconds, indicating that the derivative is a time
+ derivative.
+
+ copy : bool, optional
+ If `True` (default), arrays will be copied rather than referenced.
+ """
+
+ attr_classes = OrderedDict([('phi', Angle),
+ ('theta', Angle),
+ ('r', u.Quantity)])
+ recommended_units = {'phi': u.deg, 'theta': u.deg}
+
+ def __init__(self, phi, theta, r, differentials=None, copy=True):
+ super(PhysicsSphericalRepresentation,
+ self).__init__(phi, theta, r, copy=copy,
+ differentials=differentials)
+
+ # Wrap/validate phi/theta
+ if copy:
+ self._phi = self._phi.wrap_at(360 * u.deg)
+ else:
+ # necessary because the above version of `wrap_at` has to be a copy
+ self._phi.wrap_at(360 * u.deg, inplace=True)
+
+ if np.any(self._theta < 0.*u.deg) or np.any(self._theta > 180.*u.deg):
+ raise ValueError('Inclination angle(s) must be within '
+ '0 deg <= angle <= 180 deg, '
+ 'got {0}'.format(theta.to(u.degree)))
+
+ if self._r.unit.physical_type == 'length':
+ self._r = self._r.view(Distance)
+
+ @property
+ def phi(self):
+ """
+ The azimuth of the point(s).
+ """
+ return self._phi
+
+ @property
+ def theta(self):
+ """
+ The inclination of the point(s).
+ """
+ return self._theta
+
+ @property
+ def r(self):
+ """
+ The distance from the origin to the point(s).
+ """
+ return self._r
+
+ def unit_vectors(self):
+ sinphi, cosphi = np.sin(self.phi), np.cos(self.phi)
+ sintheta, costheta = np.sin(self.theta), np.cos(self.theta)
+ return OrderedDict(
+ (('phi', CartesianRepresentation(-sinphi, cosphi, 0., copy=False)),
+ ('theta', CartesianRepresentation(costheta*cosphi,
+ costheta*sinphi,
+ -sintheta, copy=False)),
+ ('r', CartesianRepresentation(sintheta*cosphi, sintheta*sinphi,
+ costheta, copy=False))))
+
+ def scale_factors(self):
+ r = self.r / u.radian
+ sintheta = np.sin(self.theta)
+ l = broadcast_to(1.*u.one, self.shape, subok=True)
+ return OrderedDict((('phi', r * sintheta),
+ ('theta', r),
+ ('r', l)))
+
+ def represent_as(self, other_class, differential_class=None):
+ # Take a short cut if the other class is a spherical representation
+
+ # TODO: this could be optimized to shortcut even if a differential_class
+ # is passed in, using the ._re_represent_differentials() method
+ if inspect.isclass(other_class) and not differential_class:
+ if issubclass(other_class, SphericalRepresentation):
+ return other_class(lon=self.phi, lat=90 * u.deg - self.theta,
+ distance=self.r)
+ elif issubclass(other_class, UnitSphericalRepresentation):
+ return other_class(lon=self.phi, lat=90 * u.deg - self.theta)
+
+ return super(PhysicsSphericalRepresentation,
+ self).represent_as(other_class, differential_class)
+
+ def to_cartesian(self):
+ """
+ Converts spherical polar coordinates to 3D rectangular cartesian
+ coordinates.
+ """
+
+ # We need to convert Distance to Quantity to allow negative values.
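+ # (A Distance cannot be negative, but the cartesian components, e.g.
+ # z = r * cos(theta), can be; hence the plain Quantity view below.)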
+ if isinstance(self.r, Distance): + d = self.r.view(u.Quantity) + else: + d = self.r + + x = d * np.sin(self.theta) * np.cos(self.phi) + y = d * np.sin(self.theta) * np.sin(self.phi) + z = d * np.cos(self.theta) + + return CartesianRepresentation(x=x, y=y, z=z, copy=False) + + @classmethod + def from_cartesian(cls, cart): + """ + Converts 3D rectangular cartesian coordinates to spherical polar + coordinates. + """ + + s = np.hypot(cart.x, cart.y) + r = np.hypot(s, cart.z) + + phi = np.arctan2(cart.y, cart.x) + theta = np.arctan2(s, cart.z) + + return cls(phi=phi, theta=theta, r=r, copy=False) + + def norm(self): + """Vector norm. + + The norm is the standard Frobenius norm, i.e., the square root of the + sum of the squares of all components with non-angular units. For + spherical coordinates, this is just the absolute value of the radius. + + Returns + ------- + norm : `astropy.units.Quantity` + Vector norm, with the same shape as the representation. + """ + return np.abs(self.r) + + +class CylindricalRepresentation(BaseRepresentation): + """ + Representation of points in 3D cylindrical coordinates. + + Parameters + ---------- + rho : `~astropy.units.Quantity` + The distance from the z axis to the point(s). + + phi : `~astropy.units.Quantity` or str + The azimuth of the point(s), in angular units, which will be wrapped + to an angle between 0 and 360 degrees. This can also be instances of + `~astropy.coordinates.Angle`, + + z : `~astropy.units.Quantity` + The z coordinate(s) of the point(s) + + differentials : dict, `CylindricalDifferential`, optional + Any differential classes that should be associated with this + representation. The input must either be a single + `CylindricalDifferential` instance, or a dictionary of of differential + instances with keys set to a string representation of the SI unit with + which the differential (derivative) is taken. For example, for a + velocity differential on a positional representation, the key would be + ``'s'`` for seconds, indicating that the derivative is a time + derivative. + + copy : bool, optional + If `True` (default), arrays will be copied rather than referenced. + """ + + attr_classes = OrderedDict([('rho', u.Quantity), + ('phi', Angle), + ('z', u.Quantity)]) + recommended_units = {'phi': u.deg} + + def __init__(self, rho, phi, z, differentials=None, copy=True): + super(CylindricalRepresentation, + self).__init__(rho, phi, z, copy=copy, + differentials=differentials) + + if not self._rho.unit.is_equivalent(self._z.unit): + raise u.UnitsError("rho and z should have matching physical types") + + @property + def rho(self): + """ + The distance of the point(s) from the z-axis. + """ + return self._rho + + @property + def phi(self): + """ + The azimuth of the point(s). + """ + return self._phi + + @property + def z(self): + """ + The height of the point(s). + """ + return self._z + + def unit_vectors(self): + sinphi, cosphi = np.sin(self.phi), np.cos(self.phi) + l = broadcast_to(1., self.shape) + return OrderedDict( + (('rho', CartesianRepresentation(cosphi, sinphi, 0, copy=False)), + ('phi', CartesianRepresentation(-sinphi, cosphi, 0, copy=False)), + ('z', CartesianRepresentation(0, 0, l, unit=u.one, copy=False)))) + + def scale_factors(self): + rho = self.rho / u.radian + l = broadcast_to(1.*u.one, self.shape, subok=True) + return OrderedDict((('rho', l), + ('phi', rho), + ('z', l))) + + @classmethod + def from_cartesian(cls, cart): + """ + Converts 3D rectangular cartesian coordinates to cylindrical polar + coordinates. 
+ """ + + rho = np.hypot(cart.x, cart.y) + phi = np.arctan2(cart.y, cart.x) + z = cart.z + + return cls(rho=rho, phi=phi, z=z, copy=False) + + def to_cartesian(self): + """ + Converts cylindrical polar coordinates to 3D rectangular cartesian + coordinates. + """ + x = self.rho * np.cos(self.phi) + y = self.rho * np.sin(self.phi) + z = self.z + + return CartesianRepresentation(x=x, y=y, z=z, copy=False) + + +class MetaBaseDifferential(InheritDocstrings, abc.ABCMeta): + """Set default ``attr_classes`` and component getters on a Differential. + + For these, the components are those of the base representation prefixed + by 'd_', and the class is `~astropy.units.Quantity`. + """ + def __init__(cls, name, bases, dct): + super(MetaBaseDifferential, cls).__init__(name, bases, dct) + + # Don't do anything for base helper classes. + if cls.__name__ in ('BaseDifferential', 'BaseSphericalDifferential', + 'BaseSphericalCosLatDifferential'): + return + + if 'base_representation' not in dct: + raise NotImplementedError('Differential representations must have a' + '"base_representation" class attribute.') + + # If not defined explicitly, create attr_classes. + if not hasattr(cls, 'attr_classes'): + base_attr_classes = cls.base_representation.attr_classes + cls.attr_classes = OrderedDict([('d_' + c, u.Quantity) + for c in base_attr_classes]) + + repr_name = cls.get_name() + if repr_name in DIFFERENTIAL_CLASSES: + raise ValueError("Differential class {0} already defined" + .format(repr_name)) + + DIFFERENTIAL_CLASSES[repr_name] = cls + + # If not defined explicitly, create properties for the components. + for component in cls.attr_classes: + if not hasattr(cls, component): + setattr(cls, component, + property(_make_getter(component), + doc=("Component '{0}' of the Differential." + .format(component)))) + + +@six.add_metaclass(MetaBaseDifferential) +class BaseDifferential(BaseRepresentationOrDifferential): + r"""A base class representing differentials of representations. + + These represent differences or derivatives along each component. + E.g., for physics spherical coordinates, these would be + :math:`\delta r, \delta \theta, \delta \phi`. + + Parameters + ---------- + d_comp1, d_comp2, d_comp3 : `~astropy.units.Quantity` or subclass + The components of the 3D differentials. The names are the keys and the + subclasses the values of the ``attr_classes`` attribute. + copy : bool, optional + If `True` (default), arrays will be copied rather than referenced. + + Notes + ----- + All differential representation classes should subclass this base class, + and define an ``base_representation`` attribute with the class of the + regular `~astropy.coordinates.BaseRepresentation` for which differential + coordinates are provided. This will set up a default ``attr_classes`` + instance with names equal to the base component names prefixed by ``d_``, + and all classes set to `~astropy.units.Quantity`, plus properties to access + those, and a default ``__init__`` for initialization. + """ + + recommended_units = {} # subclasses can override + + @classmethod + def _check_base(cls, base): + if cls not in base._compatible_differentials: + raise TypeError("Differential class {0} is not compatible with the " + "base (representation) class {1}" + .format(cls, base.__class__)) + + def _get_deriv_key(self, base): + """Given a base (representation instance), determine the unit of the + derivative by removing the representation unit from the component units + of this differential. 
+ """ + + # This check is just a last resort so we don't return a strange unit key + # from accidentally passing in the wrong base. + self._check_base(base) + + for name in base.components: + comp = getattr(base, name) + d_comp = getattr(self, 'd_{0}'.format(name), None) + if d_comp: + d_unit = comp.unit / d_comp.unit + # Get the si unit without a scale by going via Quantity; + # `.si` causes the scale to be included in the value. + return str(u.Quantity(1., d_unit).si.unit) + + else: + raise RuntimeError("Invalid representation-differential match! Not " + "sure how we got into this state.") + + @classmethod + def _get_base_vectors(cls, base): + """Get unit vectors and scale factors from base. + + Parameters + ---------- + base : instance of ``self.base_representation`` + The points for which the unit vectors and scale factors should be + retrieved. + + Returns + ------- + unit_vectors : dict of `CartesianRepresentation` + In the directions of the coordinates of base. + scale_factors : dict of `~astropy.units.Quantity` + Scale factors for each of the coordinates + + Raises + ------ + TypeError : if the base is not of the correct type + """ + cls._check_base(base) + return base.unit_vectors(), base.scale_factors() + + def to_cartesian(self, base): + """Convert the differential to 3D rectangular cartesian coordinates. + + Parameters + ---------- + base : instance of ``self.base_representation`` + The points for which the differentials are to be converted: each of + the components is multiplied by its unit vectors and scale factors. + + Returns + ------- + This object as a `CartesianDifferential` + """ + base_e, base_sf = self._get_base_vectors(base) + return functools.reduce( + operator.add, (getattr(self, d_c) * base_sf[c] * base_e[c] + for d_c, c in zip(self.components, base.components))) + + @classmethod + def from_cartesian(cls, other, base): + """Convert the differential from 3D rectangular cartesian coordinates to + the desired class. + + Parameters + ---------- + other : + The object to convert into this differential. + base : instance of ``self.base_representation`` + The points for which the differentials are to be converted: each of + the components is multiplied by its unit vectors and scale factors. + + Returns + ------- + A new differential object that is this class' type. + """ + base_e, base_sf = cls._get_base_vectors(base) + return cls(*(other.dot(e / base_sf[component]) + for component, e in six.iteritems(base_e)), copy=False) + + def represent_as(self, other_class, base): + """Convert coordinates to another representation. + + If the instance is of the requested class, it is returned unmodified. + By default, conversion is done via cartesian coordinates. + + Parameters + ---------- + other_class : `~astropy.coordinates.BaseRepresentation` subclass + The type of representation to turn the coordinates into. + base : instance of ``self.base_representation``, optional + Base relative to which the differentials are defined. If the other + class is a differential representation, the base will be converted + to its ``base_representation``. + """ + if other_class is self.__class__: + return self + + # The default is to convert via cartesian coordinates. 
+        self_cartesian = self.to_cartesian(base)
+        if issubclass(other_class, BaseDifferential):
+            base = base.represent_as(other_class.base_representation)
+            return other_class.from_cartesian(self_cartesian, base)
+        else:
+            return other_class.from_cartesian(self_cartesian)
+
+    @classmethod
+    def from_representation(cls, representation, base):
+        """Create a new instance of this representation from another one.
+
+        Parameters
+        ----------
+        representation : `~astropy.coordinates.BaseRepresentation` instance
+            The representation that should be converted to this class.
+        base : instance of ``cls.base_representation``
+            The base relative to which the differentials will be defined.  If
+            the representation is a differential itself, the base will be
+            converted to its ``base_representation`` to help convert it.
+        """
+        if isinstance(representation, BaseDifferential):
+            cartesian = representation.to_cartesian(
+                base.represent_as(representation.base_representation))
+        else:
+            cartesian = representation.to_cartesian()
+
+        return cls.from_cartesian(cartesian, base)
+
+    def _scale_operation(self, op, *args):
+        """Scale all components.
+
+        Parameters
+        ----------
+        op : `~operator` callable
+            Operator to apply (e.g., `~operator.mul`, `~operator.neg`, etc.)
+        *args
+            Any arguments required for the operator (typically, what is to
+            be multiplied with, divided by).
+        """
+        scaled_attrs = [op(getattr(self, c), *args) for c in self.components]
+        return self.__class__(*scaled_attrs, copy=False)
+
+    def _combine_operation(self, op, other, reverse=False):
+        """Combine two differentials, or a differential with a representation.
+
+        If ``other`` is of the same differential type as ``self``, the
+        components will simply be combined.  If ``other`` is a representation,
+        it will be used as a base for which to evaluate the differential,
+        and the result is a new representation.
+
+        Parameters
+        ----------
+        op : `~operator` callable
+            Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc.)
+        other : `~astropy.coordinates.BaseRepresentation` instance
+            The other differential or representation.
+        reverse : bool
+            Whether the operands should be reversed (e.g., as we got here via
+            ``self.__rsub__`` because ``self`` is a subclass of ``other``).
+        """
+        if isinstance(self, type(other)):
+            first, second = (self, other) if not reverse else (other, self)
+            return self.__class__(*[op(getattr(first, c), getattr(second, c))
+                                    for c in self.components])
+        else:
+            try:
+                self_cartesian = self.to_cartesian(other)
+            except TypeError:
+                return NotImplemented
+
+            return other._combine_operation(op, self_cartesian, not reverse)
+
+    def __sub__(self, other):
+        # avoid "differential - representation".
+        if isinstance(other, BaseRepresentation):
+            return NotImplemented
+        return super(BaseDifferential, self).__sub__(other)
+
+    def norm(self, base=None):
+        """Vector norm.
+
+        The norm is the standard Frobenius norm, i.e., the square root of the
+        sum of the squares of all components with non-angular units.
+
+        Parameters
+        ----------
+        base : instance of ``self.base_representation``
+            Base relative to which the differentials are defined.  This is
+            required to calculate the physical size of the differential for
+            all but cartesian differentials.
+
+        Returns
+        -------
+        norm : `astropy.units.Quantity`
+            Vector norm, with the same shape as the representation.
+        """
+        return self.to_cartesian(base).norm()
+
+
+class CartesianDifferential(BaseDifferential):
+    """Differentials of points in 3D cartesian coordinates.
+
+    Parameters
+    ----------
+    d_x, d_y, d_z : `~astropy.units.Quantity` or array
+        The x, y, and z coordinates of the differentials.  If ``d_x``, ``d_y``,
+        and ``d_z`` have different shapes, they should be broadcastable.  If not
+        quantities, ``unit`` should be set.  If only ``d_x`` is given, it is
+        assumed that it contains an array with the 3 coordinates stored along
+        ``xyz_axis``.
+    unit : `~astropy.units.Unit` or str
+        If given, the differentials will be converted to this unit (or taken
+        to be in this unit if not given).
+    xyz_axis : int, optional
+        The axis along which the coordinates are stored when a single array is
+        provided instead of distinct ``d_x``, ``d_y``, and ``d_z`` (default: 0).
+    copy : bool, optional
+        If `True` (default), arrays will be copied rather than referenced.
+    """
+    base_representation = CartesianRepresentation
+
+    def __init__(self, d_x, d_y=None, d_z=None, unit=None, xyz_axis=None,
+                 copy=True):
+
+        if d_y is None and d_z is None:
+            if xyz_axis is not None and xyz_axis != 0:
+                d_x = np.rollaxis(d_x, xyz_axis, 0)
+            d_x, d_y, d_z = d_x
+        elif xyz_axis is not None:
+            raise ValueError("xyz_axis should only be set if d_x, d_y, and d_z "
+                             "are in a single array passed in through d_x, "
+                             "i.e., d_y and d_z should not be given.")
+        elif ((d_y is None and d_z is not None) or
+              (d_y is not None and d_z is None)):
+            raise ValueError("d_x, d_y, and d_z are required to instantiate {0}"
+                             .format(self.__class__.__name__))
+
+        if unit is not None:
+            d_x = u.Quantity(d_x, unit, copy=copy, subok=True)
+            d_y = u.Quantity(d_y, unit, copy=copy, subok=True)
+            d_z = u.Quantity(d_z, unit, copy=copy, subok=True)
+            copy = False
+
+        super(CartesianDifferential, self).__init__(d_x, d_y, d_z, copy=copy)
+        if not (self._d_x.unit.is_equivalent(self._d_y.unit) and
+                self._d_x.unit.is_equivalent(self._d_z.unit)):
+            raise u.UnitsError('d_x, d_y and d_z should have equivalent units.')
+
+    def to_cartesian(self, base=None):
+        return CartesianRepresentation(*[getattr(self, c) for c
+                                         in self.components])
+
+    @classmethod
+    def from_cartesian(cls, other, base=None):
+        return cls(*[getattr(other, c) for c in other.components])
+
+    def get_d_xyz(self, xyz_axis=0):
+        """Return a vector array of the x, y, and z coordinates.
+
+        Parameters
+        ----------
+        xyz_axis : int, optional
+            The axis in the final array along which the x, y, z components
+            should be stored (default: 0).
+
+        Returns
+        -------
+        xyz : `~astropy.units.Quantity`
+            With dimension 3 along ``xyz_axis``.
+        """
+        return _combine_xyz(self._d_x, self._d_y, self._d_z, xyz_axis=xyz_axis)
+
+    d_xyz = property(get_d_xyz)
+
+
+class BaseSphericalDifferential(BaseDifferential):
+    def _d_lon_coslat(self, base):
+        """Convert longitude differential d_lon to d_lon_coslat.
+
+        Parameters
+        ----------
+        base : instance of ``cls.base_representation``
+            The base from which the latitude will be taken.
+        """
+        self._check_base(base)
+        return self.d_lon * np.cos(base.lat)
+
+    @classmethod
+    def _get_d_lon(cls, d_lon_coslat, base):
+        """Convert longitude differential d_lon_coslat to d_lon.
+
+        Parameters
+        ----------
+        d_lon_coslat : `~astropy.units.Quantity`
+            Longitude differential that includes ``cos(lat)``.
+        base : instance of ``cls.base_representation``
+            The base from which the latitude will be taken.
+        """
+        cls._check_base(base)
+        return d_lon_coslat / np.cos(base.lat)
+
+    def _combine_operation(self, op, other, reverse=False):
+        """Combine two differentials, or a differential with a representation.
+
+        If ``other`` is of the same differential type as ``self``, the
+        components will simply be combined.  If both are different parts of
+        a `~astropy.coordinates.SphericalDifferential` (e.g., a
+        `~astropy.coordinates.UnitSphericalDifferential` and a
+        `~astropy.coordinates.RadialDifferential`), they will be combined
+        appropriately.
+
+        If ``other`` is a representation, it will be used as a base for which
+        to evaluate the differential, and the result is a new representation.
+
+        Parameters
+        ----------
+        op : `~operator` callable
+            Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc.)
+        other : `~astropy.coordinates.BaseRepresentation` instance
+            The other differential or representation.
+        reverse : bool
+            Whether the operands should be reversed (e.g., as we got here via
+            ``self.__rsub__`` because ``self`` is a subclass of ``other``).
+        """
+        if (isinstance(other, BaseSphericalDifferential) and
+                not isinstance(self, type(other)) or
+                isinstance(other, RadialDifferential)):
+            all_components = set(self.components) | set(other.components)
+            first, second = (self, other) if not reverse else (other, self)
+            result_args = {c: op(getattr(first, c, 0.), getattr(second, c, 0.))
+                           for c in all_components}
+            return SphericalDifferential(**result_args)
+
+        return super(BaseSphericalDifferential,
+                     self)._combine_operation(op, other, reverse)
+
+
+class UnitSphericalDifferential(BaseSphericalDifferential):
+    """Differential(s) of points on a unit sphere.
+
+    Parameters
+    ----------
+    d_lon, d_lat : `~astropy.units.Quantity`
+        The longitude and latitude of the differentials.
+    copy : bool, optional
+        If `True` (default), arrays will be copied rather than referenced.
+    """
+    base_representation = UnitSphericalRepresentation
+
+    @classproperty
+    def _dimensional_differential(cls):
+        return SphericalDifferential
+
+    def __init__(self, d_lon, d_lat, copy=True):
+        super(UnitSphericalDifferential,
+              self).__init__(d_lon, d_lat, copy=copy)
+        if not self._d_lon.unit.is_equivalent(self._d_lat.unit):
+            raise u.UnitsError('d_lon and d_lat should have equivalent units.')
+
+    def to_cartesian(self, base):
+        if isinstance(base, SphericalRepresentation):
+            scale = base.distance
+        elif isinstance(base, PhysicsSphericalRepresentation):
+            scale = base.r
+        else:
+            return super(UnitSphericalDifferential, self).to_cartesian(base)
+
+        base = base.represent_as(UnitSphericalRepresentation)
+        return scale * super(UnitSphericalDifferential, self).to_cartesian(base)
+
+    def represent_as(self, other_class, base=None):
+        # Only have enough information to represent other unit-spherical.
+        if issubclass(other_class, UnitSphericalCosLatDifferential):
+            return other_class(self._d_lon_coslat(base), self.d_lat)
+
+        return super(UnitSphericalDifferential,
+                     self).represent_as(other_class, base)
+
+    @classmethod
+    def from_representation(cls, representation, base=None):
+        # All spherical differentials can be done without going to Cartesian,
+        # though CosLat needs base for the latitude.
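+        # (E.g., recovering d_lon from a CosLat differential just divides
+        # d_lon_coslat by cos(lat) of the base; no cartesian intermediary
+        # is needed.)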
+        if isinstance(representation, SphericalDifferential):
+            return cls(representation.d_lon, representation.d_lat)
+        elif isinstance(representation, (SphericalCosLatDifferential,
+                                         UnitSphericalCosLatDifferential)):
+            d_lon = cls._get_d_lon(representation.d_lon_coslat, base)
+            return cls(d_lon, representation.d_lat)
+        elif isinstance(representation, PhysicsSphericalDifferential):
+            return cls(representation.d_phi, -representation.d_theta)
+
+        return super(UnitSphericalDifferential,
+                     cls).from_representation(representation, base)
+
+
+class SphericalDifferential(BaseSphericalDifferential):
+    """Differential(s) of points in 3D spherical coordinates.
+
+    Parameters
+    ----------
+    d_lon, d_lat : `~astropy.units.Quantity`
+        The differential longitude and latitude.
+    d_distance : `~astropy.units.Quantity`
+        The differential distance.
+    copy : bool, optional
+        If `True` (default), arrays will be copied rather than referenced.
+    """
+    base_representation = SphericalRepresentation
+    _unit_differential = UnitSphericalDifferential
+
+    def __init__(self, d_lon, d_lat, d_distance, copy=True):
+        super(SphericalDifferential, self).__init__(d_lon, d_lat, d_distance,
+                                                    copy=copy)
+        if not self._d_lon.unit.is_equivalent(self._d_lat.unit):
+            raise u.UnitsError('d_lon and d_lat should have equivalent units.')
+
+    def represent_as(self, other_class, base=None):
+        # All spherical differentials can be done without going to Cartesian,
+        # though CosLat needs base for the latitude.
+        if issubclass(other_class, UnitSphericalDifferential):
+            return other_class(self.d_lon, self.d_lat)
+        elif issubclass(other_class, RadialDifferential):
+            return other_class(self.d_distance)
+        elif issubclass(other_class, SphericalCosLatDifferential):
+            return other_class(self._d_lon_coslat(base), self.d_lat,
+                               self.d_distance)
+        elif issubclass(other_class, UnitSphericalCosLatDifferential):
+            return other_class(self._d_lon_coslat(base), self.d_lat)
+        elif issubclass(other_class, PhysicsSphericalDifferential):
+            return other_class(self.d_lon, -self.d_lat, self.d_distance)
+        else:
+            return super(SphericalDifferential,
+                         self).represent_as(other_class, base)
+
+    @classmethod
+    def from_representation(cls, representation, base=None):
+        # Other spherical differentials can be done without going to Cartesian,
+        # though CosLat needs base for the latitude.
+        if isinstance(representation, SphericalCosLatDifferential):
+            d_lon = cls._get_d_lon(representation.d_lon_coslat, base)
+            return cls(d_lon, representation.d_lat, representation.d_distance)
+        elif isinstance(representation, PhysicsSphericalDifferential):
+            return cls(representation.d_phi, -representation.d_theta,
+                       representation.d_r)
+
+        return super(SphericalDifferential,
+                     cls).from_representation(representation, base)
+
+
+class BaseSphericalCosLatDifferential(BaseDifferential):
+    """Differentials of points on a spherical base representation.
+
+    With cos(lat) assumed to be included in the longitude differential.
+    """
+    @classmethod
+    def _get_base_vectors(cls, base):
+        """Get unit vectors and scale factors from (unit)spherical base.
+
+        Parameters
+        ----------
+        base : instance of ``self.base_representation``
+            The points for which the unit vectors and scale factors should be
+            retrieved.
+
+        Returns
+        -------
+        unit_vectors : dict of `CartesianRepresentation`
+            In the directions of the coordinates of base.
+        scale_factors : dict of `~astropy.units.Quantity`
+            Scale factors for each of the coordinates.  The scale factor for
+            longitude does not include the cos(lat) factor.
+
+        Raises
+        ------
+        TypeError : if the base is not of the correct type
+        """
+        cls._check_base(base)
+        return base.unit_vectors(), base.scale_factors(omit_coslat=True)
+
+    def _d_lon(self, base):
+        """Convert longitude differential with cos(lat) to one without.
+
+        Parameters
+        ----------
+        base : instance of ``cls.base_representation``
+            The base from which the latitude will be taken.
+        """
+        self._check_base(base)
+        return self.d_lon_coslat / np.cos(base.lat)
+
+    @classmethod
+    def _get_d_lon_coslat(cls, d_lon, base):
+        """Convert longitude differential d_lon to d_lon_coslat.
+
+        Parameters
+        ----------
+        d_lon : `~astropy.units.Quantity`
+            Value of the longitude differential without ``cos(lat)``.
+        base : instance of ``cls.base_representation``
+            The base from which the latitude will be taken.
+        """
+        cls._check_base(base)
+        return d_lon * np.cos(base.lat)
+
+    def _combine_operation(self, op, other, reverse=False):
+        """Combine two differentials, or a differential with a representation.
+
+        If ``other`` is of the same differential type as ``self``, the
+        components will simply be combined.  If both are different parts of
+        a `~astropy.coordinates.SphericalDifferential` (e.g., a
+        `~astropy.coordinates.UnitSphericalDifferential` and a
+        `~astropy.coordinates.RadialDifferential`), they will be combined
+        appropriately.
+
+        If ``other`` is a representation, it will be used as a base for which
+        to evaluate the differential, and the result is a new representation.
+
+        Parameters
+        ----------
+        op : `~operator` callable
+            Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc.)
+        other : `~astropy.coordinates.BaseRepresentation` instance
+            The other differential or representation.
+        reverse : bool
+            Whether the operands should be reversed (e.g., as we got here via
+            ``self.__rsub__`` because ``self`` is a subclass of ``other``).
+        """
+        if (isinstance(other, BaseSphericalCosLatDifferential) and
+                not isinstance(self, type(other)) or
+                isinstance(other, RadialDifferential)):
+            all_components = set(self.components) | set(other.components)
+            first, second = (self, other) if not reverse else (other, self)
+            result_args = {c: op(getattr(first, c, 0.), getattr(second, c, 0.))
+                           for c in all_components}
+            return SphericalCosLatDifferential(**result_args)
+
+        return super(BaseSphericalCosLatDifferential,
+                     self)._combine_operation(op, other, reverse)
+
+
+class UnitSphericalCosLatDifferential(BaseSphericalCosLatDifferential):
+    """Differential(s) of points on a unit sphere.
+
+    Parameters
+    ----------
+    d_lon_coslat, d_lat : `~astropy.units.Quantity`
+        The longitude and latitude of the differentials.
+    copy : bool, optional
+        If `True` (default), arrays will be copied rather than referenced.
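+
+    As a minimal illustrative sketch (not part of the original docstring),
+    such a differential can hold proper-motion-like quantities in which the
+    cos(lat) factor is already folded into the longitude component::
+
+        >>> from astropy import units as u
+        >>> dpm = UnitSphericalCosLatDifferential(1 * u.mas / u.yr,
+        ...                                       2 * u.mas / u.yr)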
+ """ + base_representation = UnitSphericalRepresentation + attr_classes = OrderedDict([('d_lon_coslat', u.Quantity), + ('d_lat', u.Quantity)]) + + @classproperty + def _dimensional_differential(cls): + return SphericalCosLatDifferential + + def __init__(self, d_lon_coslat, d_lat, copy=True): + super(UnitSphericalCosLatDifferential, + self).__init__(d_lon_coslat, d_lat, copy=copy) + if not self._d_lon_coslat.unit.is_equivalent(self._d_lat.unit): + raise u.UnitsError('d_lon_coslat and d_lat should have equivalent ' + 'units.') + + def to_cartesian(self, base): + if isinstance(base, SphericalRepresentation): + scale = base.distance + elif isinstance(base, PhysicsSphericalRepresentation): + scale = base.r + else: + return super(UnitSphericalCosLatDifferential, + self).to_cartesian(base) + + base = base.represent_as(UnitSphericalRepresentation) + return scale * super(UnitSphericalCosLatDifferential, + self).to_cartesian(base) + + def represent_as(self, other_class, base=None): + # Only have enough information to represent other unit-spherical. + if issubclass(other_class, UnitSphericalDifferential): + return other_class(self._d_lon(base), self.d_lat) + + return super(UnitSphericalCosLatDifferential, + self).represent_as(other_class, base) + + @classmethod + def from_representation(cls, representation, base=None): + # All spherical differentials can be done without going to Cartesian, + # though w/o CosLat needs base for the latitude. + if isinstance(representation, SphericalCosLatDifferential): + return cls(representation.d_lon_coslat, representation.d_lat) + elif isinstance(representation, (SphericalDifferential, + UnitSphericalDifferential)): + d_lon_coslat = cls._get_d_lon_coslat(representation.d_lon, base) + return cls(d_lon_coslat, representation.d_lat) + elif isinstance(representation, PhysicsSphericalDifferential): + d_lon_coslat = cls._get_d_lon_coslat(representation.d_phi, base) + return cls(d_lon_coslat, -representation.d_theta) + + return super(UnitSphericalDifferential, + cls).from_representation(representation, base) + + +class SphericalCosLatDifferential(BaseSphericalCosLatDifferential): + """Differential(s) of points in 3D spherical coordinates. + + Parameters + ---------- + d_lon_coslat, d_lat : `~astropy.units.Quantity` + The differential longitude (with cos(lat) included) and latitude. + d_distance : `~astropy.units.Quantity` + The differential distance. + copy : bool, optional + If `True` (default), arrays will be copied rather than referenced. + """ + base_representation = SphericalRepresentation + _unit_differential = UnitSphericalCosLatDifferential + attr_classes = OrderedDict([('d_lon_coslat', u.Quantity), + ('d_lat', u.Quantity), + ('d_distance', u.Quantity)]) + + def __init__(self, d_lon_coslat, d_lat, d_distance, copy=True): + super(SphericalCosLatDifferential, + self).__init__(d_lon_coslat, d_lat, d_distance, copy=copy) + if not self._d_lon_coslat.unit.is_equivalent(self._d_lat.unit): + raise u.UnitsError('d_lon_coslat and d_lat should have equivalent ' + 'units.') + + def represent_as(self, other_class, base=None): + # All spherical differentials can be done without going to Cartesian, + # though some need base for the latitude to remove cos(lat). 
+ if issubclass(other_class, UnitSphericalCosLatDifferential): + return other_class(self.d_lon_coslat, self.d_lat) + elif issubclass(other_class, RadialDifferential): + return other_class(self.d_distance) + elif issubclass(other_class, SphericalDifferential): + return other_class(self._d_lon(base), self.d_lat, self.d_distance) + elif issubclass(other_class, UnitSphericalDifferential): + return other_class(self._d_lon(base), self.d_lat) + elif issubclass(other_class, PhysicsSphericalDifferential): + return other_class(self._d_lon(base), -self.d_lat, self.d_distance) + + return super(SphericalCosLatDifferential, + self).represent_as(other_class, base) + + @classmethod + def from_representation(cls, representation, base=None): + # Other spherical differentials can be done without going to Cartesian, + # though we need base for the latitude to remove coslat. + if isinstance(representation, SphericalDifferential): + d_lon_coslat = cls._get_d_lon_coslat(representation.d_lon, base) + return cls(d_lon_coslat, representation.d_lat, + representation.d_distance) + elif isinstance(representation, PhysicsSphericalDifferential): + d_lon_coslat = cls._get_d_lon_coslat(representation.d_phi, base) + return cls(d_lon_coslat, -representation.d_theta, + representation.d_r) + + return super(SphericalCosLatDifferential, + cls).from_representation(representation, base) + + +class RadialDifferential(BaseDifferential): + """Differential(s) of radial distances. + + Parameters + ---------- + d_distance : `~astropy.units.Quantity` + The differential distance. + copy : bool, optional + If `True` (default), arrays will be copied rather than referenced. + """ + base_representation = RadialRepresentation + + def to_cartesian(self, base): + return self.d_distance * base.represent_as( + UnitSphericalRepresentation).to_cartesian() + + @classmethod + def from_cartesian(cls, other, base): + return cls(other.dot(base.represent_as(UnitSphericalRepresentation)), + copy=False) + + @classmethod + def from_representation(cls, representation, base=None): + if isinstance(representation, (SphericalDifferential, + SphericalCosLatDifferential)): + return cls(representation.d_distance) + elif isinstance(representation, PhysicsSphericalDifferential): + return cls(representation.d_r) + else: + return super(RadialDifferential, + cls).from_representation(representation, base) + + def _combine_operation(self, op, other, reverse=False): + if isinstance(other, self.base_representation): + if reverse: + first, second = other.distance, self.d_distance + else: + first, second = self.d_distance, other.distance + return other.__class__(op(first, second), copy=False) + elif isinstance(other, (BaseSphericalDifferential, + BaseSphericalCosLatDifferential)): + all_components = set(self.components) | set(other.components) + first, second = (self, other) if not reverse else (other, self) + result_args = {c: op(getattr(first, c, 0.), getattr(second, c, 0.)) + for c in all_components} + return SphericalDifferential(**result_args) + + else: + return super(RadialDifferential, + self)._combine_operation(op, other, reverse) + + +class PhysicsSphericalDifferential(BaseDifferential): + """Differential(s) of 3D spherical coordinates using physics convention. + + Parameters + ---------- + d_phi, d_theta : `~astropy.units.Quantity` + The differential azimuth and inclination. + d_r : `~astropy.units.Quantity` + The differential radial distance. + copy : bool, optional + If `True` (default), arrays will be copied rather than referenced. 
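+
+    Note (added for clarity): because ``theta`` is measured from the pole
+    while latitude is measured from the equator (``lat = 90 deg - theta``),
+    conversions to and from the latitude-based spherical differentials flip
+    the sign of the inclination component, i.e. ``d_lat = -d_theta``.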
+ """ + base_representation = PhysicsSphericalRepresentation + + def __init__(self, d_phi, d_theta, d_r, copy=True): + super(PhysicsSphericalDifferential, + self).__init__(d_phi, d_theta, d_r, copy=copy) + if not self._d_phi.unit.is_equivalent(self._d_theta.unit): + raise u.UnitsError('d_phi and d_theta should have equivalent ' + 'units.') + + def represent_as(self, other_class, base=None): + # All spherical differentials can be done without going to Cartesian, + # though CosLat needs base for the latitude. For those, explicitly + # do the equivalent of self._d_lon_coslat in SphericalDifferential. + if issubclass(other_class, SphericalDifferential): + return other_class(self.d_phi, -self.d_theta, self.d_r) + elif issubclass(other_class, UnitSphericalDifferential): + return other_class(self.d_phi, -self.d_theta) + elif issubclass(other_class, SphericalCosLatDifferential): + self._check_base(base) + d_lon_coslat = self.d_phi * np.sin(base.theta) + return other_class(d_lon_coslat, -self.d_theta, self.d_r) + elif issubclass(other_class, UnitSphericalCosLatDifferential): + self._check_base(base) + d_lon_coslat = self.d_phi * np.sin(base.theta) + return other_class(d_lon_coslat, -self.d_theta) + elif issubclass(other_class, RadialDifferential): + return other_class(self.d_r) + + return super(PhysicsSphericalDifferential, + self).represent_as(other_class, base) + + @classmethod + def from_representation(cls, representation, base=None): + # Other spherical differentials can be done without going to Cartesian, + # though we need base for the latitude to remove coslat. For that case, + # do the equivalent of cls._d_lon in SphericalDifferential. + if isinstance(representation, SphericalDifferential): + return cls(representation.d_lon, -representation.d_lat, + representation.d_distance) + elif isinstance(representation, SphericalCosLatDifferential): + cls._check_base(base) + d_phi = representation.d_lon_coslat / np.sin(base.theta) + return cls(d_phi, -representation.d_lat, representation.d_distance) + + return super(PhysicsSphericalDifferential, + cls).from_representation(representation, base) + + +class CylindricalDifferential(BaseDifferential): + """Differential(s) of points in cylindrical coordinates. + + Parameters + ---------- + d_rho : `~astropy.units.Quantity` + The differential cylindrical radius. + d_phi : `~astropy.units.Quantity` + The differential azimuth. + d_z : `~astropy.units.Quantity` + The differential height. + copy : bool, optional + If `True` (default), arrays will be copied rather than referenced. 
+ """ + base_representation = CylindricalRepresentation + + def __init__(self, d_rho, d_phi, d_z, copy=False): + super(CylindricalDifferential, + self).__init__(d_rho, d_phi, d_z, copy=copy) + if not self._d_rho.unit.is_equivalent(self._d_z.unit): + raise u.UnitsError("d_rho and d_z should have equivalent units.") diff --git a/astropy/coordinates/setup_package.py b/astropy/coordinates/setup_package.py new file mode 100644 index 0000000..9b9f467 --- /dev/null +++ b/astropy/coordinates/setup_package.py @@ -0,0 +1,10 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + + +def get_package_data(): + return {'astropy.coordinates.tests.accuracy': ['*.csv'], + 'astropy.coordinates': ['data/*.dat', 'data/sites.json']} + + +def requires_2to3(): + return False diff --git a/astropy/coordinates/sites.py b/astropy/coordinates/sites.py new file mode 100644 index 0000000..243ad22 --- /dev/null +++ b/astropy/coordinates/sites.py @@ -0,0 +1,137 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" +Currently the only site accessible without internet access is the Royal +Greenwich Observatory, as an example (and for testing purposes). In future +releases, a canonical set of sites may be bundled into astropy for when the +online registry is unavailable. + +Additions or corrections to the observatory list can be submitted via Pull +Request to the [astropy-data GitHub repository](https://github.com/astropy/astropy-data), +updating the ``location.json`` file. +""" + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import json +from difflib import get_close_matches +from collections import Mapping + +from ..utils.data import get_pkg_data_contents, get_file_contents +from .earth import EarthLocation +from .errors import UnknownSiteException +from .. import units as u + + +class SiteRegistry(Mapping): + """ + A bare-bones registry of EarthLocation objects. + + This acts as a mapping (dict-like object) but with the important caveat that + it's always transforms its inputs to lower-case. So keys are always all + lower-case, and even if you ask for something that's got mixed case, it will + be interpreted as the all lower-case version. + """ + def __init__(self): + # the keys to this are always lower-case + self._lowercase_names_to_locations = {} + # these can be whatever case is appropriate + self._names = [] + + def __getitem__(self, site_name): + """ + Returns an EarthLocation for a known site in this registry. + + Parameters + ---------- + site_name : str + Name of the observatory (case-insensitive). + + Returns + ------- + site : `~astropy.coordinates.EarthLocation` + The location of the observatory. + """ + if site_name.lower() not in self._lowercase_names_to_locations: + # If site name not found, find close matches and suggest them in error + close_names = get_close_matches(site_name, self._lowercase_names_to_locations) + close_names = sorted(close_names, key=len) + + raise UnknownSiteException(site_name, "the 'names' attribute", close_names=close_names) + + return self._lowercase_names_to_locations[site_name.lower()] + + def __len__(self): + return len(self._lowercase_names_to_locations) + + def __iter__(self): + return iter(self._lowercase_names_to_locations) + + def __contains__(self, site_name): + return site_name.lower() in self._lowercase_names_to_locations + + @property + def names(self): + """ + The names in this registry. 
Note that these are *not* exactly the same + as the keys: keys are always lower-case, while `names` is what you + should use for the actual readable names (which may be case-sensitive) + + Returns + ------- + site : list of str + The names of the sites in this registry + """ + return sorted(self._names) + + def add_site(self, names, locationobj): + """ + Adds a location to the registry. + + Parameters + ---------- + names : list of str + All the names this site should go under + locationobj : `~astropy.coordinates.EarthLocation` + The actual site object + """ + for name in names: + self._lowercase_names_to_locations[name.lower()] = locationobj + self._names.append(name) + + @classmethod + def from_json(cls, jsondb): + reg = cls() + for site in jsondb: + site_info = jsondb[site] + location = EarthLocation.from_geodetic(site_info['longitude'] * u.Unit(site_info['longitude_unit']), + site_info['latitude'] * u.Unit(site_info['latitude_unit']), + site_info['elevation'] * u.Unit(site_info['elevation_unit'])) + location.info.name = site_info['name'] + + reg.add_site([site] + site_info['aliases'], location) + reg._loaded_jsondb = jsondb + return reg + + +def get_builtin_sites(): + """ + Load observatory database from data/observatories.json and parse them into + a SiteRegistry. + """ + jsondb = json.loads(get_pkg_data_contents('data/sites.json')) + return SiteRegistry.from_json(jsondb) + + +def get_downloaded_sites(jsonurl=None): + """ + Load observatory database from data.astropy.org and parse into a SiteRegistry + """ + + if jsonurl is None: + content = get_pkg_data_contents('coordinates/sites.json') + else: + content = get_file_contents(jsonurl) + + jsondb = json.loads(content) + return SiteRegistry.from_json(jsondb) diff --git a/astropy/coordinates/sky_coordinate.py b/astropy/coordinates/sky_coordinate.py new file mode 100644 index 0000000..f29e5e6 --- /dev/null +++ b/astropy/coordinates/sky_coordinate.py @@ -0,0 +1,1911 @@ +from __future__ import (absolute_import, division, print_function, unicode_literals) + +import re +import copy +import warnings +import collections + +import numpy as np + +from ..utils.compat.misc import override__dir__ +from ..extern import six +from ..extern.six.moves import zip, range +from ..units import Unit, IrreducibleUnit +from .. import units as u +from ..constants import c as speed_of_light +from ..wcs.utils import skycoord_to_pixel, pixel_to_skycoord +from ..utils.exceptions import AstropyDeprecationWarning +from ..utils.data_info import MixinInfo +from ..utils import ShapedLikeNDArray + +from .distances import Distance +from .angles import Angle +from .baseframe import BaseCoordinateFrame, frame_transform_graph, GenericFrame, _get_repr_cls +from .builtin_frames import ICRS, SkyOffsetFrame +from .representation import (BaseRepresentation, SphericalRepresentation, + UnitSphericalRepresentation) + +__all__ = ['SkyCoord', 'SkyCoordInfo'] + +PLUS_MINUS_RE = re.compile(r'(\+|\-)') +J_PREFIXED_RA_DEC_RE = re.compile( + r"""J # J prefix + ([0-9]{6,7}\.?[0-9]{0,2}) # RA as HHMMSS.ss or DDDMMSS.ss, optional decimal digits + ([\+\-][0-9]{6}\.?[0-9]{0,2})\s*$ # Dec as DDMMSS.ss, optional decimal digits + """, re.VERBOSE) + + +class SkyCoordInfo(MixinInfo): + """ + Container for meta information like name, description, format. This is + required when the object is used as a mixin column within a table, but can + be used as a general way to store meta information. 
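+
+    As a minimal illustrative sketch (not part of the original docstring),
+    the composite unit is a comma-separated join over the components of the
+    representation data::
+
+        >>> from astropy.coordinates import SkyCoord
+        >>> SkyCoord(10, 20, unit='deg').info.unit
+        'deg,deg'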
+ """ + attrs_from_parent = set(['unit']) # Unit is read-only + _supports_indexing = False + + @staticmethod + def default_format(val): + repr_data = val.info._repr_data + formats = ['{0.' + compname + '.value:}' for compname + in repr_data.components] + return ','.join(formats).format(repr_data) + + @property + def unit(self): + repr_data = self._repr_data + unit = ','.join(str(getattr(repr_data, comp).unit) or 'None' + for comp in repr_data.components) + return unit + + @property + def _repr_data(self): + if self._parent is None: + return None + + sc = self._parent + if (issubclass(sc.representation, SphericalRepresentation) and + isinstance(sc.data, UnitSphericalRepresentation)): + repr_data = sc.represent_as(sc.data.__class__, in_frame_units=True) + else: + repr_data = sc.represent_as(sc.representation, in_frame_units=True) + return repr_data + + def _represent_as_dict(self): + obj = self._parent + attrs = (list(obj.representation_component_names) + + list(frame_transform_graph.frame_attributes.keys())) + + # Don't output distance if it is all unitless 1.0 + if 'distance' in attrs and np.all(obj.distance == 1.0): + attrs.remove('distance') + + self._represent_as_dict_attrs = attrs + + out = super(SkyCoordInfo, self)._represent_as_dict() + + out['representation'] = obj.representation.get_name() + out['frame'] = obj.frame.name + # Note that obj.info.unit is a fake composite unit (e.g. 'deg,deg,None' + # or None,None,m) and is not stored. The individual attributes have + # units. + + return out + + +class SkyCoord(ShapedLikeNDArray): + """High-level object providing a flexible interface for celestial coordinate + representation, manipulation, and transformation between systems. + + The `SkyCoord` class accepts a wide variety of inputs for initialization. At + a minimum these must provide one or more celestial coordinate values with + unambiguous units. Inputs may be scalars or lists/tuples/arrays, yielding + scalar or array coordinates (can be checked via ``SkyCoord.isscalar``). + Typically one also specifies the coordinate frame, though this is not + required. The general pattern for spherical representations is:: + + SkyCoord(COORD, [FRAME], keyword_args ...) + SkyCoord(LON, LAT, [FRAME], keyword_args ...) + SkyCoord(LON, LAT, [DISTANCE], frame=FRAME, unit=UNIT, keyword_args ...) + SkyCoord([FRAME], =LON, =LAT, keyword_args ...) + + It is also possible to input coordinate values in other representations + such as cartesian or cylindrical. In this case one includes the keyword + argument ``representation='cartesian'`` (for example) along with data in + ``x``, ``y``, and ``z``. + + Examples + -------- + The examples below illustrate common ways of initializing a `SkyCoord` + object. For a complete description of the allowed syntax see the + full coordinates documentation. 
First some imports::
+
+        >>> from astropy.coordinates import SkyCoord  # High-level coordinates
+        >>> from astropy.coordinates import ICRS, Galactic, FK4, FK5  # Low-level frames
+        >>> from astropy.coordinates import Angle, Latitude, Longitude  # Angles
+        >>> import astropy.units as u
+
+    The coordinate values and frame specification can now be provided using
+    positional and keyword arguments::
+
+        >>> c = SkyCoord(10, 20, unit="deg")  # defaults to ICRS frame
+        >>> c = SkyCoord([1, 2, 3], [-30, 45, 8], "icrs", unit="deg")  # 3 coords
+
+        >>> coords = ["1:12:43.2 +1:12:43", "1 12 43.2 +1 12 43"]
+        >>> c = SkyCoord(coords, FK4, unit=(u.deg, u.hourangle), obstime="J1992.21")
+
+        >>> c = SkyCoord("1h12m43.2s +1d12m43s", Galactic)  # Units from string
+        >>> c = SkyCoord("galactic", l="1h12m43.2s", b="+1d12m43s")
+
+        >>> ra = Longitude([1, 2, 3], unit=u.deg)  # Could also use Angle
+        >>> dec = np.array([4.5, 5.2, 6.3]) * u.deg  # Astropy Quantity
+        >>> c = SkyCoord(ra, dec, frame='icrs')
+        >>> c = SkyCoord(ICRS, ra=ra, dec=dec, obstime='2001-01-02T12:34:56')
+
+        >>> c = FK4(1 * u.deg, 2 * u.deg)  # Uses defaults for obstime, equinox
+        >>> c = SkyCoord(c, obstime='J2010.11', equinox='B1965')  # Override defaults
+
+        >>> c = SkyCoord(w=0, u=1, v=2, unit='kpc', frame='galactic', representation='cartesian')
+
+        >>> c = SkyCoord([ICRS(ra=1*u.deg, dec=2*u.deg), ICRS(ra=3*u.deg, dec=4*u.deg)])
+
+    As shown, the frame can be a `~astropy.coordinates.BaseCoordinateFrame`
+    class or the corresponding string alias.  The frame classes that are built in
+    to astropy are `ICRS`, `FK5`, `FK4`, `FK4NoETerms`, and `Galactic`.
+    The string aliases are simply lower-case versions of the class name, and
+    allow for creating a `SkyCoord` object and transforming frames without
+    explicitly importing the frame classes.
+
+    Parameters
+    ----------
+    frame : `~astropy.coordinates.BaseCoordinateFrame` class or string, optional
+        Type of coordinate frame this `SkyCoord` should represent.  Defaults
+        to ICRS if not given or given as None.
+    unit : `~astropy.units.Unit`, string, or tuple of :class:`~astropy.units.Unit` or str, optional
+        Units for supplied ``LON`` and ``LAT`` values, respectively.  If
+        only one unit is supplied then it applies to both ``LON`` and
+        ``LAT``.
+    obstime : valid `~astropy.time.Time` initializer, optional
+        Time of observation
+    equinox : valid `~astropy.time.Time` initializer, optional
+        Coordinate frame equinox
+    representation : str or Representation class
+        Specifies the representation, e.g. 'spherical', 'cartesian', or
+        'cylindrical'.  This affects the positional args and other keyword args
+        which must correspond to the given representation.
+    copy : bool, optional
+        If `True` (default), a copy of any coordinate data is made.  This
+        argument can only be passed in as a keyword argument.
+    **keyword_args
+        Other keyword arguments as applicable for user-defined coordinate frames.
+        Common options include:
+
+        ra, dec : valid `~astropy.coordinates.Angle` initializer, optional
+            RA and Dec for frames where ``ra`` and ``dec`` are keys in the
+            frame's ``representation_component_names``, including `ICRS`,
+            `FK5`, `FK4`, and `FK4NoETerms`.
+        l, b : valid `~astropy.coordinates.Angle` initializer, optional
+            Galactic ``l`` and ``b`` for frames where ``l`` and ``b`` are
+            keys in the frame's ``representation_component_names``, including
+            the `Galactic` frame.
+ x, y, z : float or `~astropy.units.Quantity`, optional + Cartesian coordinates values + u, v, w : float or `~astropy.units.Quantity`, optional + Cartesian coordinates values for the Galactic frame. + """ + + # Declare that SkyCoord can be used as a Table column by defining the + # info property. + info = SkyCoordInfo() + + def __init__(self, *args, **kwargs): + + # Parse the args and kwargs to assemble a sanitized and validated + # kwargs dict for initializing attributes for this object and for + # creating the internal self._sky_coord_frame object + args = list(args) # Make it mutable + copy = kwargs.pop('copy', True) + kwargs = self._parse_inputs(args, kwargs) + + frame = kwargs['frame'] + frame_attr_names = frame.get_frame_attr_names() + + # these are frame attributes set on this SkyCoord but *not* a part of + # the frame object this SkyCoord contains + self._extra_frameattr_names = set() + + for attr in kwargs: + if (attr not in frame_attr_names and + attr in frame_transform_graph.frame_attributes): + # Setting it will also validate it. + setattr(self, attr, kwargs[attr]) + + coord_kwargs = {} + if 'representation' in kwargs: + coord_kwargs['representation'] = _get_repr_cls(kwargs['representation']) + for attr, value in kwargs.items(): + if value is not None and (attr in frame.representation_component_names + or attr in frame.get_frame_attr_names()): + coord_kwargs[attr] = value + + # Finally make the internal coordinate object. + self._sky_coord_frame = frame.__class__(copy=copy, **coord_kwargs) + + if not self._sky_coord_frame.has_data: + raise ValueError('Cannot create a SkyCoord without data') + + @property + def frame(self): + return self._sky_coord_frame + + @property + def representation(self): + return self.frame.representation + + @representation.setter + def representation(self, value): + self.frame.representation = value + + @property + def shape(self): + return self.frame.shape + + def _apply(self, method, *args, **kwargs): + """Create a new instance, applying a method to the underlying data. + + In typical usage, the method is any of the shape-changing methods for + `~numpy.ndarray` (``reshape``, ``swapaxes``, etc.), as well as those + picking particular elements (``__getitem__``, ``take``, etc.), which + are all defined in `~astropy.utils.misc.ShapedLikeNDArray`. It will be + applied to the underlying arrays in the representation (e.g., ``x``, + ``y``, and ``z`` for `~astropy.coordinates.CartesianRepresentation`), + as well as to any frame attributes that have a shape, with the results + used to create a new instance. + + Internally, it is also used to apply functions to the above parts + (in particular, `~numpy.broadcast_to`). + + Parameters + ---------- + method : str or callable + If str, it is the name of a method that is applied to the internal + ``components``. If callable, the function is applied. + args : tuple + Any positional arguments for ``method``. + kwargs : dict + Any keyword arguments for ``method``. 
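+
+        As a minimal illustrative sketch (not part of the original
+        docstring), the shape-changing methods built on this hook behave
+        like their `~numpy.ndarray` counterparts::
+
+            >>> import numpy as np
+            >>> import astropy.units as u
+            >>> from astropy.coordinates import SkyCoord
+            >>> sc = SkyCoord(ra=np.arange(6) * u.deg, dec=np.zeros(6) * u.deg)
+            >>> sc.reshape(2, 3).shape
+            (2, 3)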
+ """ + def apply_method(value): + if isinstance(value, ShapedLikeNDArray): + return value._apply(method, *args, **kwargs) + else: + if callable(method): + return method(value, *args, **kwargs) + else: + return getattr(value, method)(*args, **kwargs) + + self_frame = self._sky_coord_frame + try: + # First turn `self` into a mockup of the thing we want - we can copy + # this to get all the right attributes + self._sky_coord_frame = self_frame._apply(method, *args, **kwargs) + out = SkyCoord(self, representation=self.representation, copy=False) + for attr in self._extra_frameattr_names: + value = getattr(self, attr) + if getattr(value, 'size', 1) > 1: + value = apply_method(value) + elif method == 'copy' or method == 'flatten': + # flatten should copy also for a single element array, but + # we cannot use it directly for array scalars, since it + # always returns a one-dimensional array. So, just copy. + value = copy.copy(value) + setattr(out, '_' + attr, value) + + # Copy other 'info' attr only if it has actually been defined. + # See PR #3898 for further explanation and justification, along + # with Quantity.__array_finalize__ + if 'info' in self.__dict__: + out.info = self.info + + return out + finally: + # now put back the right frame in self + self._sky_coord_frame = self_frame + + def _parse_inputs(self, args, kwargs): + """ + Assemble a validated and sanitized keyword args dict for instantiating a + SkyCoord and coordinate object from the provided `args`, and `kwargs`. + """ + valid_kwargs = {} + + # Put the SkyCoord attributes like frame, equinox, obstime, location + # into valid_kwargs dict. `Frame` could come from args or kwargs, so + # set valid_kwargs['frame'] accordingly. The others must be specified + # by keyword args or else get a None default. Pop them off of kwargs + # in the process. + frame = valid_kwargs['frame'] = _get_frame(args, kwargs) + if 'representation' in kwargs: + valid_kwargs['representation'] = _get_repr_cls(kwargs.pop('representation')) + + for attr in frame_transform_graph.frame_attributes: + if attr in kwargs: + valid_kwargs[attr] = kwargs.pop(attr) + + # Get units + units = _get_units(args, kwargs) + + # Grab any frame-specific attr names like `ra` or `l` or `distance` from kwargs + # and migrate to valid_kwargs. + valid_kwargs.update(_get_representation_attrs(frame, units, kwargs)) + + # Error if anything is still left in kwargs + if kwargs: + + # TODO: remove this when velocities are supported in SkyCoord + vel_url = 'http://docs.astropy.org/en/stable/coordinates/velocities.html' + for k in kwargs: + if k.startswith('pm_') or k == 'radial_velocity': + raise ValueError('Velocity data is currently only supported' + ' in the coordinate frame objects, not in ' + 'SkyCoord. See the velocities ' + 'documentation page for more information: ' + '{0}'.format(vel_url)) + + raise ValueError('Unrecognized keyword argument(s) {0}' + .format(', '.join("'{0}'".format(key) for key in kwargs))) + + # Finally deal with the unnamed args. This figures out what the arg[0] is + # and returns a dict with appropriate key/values for initializing frame class. + if args: + if len(args) == 1: + # One arg which must be a coordinate. In this case + # coord_kwargs will contain keys like 'ra', 'dec', 'distance' + # along with any frame attributes like equinox or obstime which + # were explicitly specified in the coordinate object (i.e. non-default). + coord_kwargs = _parse_coordinate_arg(args[0], frame, units, kwargs) + + # Copy other 'info' attr only if it has actually been defined. 
+                if 'info' in getattr(args[0], '__dict__', ()):
+                    self.info = args[0].info
+
+            elif len(args) <= 3:
+                frame_attr_names = frame.representation_component_names.keys()
+                repr_attr_names = frame.representation_component_names.values()
+                coord_kwargs = {}
+                for arg, frame_attr_name, repr_attr_name, unit in zip(args, frame_attr_names,
+                                                                      repr_attr_names, units):
+                    attr_class = frame.representation.attr_classes[repr_attr_name]
+                    coord_kwargs[frame_attr_name] = attr_class(arg, unit=unit)
+
+            else:
+                raise ValueError('Must supply no more than three positional arguments, got {}'
+                                 .format(len(args)))
+
+            # Copy the coord_kwargs into the final valid_kwargs dict.  For each
+            # of the coord_kwargs ensure that there is no conflict with a value
+            # specified by the user in the original kwargs.
+            for attr, coord_value in coord_kwargs.items():
+                if (attr in valid_kwargs
+                        and valid_kwargs[attr] is not None
+                        and np.any(valid_kwargs[attr] != coord_value)):
+                    raise ValueError("Coordinate attribute '{0}'={1!r} conflicts with "
+                                     "keyword argument '{0}'={2!r}"
+                                     .format(attr, coord_value, valid_kwargs[attr]))
+                valid_kwargs[attr] = coord_value
+
+        return valid_kwargs
+
+    def transform_to(self, frame, merge_attributes=True):
+        """Transform this coordinate to a new frame.
+
+        The precise frame transformed to depends on ``merge_attributes``.
+        If `False`, the destination frame is used exactly as passed in.
+        But this is often not quite what one wants.  E.g., suppose one wants to
+        transform an ICRS coordinate that has an obstime attribute to FK4; in
+        this case, one likely would want to use this information.  Thus, the
+        default for ``merge_attributes`` is `True`, in which the precedence is
+        as follows: (1) explicitly set (i.e., non-default) values in the
+        destination frame; (2) explicitly set values in the source; (3) default
+        value in the destination frame.
+
+        Note that in either case, any explicitly set attributes on the source
+        `SkyCoord` that are not part of the destination frame's definition are
+        kept (stored on the resulting `SkyCoord`), and thus one can round-trip
+        (e.g., from FK4 to ICRS to FK4 without losing obstime).
+
+        Parameters
+        ----------
+        frame : str, `BaseCoordinateFrame` class or instance, or `SkyCoord` instance
+            The frame to transform this coordinate into.  If a `SkyCoord`, the
+            underlying frame is extracted, and all other information ignored.
+        merge_attributes : bool, optional
+            Whether the default attributes in the destination frame are allowed
+            to be overridden by explicitly set attributes in the source
+            (see note above; default: `True`).
+
+        Returns
+        -------
+        coord : `SkyCoord`
+            A new object with this coordinate represented in the `frame` frame.
+
+        Raises
+        ------
+        ValueError
+            If there is no possible transformation route.
+
+        """
+        from astropy.coordinates.errors import ConvertError
+
+        frame_kwargs = {}
+
+        # Frame name (string) or frame class?  Coerce into an instance.
+        try:
+            frame = _get_frame_class(frame)()
+        except Exception:
+            pass
+
+        if isinstance(frame, SkyCoord):
+            frame = frame.frame  # Change to underlying coord frame instance
+
+        if isinstance(frame, BaseCoordinateFrame):
+            new_frame_cls = frame.__class__
+            # Get frame attributes, allowing defaults to be overridden by
+            # explicitly set attributes of the source if ``merge_attributes``.
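+            # Precedence (per the docstring above): (1) non-default values on
+            # the destination frame; (2) non-default values on the source;
+            # (3) remaining (default) values on the destination frame.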
+ for attr in frame_transform_graph.frame_attributes: + self_val = getattr(self, attr, None) + frame_val = getattr(frame, attr, None) + if (frame_val is not None and not + (merge_attributes and frame.is_frame_attr_default(attr))): + frame_kwargs[attr] = frame_val + elif (self_val is not None and + not self.is_frame_attr_default(attr)): + frame_kwargs[attr] = self_val + elif frame_val is not None: + frame_kwargs[attr] = frame_val + else: + raise ValueError('Transform `frame` must be a frame name, class, or instance') + + # Get the composite transform to the new frame + trans = frame_transform_graph.get_transform(self.frame.__class__, new_frame_cls) + if trans is None: + raise ConvertError('Cannot transform from {0} to {1}' + .format(self.frame.__class__, new_frame_cls)) + + # Make a generic frame which will accept all the frame kwargs that + # are provided and allow for transforming through intermediate frames + # which may require one or more of those kwargs. + generic_frame = GenericFrame(frame_kwargs) + + # Do the transformation, returning a coordinate frame of the desired + # final type (not generic). + new_coord = trans(self.frame, generic_frame) + + # Finally make the new SkyCoord object from the `new_coord` and + # remaining frame_kwargs that are not frame_attributes in `new_coord`. + for attr in (set(new_coord.get_frame_attr_names()) & + set(frame_kwargs.keys())): + frame_kwargs.pop(attr) + return self.__class__(new_coord, **frame_kwargs) + + def __getattr__(self, attr): + """ + Overrides getattr to return coordinates that this can be transformed + to, based on the alias attr in the master transform graph. + """ + if '_sky_coord_frame' in self.__dict__: + if self.frame.name == attr: + return self # Should this be a deepcopy of self? + + # Anything in the set of all possible frame_attr_names is handled + # here. If the attr is relevant for the current frame then delegate + # to self.frame otherwise get it from self._. + if attr in frame_transform_graph.frame_attributes: + if attr in self.frame.get_frame_attr_names(): + return getattr(self.frame, attr) + else: + return getattr(self, '_' + attr, None) + + # Some attributes might not fall in the above category but still + # are available through self._sky_coord_frame. + if not attr.startswith('_') and hasattr(self._sky_coord_frame, attr): + return getattr(self._sky_coord_frame, attr) + + # Try to interpret as a new frame for transforming. + frame_cls = frame_transform_graph.lookup_name(attr) + if frame_cls is not None and self.frame.is_transformable_to(frame_cls): + return self.transform_to(attr) + + # Fail + raise AttributeError("'{0}' object has no attribute '{1}'" + .format(self.__class__.__name__, attr)) + + def __setattr__(self, attr, val): + # This is to make anything available through __getattr__ immutable + if '_sky_coord_frame' in self.__dict__: + if self.frame.name == attr: + raise AttributeError("'{0}' is immutable".format(attr)) + + if not attr.startswith('_') and hasattr(self._sky_coord_frame, attr): + setattr(self._sky_coord_frame, attr, val) + return + + frame_cls = frame_transform_graph.lookup_name(attr) + if frame_cls is not None and self.frame.is_transformable_to(frame_cls): + raise AttributeError("'{0}' is immutable".format(attr)) + + if attr in frame_transform_graph.frame_attributes: + # All possible frame attributes can be set, but only via a private + # variable. See __getattr__ above. 
+            super(SkyCoord, self).__setattr__('_' + attr, val)
+            # Validate it
+            frame_transform_graph.frame_attributes[attr].__get__(self)
+            # And add to set of extra attributes
+            self._extra_frameattr_names |= {attr}
+
+        else:
+            # Otherwise, do the standard Python attribute setting
+            super(SkyCoord, self).__setattr__(attr, val)
+
+    def __delattr__(self, attr):
+        # mirror __setattr__ above
+        if '_sky_coord_frame' in self.__dict__:
+            if self.frame.name == attr:
+                raise AttributeError("'{0}' is immutable".format(attr))
+
+            if not attr.startswith('_') and hasattr(self._sky_coord_frame,
+                                                    attr):
+                delattr(self._sky_coord_frame, attr)
+                return
+
+            frame_cls = frame_transform_graph.lookup_name(attr)
+            if frame_cls is not None and self.frame.is_transformable_to(frame_cls):
+                raise AttributeError("'{0}' is immutable".format(attr))
+
+        if attr in frame_transform_graph.frame_attributes:
+            # All possible frame attributes can be deleted, but need to remove
+            # the corresponding private variable.  See __getattr__ above.
+            super(SkyCoord, self).__delattr__('_' + attr)
+            # Also remove it from the set of extra attributes
+            self._extra_frameattr_names -= {attr}
+
+        else:
+            # Otherwise, do the standard Python attribute setting
+            super(SkyCoord, self).__delattr__(attr)
+
+    @override__dir__
+    def __dir__(self):
+        """
+        Override the builtin `dir` behavior to include:
+        - Transforms available by aliases
+        - Attribute / methods of the underlying self.frame object
+        """
+
+        # determine the aliases that this can be transformed to.
+        dir_values = set()
+        for name in frame_transform_graph.get_names():
+            frame_cls = frame_transform_graph.lookup_name(name)
+            if self.frame.is_transformable_to(frame_cls):
+                dir_values.add(name)
+
+        # Add public attributes of self.frame
+        dir_values.update(set(attr for attr in dir(self.frame)
+                              if not attr.startswith('_')))
+
+        # Add all possible frame attributes
+        dir_values.update(frame_transform_graph.frame_attributes.keys())
+
+        return dir_values
+
+    def __repr__(self):
+        clsnm = self.__class__.__name__
+        coonm = self.frame.__class__.__name__
+        frameattrs = self.frame._frame_attrs_repr()
+        if frameattrs:
+            frameattrs = ': ' + frameattrs
+
+        data = self.frame._data_repr()
+        if data:
+            data = ': ' + data
+
+        return '<{clsnm} ({coonm}{frameattrs}){data}>'.format(**locals())
+
+    def to_string(self, style='decimal', **kwargs):
+        """
+        A string representation of the coordinates.
+
+        The default styles definitions are::
+
+          'decimal': 'lat': {'decimal': True, 'unit': "deg"}
+                     'lon': {'decimal': True, 'unit': "deg"}
+          'dms': 'lat': {'unit': "deg"}
+                 'lon': {'unit': "deg"}
+          'hmsdms': 'lat': {'alwayssign': True, 'pad': True, 'unit': "deg"}
+                    'lon': {'pad': True, 'unit': "hour"}
+
+        See :meth:`~astropy.coordinates.Angle.to_string` for details and
+        keyword arguments (the two angles forming the coordinates are
+        both :class:`~astropy.coordinates.Angle` instances).  Keyword
+        arguments have precedence over the style defaults and are passed
+        to :meth:`~astropy.coordinates.Angle.to_string`.
+
+        Parameters
+        ----------
+        style : {'hmsdms', 'dms', 'decimal'}
+            The formatting specification to use.  These encode the three most
+            common ways to represent coordinates.  The default is `decimal`.
+        kwargs
+            Keyword args passed to :meth:`~astropy.coordinates.Angle.to_string`.
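+
+        As a minimal illustrative sketch (not part of the original
+        docstring)::
+
+            >>> import astropy.units as u
+            >>> from astropy.coordinates import SkyCoord
+            >>> c = SkyCoord(ra=10.68458 * u.degree, dec=41.26917 * u.degree)
+            >>> c.to_string('hmsdms')
+            '00h42m44.2992s +41d16m09.012s'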
+ """ + + sph_coord = self.frame.represent_as(SphericalRepresentation) + + styles = {'hmsdms': {'lonargs': {'unit': u.hour, 'pad': True}, + 'latargs': {'unit': u.degree, 'pad': True, 'alwayssign': True}}, + 'dms': {'lonargs': {'unit': u.degree}, + 'latargs': {'unit': u.degree}}, + 'decimal': {'lonargs': {'unit': u.degree, 'decimal': True}, + 'latargs': {'unit': u.degree, 'decimal': True}} + } + + lonargs = {} + latargs = {} + + if style in styles: + lonargs.update(styles[style]['lonargs']) + latargs.update(styles[style]['latargs']) + else: + raise ValueError('Invalid style. Valid options are: {0}'.format(",".join(styles))) + + lonargs.update(kwargs) + latargs.update(kwargs) + + if np.isscalar(sph_coord.lon.value): + coord_string = (sph_coord.lon.to_string(**lonargs) + + " " + + sph_coord.lat.to_string(**latargs)) + else: + coord_string = [] + for lonangle, latangle in zip(sph_coord.lon.ravel(), sph_coord.lat.ravel()): + coord_string += [(lonangle.to_string(**lonargs) + + " " + + latangle.to_string(**latargs))] + if len(sph_coord.shape) > 1: + coord_string = np.array(coord_string).reshape(sph_coord.shape) + + return coord_string + + def is_equivalent_frame(self, other): + """ + Checks if this object's frame as the same as that of the ``other`` + object. + + To be the same frame, two objects must be the same frame class and have + the same frame attributes. For two `SkyCoord` objects, *all* of the + frame attributes have to match, not just those relevant for the object's + frame. + + Parameters + ---------- + other : SkyCoord or BaseCoordinateFrame + The other object to check. + + Returns + ------- + isequiv : bool + True if the frames are the same, False if not. + + Raises + ------ + TypeError + If ``other`` isn't a `SkyCoord` or a `BaseCoordinateFrame` or subclass. + """ + if isinstance(other, BaseCoordinateFrame): + return self.frame.is_equivalent_frame(other) + elif isinstance(other, SkyCoord): + if other.frame.name != self.frame.name: + return False + + for fattrnm in frame_transform_graph.frame_attributes: + if np.any(getattr(self, fattrnm) != getattr(other, fattrnm)): + return False + return True + else: + # not a BaseCoordinateFrame nor a SkyCoord object + raise TypeError("Tried to do is_equivalent_frame on something that " + "isn't frame-like") + + # High-level convenience methods + def separation(self, other): + """ + Computes on-sky separation between this coordinate and another. + + .. note:: + + If the ``other`` coordinate object is in a different frame, it is + first transformed to the frame of this object. This can lead to + unintutive behavior if not accounted for. Particularly of note is + that ``self.separation(other)`` and ``other.separation(self)`` may + not give the same answer in this case. + + For more on how to use this (and related) functionality, see the + examples in :doc:`/coordinates/matchsep`. + + Parameters + ---------- + other : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame` + The coordinate to get the separation to. + + Returns + ------- + sep : `~astropy.coordinates.Angle` + The on-sky separation between this and the ``other`` coordinate. + + Notes + ----- + The separation is calculated using the Vincenty formula, which + is stable at all locations, including poles and antipodes [1]_. + + .. [1] http://en.wikipedia.org/wiki/Great-circle_distance + + """ + from . 
import Angle + from .angle_utilities import angular_separation + + if not self.is_equivalent_frame(other): + try: + other = other.transform_to(self, merge_attributes=False) + except TypeError: + raise TypeError('Can only get separation to another SkyCoord ' + 'or a coordinate frame with data') + + lon1 = self.spherical.lon + lat1 = self.spherical.lat + lon2 = other.spherical.lon + lat2 = other.spherical.lat + + # Get the separation as a Quantity, convert to Angle in degrees + sep = angular_separation(lon1, lat1, lon2, lat2) + return Angle(sep, unit=u.degree) + + def separation_3d(self, other): + """ + Computes three dimensional separation between this coordinate + and another. + + For more on how to use this (and related) functionality, see the + examples in :doc:`/coordinates/matchsep`. + + Parameters + ---------- + other : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame` + The coordinate to get the separation to. + + Returns + ------- + sep : `~astropy.coordinates.Distance` + The real-space distance between these two coordinates. + + Raises + ------ + ValueError + If this or the other coordinate do not have distances. + """ + if not self.is_equivalent_frame(other): + try: + other = other.transform_to(self, merge_attributes=False) + except TypeError: + raise TypeError('Can only get separation to another SkyCoord ' + 'or a coordinate frame with data') + + if issubclass(self.data.__class__, UnitSphericalRepresentation): + raise ValueError('This object does not have a distance; cannot ' + 'compute 3d separation.') + if issubclass(other.data.__class__, UnitSphericalRepresentation): + raise ValueError('The other object does not have a distance; ' + 'cannot compute 3d separation.') + + return Distance((self.cartesian - other.cartesian).norm()) + + def spherical_offsets_to(self, tocoord): + r""" + Computes angular offsets to go *from* this coordinate *to* another. + + Parameters + ---------- + tocoord : `~astropy.coordinates.BaseCoordinateFrame` + The coordinate to offset to. + + Returns + ------- + lon_offset : `~astropy.coordinates.Angle` + The angular offset in the longitude direction (i.e., RA for + equatorial coordinates). + lat_offset : `~astropy.coordinates.Angle` + The angular offset in the latitude direction (i.e., Dec for + equatorial coordinates). + + Raises + ------ + ValueError + If the ``tocoord`` is not in the same frame as this one. This is + different from the behavior of the `separation`/`separation_3d` + methods because the offset components depend critically on the + specific choice of frame. + + Notes + ----- + This uses the sky offset frame machinery, and hence will produce a new + sky offset frame if one does not already exist for this object's frame + class. + + See Also + -------- + separation : for the *total* angular offset (not broken out into components) + + """ + if not self.is_equivalent_frame(tocoord): + raise ValueError('Tried to use spherical_offsets_to with two non-matching frames!') + + aframe = self.skyoffset_frame() + acoord = tocoord.transform_to(aframe) + + dlon = acoord.spherical.lon.view(Angle) + dlat = acoord.spherical.lat.view(Angle) + return dlon, dlat + + def match_to_catalog_sky(self, catalogcoord, nthneighbor=1): + """ + Finds the nearest on-sky matches of this coordinate in a set of + catalog coordinates. + + For more on how to use this (and related) functionality, see the + examples in :doc:`/coordinates/matchsep`. 
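+
+        A typical call pattern, as an illustrative sketch (``sc`` is this
+        `SkyCoord` and ``catalog`` is assumed to be an array `SkyCoord`)::
+
+            idx, sep2d, dist3d = sc.match_to_catalog_sky(catalog)
+            nearest = catalog[idx]  # catalog entry closest to each sc element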
+
+        Parameters
+        ----------
+        catalogcoord : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
+            The base catalog in which to search for matches. Typically this
+            will be a coordinate object that is an array (i.e.,
+            ``catalogcoord.isscalar == False``)
+        nthneighbor : int, optional
+            Which closest neighbor to search for. Typically ``1`` is
+            desired here, as that is correct for matching one set of
+            coordinates to another. The next likely use case is ``2``,
+            for matching a coordinate catalog against *itself* (``1``
+            is inappropriate because each point will find itself as the
+            closest match).
+
+        Returns
+        -------
+        idx : integer array
+            Indices into ``catalogcoord`` to get the matched points for
+            each of this object's coordinates. Shape matches this
+            object.
+        sep2d : `~astropy.coordinates.Angle`
+            The on-sky separation between the closest match for each
+            element in this object in ``catalogcoord``. Shape matches
+            this object.
+        dist3d : `~astropy.units.Quantity`
+            The 3D distance between the closest match for each element
+            in this object in ``catalogcoord``. Shape matches this
+            object.
+
+        Notes
+        -----
+        This method requires `SciPy <https://www.scipy.org>`_ to be
+        installed or it will fail.
+
+        See Also
+        --------
+        astropy.coordinates.match_coordinates_sky
+        SkyCoord.match_to_catalog_3d
+        """
+        from .matching import match_coordinates_sky
+
+        if (isinstance(catalogcoord, (SkyCoord, BaseCoordinateFrame))
+                and catalogcoord.has_data):
+            self_in_catalog_frame = self.transform_to(catalogcoord)
+        else:
+            raise TypeError('Can only get separation to another SkyCoord or a '
+                            'coordinate frame with data')
+
+        res = match_coordinates_sky(self_in_catalog_frame, catalogcoord,
+                                    nthneighbor=nthneighbor,
+                                    storekdtree='_kdtree_sky')
+        return res
+
+    def match_to_catalog_3d(self, catalogcoord, nthneighbor=1):
+        """
+        Finds the nearest 3-dimensional matches of this coordinate to a set
+        of catalog coordinates.
+
+        This finds the 3-dimensional closest neighbor, which is only different
+        from the on-sky distance if ``distance`` is set in this object or the
+        ``catalogcoord`` object.
+
+        For more on how to use this (and related) functionality, see the
+        examples in :doc:`/coordinates/matchsep`.
+
+        Parameters
+        ----------
+        catalogcoord : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
+            The base catalog in which to search for matches. Typically this
+            will be a coordinate object that is an array (i.e.,
+            ``catalogcoord.isscalar == False``)
+        nthneighbor : int, optional
+            Which closest neighbor to search for. Typically ``1`` is
+            desired here, as that is correct for matching one set of
+            coordinates to another. The next likely use case is
+            ``2``, for matching a coordinate catalog against *itself*
+            (``1`` is inappropriate because each point will find
+            itself as the closest match).
+
+        Returns
+        -------
+        idx : integer array
+            Indices into ``catalogcoord`` to get the matched points for
+            each of this object's coordinates. Shape matches this
+            object.
+        sep2d : `~astropy.coordinates.Angle`
+            The on-sky separation between the closest match for each
+            element in this object in ``catalogcoord``. Shape matches
+            this object.
+        dist3d : `~astropy.units.Quantity`
+            The 3D distance between the closest match for each element
+            in this object in ``catalogcoord``. Shape matches this
+            object.
+
+        Notes
+        -----
+        This method requires `SciPy <https://www.scipy.org>`_ to be
+        installed or it will fail.
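+
+        A typical call pattern, as an illustrative sketch (``sc`` is this
+        `SkyCoord` and ``catalog`` is assumed to be an array `SkyCoord`
+        with distances)::
+
+            idx, sep2d, dist3d = sc.match_to_catalog_3d(catalog)
+            nearest = catalog[idx]  # catalog entry closest in 3D to each sc element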
+
+        See Also
+        --------
+        astropy.coordinates.match_coordinates_3d
+        SkyCoord.match_to_catalog_sky
+        """
+        from .matching import match_coordinates_3d
+
+        if (isinstance(catalogcoord, (SkyCoord, BaseCoordinateFrame))
+                and catalogcoord.has_data):
+            self_in_catalog_frame = self.transform_to(catalogcoord)
+        else:
+            raise TypeError('Can only get separation to another SkyCoord or a '
+                            'coordinate frame with data')
+
+        res = match_coordinates_3d(self_in_catalog_frame, catalogcoord,
+                                   nthneighbor=nthneighbor,
+                                   storekdtree='_kdtree_3d')
+
+        return res
+
+    def search_around_sky(self, searcharoundcoords, seplimit):
+        """
+        Searches for all coordinates in this object around a supplied set of
+        points within a given on-sky separation.
+
+        This is intended for use on `~astropy.coordinates.SkyCoord` objects
+        with coordinate arrays, rather than a scalar coordinate. For a scalar
+        coordinate, it is better to use
+        `~astropy.coordinates.SkyCoord.separation`.
+
+        For more on how to use this (and related) functionality, see the
+        examples in :doc:`/coordinates/matchsep`.
+
+        Parameters
+        ----------
+        searcharoundcoords : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
+            The coordinates to search around to try to find matching points in
+            this `SkyCoord`. This should be an object with array coordinates,
+            not a scalar coordinate object.
+        seplimit : `~astropy.units.Quantity` with angle units
+            The on-sky separation to search within.
+
+        Returns
+        -------
+        idxsearcharound : integer array
+            Indices into ``self`` that match the corresponding elements of
+            ``idxself``. Shape matches ``idxself``.
+        idxself : integer array
+            Indices into ``searcharoundcoords`` that match the
+            corresponding elements of ``idxsearcharound``. Shape matches
+            ``idxsearcharound``.
+        sep2d : `~astropy.coordinates.Angle`
+            The on-sky separation between the coordinates. Shape matches
+            ``idxsearcharound`` and ``idxself``.
+        dist3d : `~astropy.units.Quantity`
+            The 3D distance between the coordinates. Shape matches
+            ``idxsearcharound`` and ``idxself``.
+
+        Notes
+        -----
+        This method requires `SciPy <https://www.scipy.org>`_ (>=0.12.0) to be
+        installed or it will fail.
+
+        In the current implementation, the return values are always sorted in
+        the same order as the ``searcharoundcoords`` (so ``idxsearcharound`` is
+        in ascending order). This is considered an implementation detail,
+        though, so it could change in a future release.
+
+        See Also
+        --------
+        astropy.coordinates.search_around_sky
+        SkyCoord.search_around_3d
+        """
+        from .matching import search_around_sky
+
+        return search_around_sky(searcharoundcoords, self, seplimit,
+                                 storekdtree='_kdtree_sky')
+
+    def search_around_3d(self, searcharoundcoords, distlimit):
+        """
+        Searches for all coordinates in this object around a supplied set of
+        points within a given 3D radius.
+
+        This is intended for use on `~astropy.coordinates.SkyCoord` objects
+        with coordinate arrays, rather than a scalar coordinate. For a scalar
+        coordinate, it is better to use
+        `~astropy.coordinates.SkyCoord.separation_3d`.
+
+        For more on how to use this (and related) functionality, see the
+        examples in :doc:`/coordinates/matchsep`.
+
+        Parameters
+        ----------
+        searcharoundcoords : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
+            The coordinates to search around to try to find matching points in
+            this `SkyCoord`. This should be an object with array coordinates,
+            not a scalar coordinate object.
+        distlimit : `~astropy.units.Quantity` with distance units
+            The physical radius to search within.
+
+        Returns
+        -------
+        idxsearcharound : integer array
+            Indices into ``self`` that match the corresponding elements of
+            ``idxself``. Shape matches ``idxself``.
+        idxself : integer array
+            Indices into ``searcharoundcoords`` that match the
+            corresponding elements of ``idxsearcharound``. Shape matches
+            ``idxsearcharound``.
+        sep2d : `~astropy.coordinates.Angle`
+            The on-sky separation between the coordinates. Shape matches
+            ``idxsearcharound`` and ``idxself``.
+        dist3d : `~astropy.units.Quantity`
+            The 3D distance between the coordinates. Shape matches
+            ``idxsearcharound`` and ``idxself``.
+
+        Notes
+        -----
+        This method requires `SciPy <https://www.scipy.org>`_ (>=0.12.0) to be
+        installed or it will fail.
+
+        In the current implementation, the return values are always sorted in
+        the same order as the ``searcharoundcoords`` (so ``idxsearcharound`` is
+        in ascending order). This is considered an implementation detail,
+        though, so it could change in a future release.
+
+        See Also
+        --------
+        astropy.coordinates.search_around_3d
+        SkyCoord.search_around_sky
+        """
+        from .matching import search_around_3d
+
+        return search_around_3d(searcharoundcoords, self, distlimit,
+                                storekdtree='_kdtree_3d')
+
+    def position_angle(self, other):
+        """
+        Computes the on-sky position angle (East of North) between this
+        `SkyCoord` and another.
+
+        Parameters
+        ----------
+        other : `SkyCoord`
+            The other coordinate to compute the position angle to. It is
+            treated as the "head" of the vector of the position angle.
+
+        Returns
+        -------
+        pa : `~astropy.coordinates.Angle`
+            The (positive) position angle of the vector pointing from ``self``
+            to ``other``. If either ``self`` or ``other`` contains arrays, this
+            will be an array following the appropriate `numpy` broadcasting
+            rules.
+
+        Examples
+        --------
+
+        >>> c1 = SkyCoord(0*u.deg, 0*u.deg)
+        >>> c2 = SkyCoord(1*u.deg, 0*u.deg)
+        >>> c1.position_angle(c2).degree
+        90.0
+        >>> c3 = SkyCoord(1*u.deg, 1*u.deg)
+        >>> c1.position_angle(c3).degree  # doctest: +FLOAT_CMP
+        44.995636455344844
+        """
+        from . import angle_utilities
+
+        if not self.is_equivalent_frame(other):
+            try:
+                other = other.transform_to(self, merge_attributes=False)
+            except TypeError:
+                raise TypeError('Can only get position_angle to another '
+                                'SkyCoord or a coordinate frame with data')
+
+        slat = self.represent_as(UnitSphericalRepresentation).lat
+        slon = self.represent_as(UnitSphericalRepresentation).lon
+        olat = other.represent_as(UnitSphericalRepresentation).lat
+        olon = other.represent_as(UnitSphericalRepresentation).lon
+
+        return angle_utilities.position_angle(slon, slat, olon, olat)
+
+    def skyoffset_frame(self, rotation=None):
+        """
+        Returns the sky offset frame with this `SkyCoord` at the origin.
+
+        Parameters
+        ----------
+        rotation : `~astropy.coordinates.Angle` or `~astropy.units.Quantity` with angle units
+            The final rotation of the frame about the ``origin``. The sign of
+            the rotation is the left-hand rule. That is, an object at a
+            particular position angle in the un-rotated system will be sent to
+            the positive latitude (z) direction in the final frame.
+
+        Returns
+        -------
+        astrframe : `~astropy.coordinates.SkyOffsetFrame`
+            A sky offset frame of the same type as this `SkyCoord` (e.g., if
+            this object has an ICRS coordinate, the resulting frame is
+            SkyOffsetICRS, with the origin set to this object)
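+
+        Examples
+        --------
+        A minimal sketch (values are illustrative)::
+
+            >>> from astropy.coordinates import SkyCoord
+            >>> import astropy.units as u
+            >>> center = SkyCoord(10*u.deg, 45*u.deg)
+            >>> target = SkyCoord(11*u.deg, 46*u.deg)
+            >>> offsets = target.transform_to(center.skyoffset_frame())
+            >>> offsets.lon, offsets.lat  # doctest: +SKIP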
+ """ + return SkyOffsetFrame(origin=self, rotation=rotation) + + def get_constellation(self, short_name=False, constellation_list='iau'): + """ + Determines the constellation(s) of the coordinates this `SkyCoord` + contains. + + Parameters + ---------- + short_name : bool + If True, the returned names are the IAU-sanctioned abbreviated + names. Otherwise, full names for the constellations are used. + constellation_list : str + The set of constellations to use. Currently only ``'iau'`` is + supported, meaning the 88 "modern" constellations endorsed by the IAU. + + Returns + ------- + constellation : str or string array + If this is a scalar coordinate, returns the name of the + constellation. If it is an array `SkyCoord`, it returns an array of + names. + + Notes + ----- + To determine which constellation a point on the sky is in, this first + precesses to B1875, and then uses the Delporte boundaries of the 88 + modern constellations, as tabulated by + `Roman 1987 `_. + + See Also + -------- + astropy.coordinates.get_constellation + """ + from .funcs import get_constellation + + return get_constellation(self, short_name, constellation_list) + + # WCS pixel to/from sky conversions + def to_pixel(self, wcs, origin=0, mode='all'): + """ + Convert this coordinate to pixel coordinates using a `~astropy.wcs.WCS` + object. + + Parameters + ---------- + wcs : `~astropy.wcs.WCS` + The WCS to use for convert + origin : int + Whether to return 0 or 1-based pixel coordinates. + mode : 'all' or 'wcs' + Whether to do the transformation including distortions (``'all'``) or + only including only the core WCS transformation (``'wcs'``). + + Returns + ------- + xp, yp : `numpy.ndarray` + The pixel coordinates + + See Also + -------- + astropy.wcs.utils.skycoord_to_pixel : the implementation of this method + """ + return skycoord_to_pixel(self, wcs=wcs, origin=origin, mode=mode) + + @classmethod + def from_pixel(cls, xp, yp, wcs, origin=0, mode='all'): + """ + Create a new `SkyCoord` from pixel coordinates using an + `~astropy.wcs.WCS` object. + + Parameters + ---------- + xp, yp : float or `numpy.ndarray` + The coordinates to convert. + wcs : `~astropy.wcs.WCS` + The WCS to use for convert + origin : int + Whether to return 0 or 1-based pixel coordinates. + mode : 'all' or 'wcs' + Whether to do the transformation including distortions (``'all'``) or + only including only the core WCS transformation (``'wcs'``). + + Returns + ------- + coord : an instance of this class + A new object with sky coordinates corresponding to the input ``xp`` + and ``yp``. + + See Also + -------- + to_pixel : to do the inverse operation + astropy.wcs.utils.pixel_to_skycoord : the implementation of this method + """ + return pixel_to_skycoord(xp, yp, wcs=wcs, origin=origin, mode=mode, cls=cls) + + def radial_velocity_correction(self, kind='barycentric', obstime=None, + location=None): + """ + Compute the correction required to convert a radial velocity at a given + time and place on the Earth's Surface to a barycentric or heliocentric + velocity. + + Parameters + ---------- + kind : str + The kind of velocity correction. Must be 'barycentric' or + 'heliocentric'. + obstime : `~astropy.time.Time` or None, optional + The time at which to compute the correction. If `None`, the + ``obstime`` frame attribute on the `SkyCoord` will be used. + location : `~astropy.coordinates.EarthLocation` or None, optional + The observer location at which to compute the correction. 
+            If `None`, the ``location`` frame attribute on the passed-in
+            ``obstime`` will be used, and if that is None, the ``location``
+            frame attribute on the `SkyCoord` will be used.
+
+        Raises
+        ------
+        ValueError
+            If either ``obstime`` or ``location`` is passed in (not ``None``)
+            when the frame attribute is already set on this `SkyCoord`.
+        TypeError
+            If ``obstime`` or ``location`` aren't provided, either as arguments
+            or as frame attributes.
+
+        Returns
+        -------
+        vcorr : `~astropy.units.Quantity` with velocity units
+            The correction with a positive sign. I.e., *add* this
+            to an observed radial velocity to get the barycentric (or
+            heliocentric) velocity. If m/s precision or better is needed,
+            see the notes below.
+
+        Notes
+        -----
+        The barycentric correction is calculated to higher precision than the
+        heliocentric correction and includes additional physics (e.g., time
+        dilation). Use barycentric corrections if m/s precision is required.
+
+        The algorithm here is sufficient to perform corrections at the mm/s
+        level, but care is needed in application. Strictly speaking, the
+        barycentric correction is multiplicative and should be applied as::
+
+            sc = SkyCoord(1*u.deg, 2*u.deg)
+            vcorr = sc.radial_velocity_correction(kind='barycentric',
+                                                  obstime=t, location=loc)
+            rv = rv + vcorr + rv * vcorr / consts.c
+
+        If your target is nearby and/or has finite proper motion you may need
+        to account for terms arising from this. See Wright & Eastman (2014)
+        for details.
+
+        The default is for this method to use the builtin ephemeris for
+        computing the Sun and Earth location. Other ephemerides can be chosen
+        by setting the `~astropy.coordinates.solar_system_ephemeris` variable,
+        either directly or via a ``with`` statement. For example, to use the
+        JPL ephemeris, do::
+
+            sc = SkyCoord(1*u.deg, 2*u.deg)
+            with coord.solar_system_ephemeris.set('jpl'):
+                rv += sc.radial_velocity_correction(obstime=t, location=loc)
+
+        """
+        # has to be here to prevent circular imports
+        from .solar_system import get_body_barycentric_posvel, get_body_barycentric
+
+        # location validation
+        timeloc = getattr(obstime, 'location', None)
+        if location is None:
+            if self.location is not None:
+                location = self.location
+                if timeloc is not None:
+                    raise ValueError('`location` cannot be in both the '
+                                     'passed-in `obstime` and this `SkyCoord` '
+                                     'because it is ambiguous which is meant '
+                                     'for the radial_velocity_correction.')
+            elif timeloc is not None:
+                location = timeloc
+            else:
+                raise TypeError('Must provide a `location` to '
+                                'radial_velocity_correction, either as a '
+                                'SkyCoord frame attribute, as an attribute on '
+                                'the passed in `obstime`, or in the method '
+                                'call.')
+
+        elif self.location is not None or timeloc is not None:
+            raise ValueError('Cannot compute radial velocity correction if '
+                             '`location` argument is passed in and there is '
+                             'also a `location` attribute on this SkyCoord or '
+                             'the passed-in `obstime`.')
+
+        # obstime validation
+        if obstime is None:
+            obstime = self.obstime
+            if obstime is None:
+                raise TypeError('Must provide an `obstime` to '
+                                'radial_velocity_correction, either as a '
+                                'SkyCoord frame attribute or in the method '
+                                'call.')
+        elif self.obstime is not None:
+            raise ValueError('Cannot compute radial velocity correction if '
+                             '`obstime` argument is passed in and it is '
+                             'inconsistent with the `obstime` frame '
+                             'attribute on the SkyCoord')
+
+        pos_earth, v_earth = get_body_barycentric_posvel('earth', obstime)
+        if kind == 'barycentric':
+            v_origin_to_earth = v_earth
+        elif kind == 'heliocentric':
+            v_sun = get_body_barycentric_posvel('sun', obstime)[1]
+            v_origin_to_earth = v_earth - v_sun
+        else:
+            raise ValueError("`kind` argument to radial_velocity_correction must "
+                             "be 'barycentric' or 'heliocentric', but got "
+                             "'{}'".format(kind))
+
+        gcrs_p, gcrs_v = location.get_gcrs_posvel(obstime)
+        # Transforming to GCRS is not the correct thing to do here, since we
+        # don't want to include aberration (or light deflection); instead,
+        # only apply parallax if necessary.
+        if self.data.__class__ is UnitSphericalRepresentation:
+            targcart = self.icrs.cartesian
+        else:
+            # skycoord has distances so apply parallax
+            obs_icrs_cart = pos_earth + gcrs_p
+            icrs_cart = self.icrs.cartesian
+            targcart = icrs_cart - obs_icrs_cart
+            targcart /= targcart.norm()
+
+        if kind == 'barycentric':
+            beta_obs = (v_origin_to_earth + gcrs_v) / speed_of_light
+            gamma_obs = 1 / np.sqrt(1 - beta_obs.norm()**2)
+            gr = location._gravitational_redshift(obstime)
+            # barycentric redshift according to eq 28 in Wright & Eastman (2014),
+            # neglecting Shapiro delay and effects of the star's own motion
+            zb = gamma_obs * (1 + targcart.dot(beta_obs)) / (1 + gr/speed_of_light) - 1
+            return zb * speed_of_light
+        else:
+            # do a simpler correction ignoring time dilation and gravitational
+            # redshift; this is adequate since heliocentric corrections should
+            # not be used if cm/s precision is required.
+            return targcart.dot(v_origin_to_earth + gcrs_v)
+
+    # Table interactions
+    @classmethod
+    def guess_from_table(cls, table, **coord_kwargs):
+        r"""
+        A convenience method to create and return a new `SkyCoord` from the data
+        in an astropy Table.
+
+        This method matches table columns that start with the case-insensitive
+        names of the components of the requested frames, if they are also
+        followed by a non-alphanumeric character. It will also match columns
+        that *end* with the component name if a non-alphanumeric character is
+        *before* it.
+
+        For example, the first rule means columns with names like
+        ``'RA[J2000]'`` or ``'ra'`` will be interpreted as ``ra`` attributes for
+        `~astropy.coordinates.ICRS` frames, but ``'RAJ2000'`` or ``'radius'``
+        are *not*. Similarly, the second rule applied to the
+        `~astropy.coordinates.Galactic` frame means that a column named
+        ``'gal_l'`` will be used as the ``l`` component, but ``gall`` or
+        ``'fill'`` will not.
+
+        The definition of alphanumeric here is based on Unicode's definition
+        of alphanumeric, except without ``_`` (which is normally considered
+        alphanumeric). So for ASCII, this means the non-alphanumeric characters
+        are ``_!"#$%&'()*+,-./\:;<=>?@[]^`{|}~``.
+
+        Parameters
+        ----------
+        table : astropy.Table
+            The table to load data from.
+        coord_kwargs
+            Any additional keyword arguments are passed directly to this class's
+            constructor.
+
+        Returns
+        -------
+        newsc : same as this class
+            The new `SkyCoord` (or subclass) object.
+        """
+        initial_frame = coord_kwargs.get('frame')
+        frame = _get_frame([], coord_kwargs)
+        coord_kwargs['frame'] = initial_frame
+
+        comp_kwargs = {}
+        for comp_name in frame.representation_component_names:
+            # this matches things like 'ra[...]' but *not* 'rad'.
+            # note that the "_" must be in there explicitly, because
+            # "alphanumeric" usually includes underscores.
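+            # e.g. for comp_name 'ra' the combined pattern matches 'ra',
+            # 'RA[J2000]' or 'center_ra', but not 'RAJ2000' or 'aura'.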
+ starts_with_comp = comp_name + r'(\W|\b|_)' + # this part matches stuff like 'center_ra', but *not* + # 'aura' + ends_with_comp = r'.*(\W|\b|_)' + comp_name + r'\b' + # the final regex ORs together the two patterns + rex = re.compile('(' + starts_with_comp + ')|(' + ends_with_comp + ')', + re.IGNORECASE | re.UNICODE) + + for col_name in table.colnames: + if rex.match(col_name): + if comp_name in comp_kwargs: + oldname = comp_kwargs[comp_name].name + msg = ('Found at least two matches for component "{0}"' + ': "{1}" and "{2}". Cannot continue with this ' + 'ambiguity.') + raise ValueError(msg.format(comp_name, oldname, col_name)) + comp_kwargs[comp_name] = table[col_name] + + for k, v in comp_kwargs.items(): + if k in coord_kwargs: + raise ValueError('Found column "{0}" in table, but it was ' + 'already provided as "{1}" keyword to ' + 'guess_from_table function.'.format(v.name, k)) + else: + coord_kwargs[k] = v + + return cls(**coord_kwargs) + + # Name resolve + @classmethod + def from_name(cls, name, frame='icrs'): + """ + Given a name, query the CDS name resolver to attempt to retrieve + coordinate information for that object. The search database, sesame + url, and query timeout can be set through configuration items in + ``astropy.coordinates.name_resolve`` -- see docstring for + `~astropy.coordinates.get_icrs_coordinates` for more + information. + + Parameters + ---------- + name : str + The name of the object to get coordinates for, e.g. ``'M42'``. + frame : str or `BaseCoordinateFrame` class or instance + The frame to transform the object to. + + Returns + ------- + coord : SkyCoord + Instance of the SkyCoord class. + """ + + from .name_resolve import get_icrs_coordinates + + icrs_coord = get_icrs_coordinates(name) + icrs_sky_coord = cls(icrs_coord) + if frame in ('icrs', icrs_coord.__class__): + return icrs_sky_coord + else: + return icrs_sky_coord.transform_to(frame) + + +# <----------------Private utility functions below here-------------------------> + + +def _get_frame_class(frame): + """ + Get a frame class from the input `frame`, which could be a frame name + string, or frame class. + """ + import inspect + + if isinstance(frame, six.string_types): + frame_names = frame_transform_graph.get_names() + if frame not in frame_names: + raise ValueError('Coordinate frame {0} not in allowed values {1}' + .format(frame, sorted(frame_names))) + frame_cls = frame_transform_graph.lookup_name(frame) + + elif inspect.isclass(frame) and issubclass(frame, BaseCoordinateFrame): + frame_cls = frame + + else: + raise ValueError('Coordinate frame must be a frame name or frame class') + + return frame_cls + + +def _get_frame(args, kwargs): + """ + Determine the coordinate frame from input SkyCoord args and kwargs. This + modifies args and/or kwargs in-place to remove the item that provided + `frame`. It also infers the frame if an input coordinate was provided and + checks for conflicts. + + This allows for frame to be specified as a string like 'icrs' or a frame + class like ICRS, but not an instance ICRS() since the latter could have + non-default representation attributes which would require a three-way merge. + """ + frame = kwargs.pop('frame', None) + + if frame is None and len(args) > 1: + + # We do not allow frames to be passed as positional arguments if data + # is passed separately from frame. 
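+        # e.g. SkyCoord(ICRS(), 1*u.deg, 2*u.deg) is rejected below; a frame
+        # instance must instead be given via the keyword:
+        # SkyCoord(1*u.deg, 2*u.deg, frame=ICRS()).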
+ + for arg in args: + + if isinstance(arg, (SkyCoord, BaseCoordinateFrame)): + raise ValueError("{0} instance cannot be passed as a positional " + "argument for the frame, pass it using the " + "frame= keyword instead.".format(arg.__class__.__name__)) + + # If the frame is an instance or SkyCoord, we split up the attributes and + # make it into a class. + + if isinstance(frame, SkyCoord): + # Copy any extra attributes if they are not explicitly given. + for attr in frame._extra_frameattr_names: + kwargs.setdefault(attr, getattr(frame, attr)) + frame = frame.frame + + if isinstance(frame, BaseCoordinateFrame): + + for attr in frame.get_frame_attr_names(): + if attr in kwargs: + raise ValueError("cannot specify frame attribute '{0}' directly in SkyCoord since a frame instance was passed in".format(attr)) + else: + kwargs[attr] = getattr(frame, attr) + + frame = frame.__class__ + + if frame is not None: + # Frame was provided as kwarg so validate and coerce into corresponding frame. + frame_cls = _get_frame_class(frame) + frame_specified_explicitly = True + else: + # Look for the frame in args + for arg in args: + try: + frame_cls = _get_frame_class(arg) + frame_specified_explicitly = True + except ValueError: + pass + else: + args.remove(arg) + warnings.warn("Passing a frame as a positional argument is now " + "deprecated, use the frame= keyword argument " + "instead.", AstropyDeprecationWarning) + break + else: + # Not in args nor kwargs - default to icrs + frame_cls = ICRS + frame_specified_explicitly = False + + # Check that the new frame doesn't conflict with existing coordinate frame + # if a coordinate is supplied in the args list. If the frame still had not + # been set by this point and a coordinate was supplied, then use that frame. + for arg in args: + # this catches the "single list passed in" case. For that case we want + # to allow the first argument to set the class. That's OK because + # _parse_coordinate_arg goes and checks that the frames match between + # the first and all the others + if (isinstance(arg, (collections.Sequence, np.ndarray)) and + len(args) == 1 and len(arg) > 0): + arg = arg[0] + + coord_frame_cls = None + if isinstance(arg, BaseCoordinateFrame): + coord_frame_cls = arg.__class__ + elif isinstance(arg, SkyCoord): + coord_frame_cls = arg.frame.__class__ + + if coord_frame_cls is not None: + if not frame_specified_explicitly: + frame_cls = coord_frame_cls + elif frame_cls is not coord_frame_cls: + raise ValueError("Cannot override frame='{0}' of input coordinate with " + "new frame='{1}'. Instead transform the coordinate." + .format(coord_frame_cls.__name__, frame_cls.__name__)) + + if 'representation' in kwargs: + frame = frame_cls(representation=_get_repr_cls(kwargs['representation'])) + else: + frame = frame_cls() + + return frame + + +def _get_units(args, kwargs): + """ + Get the longitude unit and latitude unit from kwargs. Possible enhancement + is to allow input from args as well. 
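+
+    For example (a sketch of the accepted forms): ``unit='deg'`` yields
+    ``[deg, deg, deg]``, ``unit='hourangle,deg'`` yields
+    ``[hourangle, deg, None]``, and omitting ``unit`` yields
+    ``[None, None, None]``.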
+ """ + if 'unit' not in kwargs: + units = [None, None, None] + + else: + units = kwargs.pop('unit') + + if isinstance(units, six.string_types): + units = [x.strip() for x in units.split(',')] + # Allow for input like unit='deg' or unit='m' + if len(units) == 1: + units = [units[0], units[0], units[0]] + elif isinstance(units, (Unit, IrreducibleUnit)): + units = [units, units, units] + + try: + units = [(Unit(x) if x else None) for x in units] + units.extend(None for x in range(3 - len(units))) + if len(units) > 3: + raise ValueError() + except Exception: + raise ValueError('Unit keyword must have one to three unit values as ' + 'tuple or comma-separated string') + + return units + + +def _parse_coordinate_arg(coords, frame, units, init_kwargs): + """ + Single unnamed arg supplied. This must be: + - Coordinate frame with data + - Representation + - SkyCoord + - List or tuple of: + - String which splits into two values + - Iterable with two values + - SkyCoord, frame, or representation objects. + + Returns a dict mapping coordinate attribute names to values (or lists of + values) + """ + is_scalar = False # Differentiate between scalar and list input + valid_kwargs = {} # Returned dict of lon, lat, and distance (optional) + + frame_attr_names = frame.representation_component_names.keys() + repr_attr_names = frame.representation_component_names.values() + repr_attr_classes = frame.representation.attr_classes.values() + n_attr_names = len(repr_attr_names) + + # Turn a single string into a list of strings for convenience + if isinstance(coords, six.string_types): + is_scalar = True + coords = [coords] + + if isinstance(coords, (SkyCoord, BaseCoordinateFrame)): + # Note that during parsing of `frame` it is checked that any coordinate + # args have the same frame as explicitly supplied, so don't worry here. + + if not coords.has_data: + raise ValueError('Cannot initialize from a frame without coordinate data') + + data = coords.data.represent_as(frame.representation) + + values = [] # List of values corresponding to representation attrs + for repr_attr_name in repr_attr_names: + # If coords did not have an explicit distance then don't include in initializers. + if (isinstance(coords.data, UnitSphericalRepresentation) and + repr_attr_name == 'distance'): + continue + + # Get the value from `data` in the eventual representation + values.append(getattr(data, repr_attr_name)) + + for attr in frame_transform_graph.frame_attributes: + value = getattr(coords, attr, None) + use_value = (isinstance(coords, SkyCoord) + or attr not in coords._attr_names_with_defaults) + if use_value and value is not None: + valid_kwargs[attr] = value + + elif isinstance(coords, BaseRepresentation): + data = coords.represent_as(frame.representation) + values = [getattr(data, repr_attr_name) for repr_attr_name in repr_attr_names] + + elif (isinstance(coords, np.ndarray) and coords.dtype.kind in 'if' + and coords.ndim == 2 and coords.shape[1] <= 3): + # 2-d array of coordinate values. Handle specially for efficiency. + values = coords.transpose() # Iterates over repr attrs + + elif isinstance(coords, (collections.Sequence, np.ndarray)): + # Handles list-like input. 
+ + vals = [] + is_ra_dec_representation = ('ra' in frame.representation_component_names and + 'dec' in frame.representation_component_names) + coord_types = (SkyCoord, BaseCoordinateFrame, BaseRepresentation) + if any(isinstance(coord, coord_types) for coord in coords): + # this parsing path is used when there are coordinate-like objects + # in the list - instead of creating lists of values, we create + # SkyCoords from the list elements and then combine them. + scs = [SkyCoord(coord, **init_kwargs) for coord in coords] + + # Check that all frames are equivalent + for sc in scs[1:]: + if not sc.is_equivalent_frame(scs[0]): + raise ValueError("List of inputs don't have equivalent " + "frames: {0} != {1}".format(sc, scs[0])) + + # Now use the first to determine if they are all UnitSpherical + allunitsphrepr = isinstance(scs[0].data, UnitSphericalRepresentation) + + # get the frame attributes from the first coord in the list, because + # from the above we know it matches all the others. First copy over + # the attributes that are in the frame itself, then copy over any + # extras in the SkyCoord + for fattrnm in scs[0].frame.frame_attributes: + valid_kwargs[fattrnm] = getattr(scs[0].frame, fattrnm) + for fattrnm in scs[0]._extra_frameattr_names: + valid_kwargs[fattrnm] = getattr(scs[0], fattrnm) + + # Now combine the values, to be used below + values = [] + for data_attr_name, repr_attr_name in zip(frame_attr_names, repr_attr_names): + if allunitsphrepr and repr_attr_name == 'distance': + # if they are *all* UnitSpherical, don't give a distance + continue + data_vals = [] + for sc in scs: + data_val = getattr(sc, data_attr_name) + data_vals.append(data_val.reshape(1,) if sc.isscalar else data_val) + concat_vals = np.concatenate(data_vals) + # Hack because np.concatenate doesn't fully work with Quantity + if isinstance(concat_vals, u.Quantity): + concat_vals._unit = data_val.unit + values.append(concat_vals) + else: + # none of the elements are "frame-like" + # turn into a list of lists like [[v1_0, v2_0, v3_0], ... [v1_N, v2_N, v3_N]] + for coord in coords: + if isinstance(coord, six.string_types): + coord1 = coord.split() + if len(coord1) == 6: + coord = (' '.join(coord1[:3]), ' '.join(coord1[3:])) + elif is_ra_dec_representation: + coord = _parse_ra_dec(coord) + else: + coord = coord1 + vals.append(coord) # Assumes coord is a sequence at this point + + # Do some basic validation of the list elements: all have a length and all + # lengths the same + try: + n_coords = sorted(set(len(x) for x in vals)) + except Exception: + raise ValueError('One or more elements of input sequence does not have a length') + + if len(n_coords) > 1: + raise ValueError('Input coordinate values must have same number of elements, found {0}' + .format(n_coords)) + n_coords = n_coords[0] + + # Must have no more coord inputs than representation attributes + if n_coords > n_attr_names: + raise ValueError('Input coordinates have {0} values but ' + 'representation {1} only accepts {2}' + .format(n_coords, frame.representation.get_name(), n_attr_names)) + + # Now transpose vals to get [(v1_0 .. v1_N), (v2_0 .. v2_N), (v3_0 .. v3_N)] + # (ok since we know it is exactly rectangular). (Note: can't just use zip(*values) + # because Longitude et al distinguishes list from tuple so [a1, a2, ..] is needed + # while (a1, a2, ..) doesn't work. 
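+            # e.g. [[1, 10], [2, 20], [3, 30]] -> [[1, 2, 3], [10, 20, 30]]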
+ values = [list(x) for x in zip(*vals)] + + if is_scalar: + values = [x[0] for x in values] + else: + raise ValueError('Cannot parse coordinates from first argument') + + # Finally we have a list of values from which to create the keyword args + # for the frame initialization. Validate by running through the appropriate + # class initializer and supply units (which might be None). + try: + for frame_attr_name, repr_attr_class, value, unit in zip( + frame_attr_names, repr_attr_classes, values, units): + valid_kwargs[frame_attr_name] = repr_attr_class(value, unit=unit, + copy=False) + except Exception as err: + raise ValueError('Cannot parse first argument data "{0}" for attribute ' + '{1}'.format(value, frame_attr_name), err) + return valid_kwargs + + +def _get_representation_attrs(frame, units, kwargs): + """ + Find instances of the "representation attributes" for specifying data + for this frame. Pop them off of kwargs, run through the appropriate class + constructor (to validate and apply unit), and put into the output + valid_kwargs. "Representation attributes" are the frame-specific aliases + for the underlying data values in the representation, e.g. "ra" for "lon" + for many equatorial spherical representations, or "w" for "x" in the + cartesian representation of Galactic. + """ + frame_attr_names = frame.representation_component_names.keys() + repr_attr_classes = frame.representation.attr_classes.values() + + valid_kwargs = {} + for frame_attr_name, repr_attr_class, unit in zip(frame_attr_names, repr_attr_classes, units): + value = kwargs.pop(frame_attr_name, None) + if value is not None: + valid_kwargs[frame_attr_name] = repr_attr_class(value, unit=unit) + + return valid_kwargs + + +def _parse_ra_dec(coord_str): + """ + Parse RA and Dec values from a coordinate string. Currently the + following formats are supported: + + * space separated 6-value format + * space separated <6-value format, this requires a plus or minus sign + separation between RA and Dec + * sign separated format + * JHHMMSS.ss+DDMMSS.ss format, with up to two optional decimal digits + * JDDDMMSS.ss+DDMMSS.ss format, with up to two optional decimal digits + + Parameters + ---------- + coord_str : str + Coordinate string to parse. + + Returns + ------- + coord : str or list of str + Parsed coordinate values. + """ + + if isinstance(coord_str, six.string_types): + coord1 = coord_str.split() + else: + # This exception should never be raised from SkyCoord + raise TypeError('coord_str must be a single str') + + if len(coord1) == 6: + coord = (' '.join(coord1[:3]), ' '.join(coord1[3:])) + elif len(coord1) > 2: + coord = PLUS_MINUS_RE.split(coord_str) + coord = (coord[0], ' '.join(coord[1:])) + elif len(coord1) == 1: + match_j = J_PREFIXED_RA_DEC_RE.match(coord_str) + if match_j: + coord = match_j.groups() + if len(coord[0].split('.')[0]) == 7: + coord = ('{0} {1} {2}'. + format(coord[0][0:3], coord[0][3:5], coord[0][5:]), + '{0} {1} {2}'. + format(coord[1][0:3], coord[1][3:5], coord[1][5:])) + else: + coord = ('{0} {1} {2}'. + format(coord[0][0:2], coord[0][2:4], coord[0][4:]), + '{0} {1} {2}'. 
+ format(coord[1][0:3], coord[1][3:5], coord[1][5:])) + else: + coord = PLUS_MINUS_RE.split(coord_str) + coord = (coord[0], ' '.join(coord[1:])) + else: + coord = coord1 + + return coord diff --git a/astropy/coordinates/solar_system.py b/astropy/coordinates/solar_system.py new file mode 100644 index 0000000..d188738 --- /dev/null +++ b/astropy/coordinates/solar_system.py @@ -0,0 +1,511 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" +This module contains convenience functions for retrieving solar system +ephemerides from jplephem. +""" + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +from collections import OrderedDict + +import numpy as np + +from .sky_coordinate import SkyCoord +from ..utils.data import download_file +from ..utils.decorators import classproperty +from ..utils.state import ScienceState +from ..utils import indent +from .. import units as u +from .. import _erfa as erfa +from ..constants import c as speed_of_light +from .representation import CartesianRepresentation +from .orbital_elements import calc_moon +from .builtin_frames import GCRS, ICRS +from .builtin_frames.utils import get_jd12 +from ..extern import six + +__all__ = ["get_body", "get_moon", "get_body_barycentric", + "get_body_barycentric_posvel", "solar_system_ephemeris"] + + +DEFAULT_JPL_EPHEMERIS = 'de430' + +"""List of kernel pairs needed to calculate positions of a given object.""" +BODY_NAME_TO_KERNEL_SPEC = OrderedDict( + (('sun', [(0, 10)]), + ('mercury', [(0, 1), (1, 199)]), + ('venus', [(0, 2), (2, 299)]), + ('earth-moon-barycenter', [(0, 3)]), + ('earth', [(0, 3), (3, 399)]), + ('moon', [(0, 3), (3, 301)]), + ('mars', [(0, 4)]), + ('jupiter', [(0, 5)]), + ('saturn', [(0, 6)]), + ('uranus', [(0, 7)]), + ('neptune', [(0, 8)]), + ('pluto', [(0, 9)])) + ) + +"""Indices to the plan94 routine for the given object.""" +PLAN94_BODY_NAME_TO_PLANET_INDEX = OrderedDict( + (('mercury', 1), + ('venus', 2), + ('earth-moon-barycenter', 3), + ('mars', 4), + ('jupiter', 5), + ('saturn', 6), + ('uranus', 7), + ('neptune', 8))) + +_EPHEMERIS_NOTE = """ +You can either give an explicit ephemeris or use a default, which is normally +a built-in ephemeris that does not require ephemeris files. To change +the default to be the JPL ephemeris:: + + >>> from astropy.coordinates import solar_system_ephemeris + >>> solar_system_ephemeris.set('jpl') # doctest: +SKIP + +Use of any JPL ephemeris requires the jplephem package +(https://pypi.python.org/pypi/jplephem). +If needed, the ephemeris file will be downloaded (and cached). + +One can check which bodies are covered by a given ephemeris using:: + >>> solar_system_ephemeris.bodies + ('earth', 'sun', 'moon', 'mercury', 'venus', 'earth-moon-barycenter', 'mars', 'jupiter', 'saturn', 'uranus', 'neptune') +"""[1:-1] + + +class solar_system_ephemeris(ScienceState): + """Default ephemerides for calculating positions of Solar-System bodies. + + This can be one of the following:: + + - 'builtin': polynomial approximations to the orbital elements. + - 'de430' or 'de432s': short-cuts for recent JPL dynamical models. + - 'jpl': Alias for the default JPL ephemeris (currently, 'de430'). + - URL: (str) The url to a SPK ephemeris in SPICE binary (.bsp) format. + - `None`: Ensure an Exception is raised without an explicit ephemeris. + + The default is 'builtin', which uses the ``epv00`` and ``plan94`` + routines from the ``erfa`` implementation of the Standards Of Fundamental + Astronomy library. 
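+
+    For example, to use the smaller de432s JPL kernel (a sketch; this
+    requires the jplephem package and a one-time ~10MB download)::
+
+        >>> from astropy.coordinates import solar_system_ephemeris
+        >>> solar_system_ephemeris.set('de432s')  # doctest: +SKIP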
+ + Notes + ----- + Any file required will be downloaded (and cached) when the state is set. + The default Satellite Planet Kernel (SPK) file from NASA JPL (de430) is + ~120MB, and covers years ~1550-2650 CE [1]_. The smaller de432s file is + ~10MB, and covers years 1950-2050 [2]_. Older versions of the JPL + ephemerides (such as the widely used de200) can be used via their URL [3]_. + + .. [1] http://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/aareadme_de430-de431.txt + .. [2] http://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/aareadme_de432s.txt + .. [3] http://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/a_old_versions/ + """ + _value = 'builtin' + _kernel = None + + @classmethod + def validate(cls, value): + # make no changes if value is None + if value is None: + return cls._value + # Set up Kernel; if the file is not in cache, this will download it. + cls.get_kernel(value) + return value + + @classmethod + def get_kernel(cls, value): + # ScienceState only ensures the `_value` attribute is up to date, + # so we need to be sure any kernel returned is consistent. + if cls._kernel is None or cls._kernel.origin != value: + if cls._kernel is not None: + cls._kernel.daf.file.close() + cls._kernel = None + kernel = _get_kernel(value) + if kernel is not None: + kernel.origin = value + cls._kernel = kernel + return cls._kernel + + @classproperty + def kernel(cls): + return cls.get_kernel(cls._value) + + @classproperty + def bodies(cls): + if cls._value is None: + return None + if cls._value.lower() == 'builtin': + return (('earth', 'sun', 'moon') + + tuple(PLAN94_BODY_NAME_TO_PLANET_INDEX.keys())) + else: + return tuple(BODY_NAME_TO_KERNEL_SPEC.keys()) + + +def _get_kernel(value): + """ + Try importing jplephem, download/retrieve from cache the Satellite Planet + Kernel corresponding to the given ephemeris. + """ + if value is None or value.lower() == 'builtin': + return None + + if value.lower() == 'jpl': + value = DEFAULT_JPL_EPHEMERIS + + if value.lower() in ('de430', 'de432s'): + value = ('http://naif.jpl.nasa.gov/pub/naif/generic_kernels' + '/spk/planets/{:s}.bsp'.format(value.lower())) + else: + try: + six.moves.urllib.parse.urlparse(value) + except Exception: + raise ValueError('{} was not one of the standard strings and ' + 'could not be parsed as a URL'.format(value)) + + try: + from jplephem.spk import SPK + except ImportError: + raise ImportError("Solar system JPL ephemeris calculations require " + "the jplephem package " + "(https://pypi.python.org/pypi/jplephem)") + + return SPK.open(download_file(value, cache=True)) + + +def _get_body_barycentric_posvel(body, time, ephemeris=None, + get_velocity=True): + """Calculate the barycentric position (and velocity) of a solar system body. + + Parameters + ---------- + body : str or other + The solar system body for which to calculate positions. Can also be a + kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL + kernel. + time : `~astropy.time.Time` + Time of observation. + ephemeris : str, optional + Ephemeris to use. By default, use the one set with + ``astropy.coordinates.solar_system_ephemeris.set`` + get_velocity : bool, optional + Whether or not to calculate the velocity as well as the position. + + Returns + ------- + position : `~astropy.coordinates.CartesianRepresentation` or tuple + Barycentric (ICRS) position or tuple of position and velocity. + + Notes + ----- + No velocity can be calculated with the built-in ephemeris for the Moon. 
+ + Whether or not velocities are calculated makes little difference for the + built-in ephemerides, but for most JPL ephemeris files, the execution time + roughly doubles. + """ + + if ephemeris is None: + ephemeris = solar_system_ephemeris.get() + if ephemeris is None: + raise ValueError(_EPHEMERIS_NOTE) + kernel = solar_system_ephemeris.kernel + else: + kernel = _get_kernel(ephemeris) + + jd1, jd2 = get_jd12(time, 'tdb') + if kernel is None: + body = body.lower() + earth_pv_helio, earth_pv_bary = erfa.epv00(jd1, jd2) + if body == 'earth': + body_pv_bary = earth_pv_bary + + elif body == 'moon': + if get_velocity: + raise KeyError("the Moon's velocity cannot be calculated with " + "the '{0}' ephemeris.".format(ephemeris)) + return calc_moon(time).cartesian + + else: + sun_pv_bary = earth_pv_bary - earth_pv_helio + if body == 'sun': + body_pv_bary = sun_pv_bary + else: + try: + body_index = PLAN94_BODY_NAME_TO_PLANET_INDEX[body] + except KeyError: + raise KeyError("{0}'s position and velocity cannot be " + "calculated with the '{1}' ephemeris." + .format(body, ephemeris)) + body_pv_helio = erfa.plan94(jd1, jd2, body_index) + body_pv_bary = body_pv_helio + sun_pv_bary + + body_pos_bary = CartesianRepresentation( + body_pv_bary[..., 0, :], unit=u.au, xyz_axis=-1, copy=False) + if get_velocity: + body_vel_bary = CartesianRepresentation( + body_pv_bary[..., 1, :], unit=u.au/u.day, xyz_axis=-1, + copy=False) + + else: + if isinstance(body, six.string_types): + # Look up kernel chain for JPL ephemeris, based on name + try: + kernel_spec = BODY_NAME_TO_KERNEL_SPEC[body.lower()] + except KeyError: + raise KeyError("{0}'s position cannot be calculated with " + "the {1} ephemeris.".format(body, ephemeris)) + else: + # otherwise, assume the user knows what their doing and intentionally + # passed in a kernel chain + kernel_spec = body + + # jplephem cannot handle multi-D arrays, so convert to 1D here. + jd1_shape = getattr(jd1, 'shape', ()) + if len(jd1_shape) > 1: + jd1, jd2 = jd1.ravel(), jd2.ravel() + # Note that we use the new jd1.shape here to create a 1D result array. + # It is reshaped below. + body_posvel_bary = np.zeros((2 if get_velocity else 1, 3) + + getattr(jd1, 'shape', ())) + for pair in kernel_spec: + spk = kernel[pair] + if spk.data_type == 3: + # Type 3 kernels contain both position and velocity. + posvel = spk.compute(jd1, jd2) + if get_velocity: + body_posvel_bary += posvel.reshape(body_posvel_bary.shape) + else: + body_posvel_bary[0] += posvel[:4] + else: + # spk.generate first yields the position and then the + # derivative. If no velocities are desired, body_posvel_bary + # has only one element and thus the loop ends after a single + # iteration, avoiding the velocity calculation. + for body_p_or_v, p_or_v in zip(body_posvel_bary, + spk.generate(jd1, jd2)): + body_p_or_v += p_or_v + + body_posvel_bary.shape = body_posvel_bary.shape[:2] + jd1_shape + body_pos_bary = CartesianRepresentation(body_posvel_bary[0], + unit=u.km, copy=False) + if get_velocity: + body_vel_bary = CartesianRepresentation(body_posvel_bary[1], + unit=u.km/u.day, copy=False) + + return (body_pos_bary, body_vel_bary) if get_velocity else body_pos_bary + + +def get_body_barycentric_posvel(body, time, ephemeris=None): + """Calculate the barycentric position and velocity of a solar system body. + + Parameters + ---------- + body : str or other + The solar system body for which to calculate positions. Can also be a + kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL + kernel. 
+ time : `~astropy.time.Time` + Time of observation. + ephemeris : str, optional + Ephemeris to use. By default, use the one set with + ``astropy.coordinates.solar_system_ephemeris.set`` + + Returns + ------- + position, velocity : tuple of `~astropy.coordinates.CartesianRepresentation` + Tuple of barycentric (ICRS) position and velocity. + + See also + -------- + get_body_barycentric : to calculate position only. + This is faster by about a factor two for JPL kernels, but has no + speed advantage for the built-in ephemeris. + + Notes + ----- + The velocity cannot be calculated for the Moon. To just get the position, + use :func:`~astropy.coordinates.get_body_barycentric`. + + """ + return _get_body_barycentric_posvel(body, time, ephemeris) + + +get_body_barycentric_posvel.__doc__ += indent(_EPHEMERIS_NOTE)[4:] + + +def get_body_barycentric(body, time, ephemeris=None): + """Calculate the barycentric position of a solar system body. + + Parameters + ---------- + body : str or other + The solar system body for which to calculate positions. Can also be a + kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL + kernel. + time : `~astropy.time.Time` + Time of observation. + ephemeris : str, optional + Ephemeris to use. By default, use the one set with + ``astropy.coordinates.solar_system_ephemeris.set`` + + Returns + ------- + position : `~astropy.coordinates.CartesianRepresentation` + Barycentric (ICRS) position of the body in cartesian coordinates + + See also + -------- + get_body_barycentric_posvel : to calculate both position and velocity. + + Notes + ----- + """ + return _get_body_barycentric_posvel(body, time, ephemeris, + get_velocity=False) + + +get_body_barycentric.__doc__ += indent(_EPHEMERIS_NOTE)[4:] + + +def _get_apparent_body_position(body, time, ephemeris): + """Calculate the apparent position of body ``body`` relative to Earth. + + This corrects for the light-travel time to the object. + + Parameters + ---------- + body : str or other + The solar system body for which to calculate positions. Can also be a + kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL + kernel. + time : `~astropy.time.Time` + Time of observation. + ephemeris : str, optional + Ephemeris to use. By default, use the one set with + ``~astropy.coordinates.solar_system_ephemeris.set`` + + Returns + ------- + cartesian_position : `~astropy.coordinates.CartesianRepresentation` + Barycentric (ICRS) apparent position of the body in cartesian coordinates + """ + if ephemeris is None: + ephemeris = solar_system_ephemeris.get() + # builtin ephemeris and moon is a special case, with no need to account for + # light travel time, since this is already included in the Meeus algorithm + # used. + if ephemeris == 'builtin' and body.lower() == 'moon': + return get_body_barycentric(body, time, ephemeris) + + # Calculate position given approximate light travel time. + delta_light_travel_time = 20. * u.s + emitted_time = time + light_travel_time = 0. 
* u.s + earth_loc = get_body_barycentric('earth', time, ephemeris) + while np.any(np.fabs(delta_light_travel_time) > 1.0e-8*u.s): + body_loc = get_body_barycentric(body, emitted_time, ephemeris) + earth_distance = (body_loc - earth_loc).norm() + delta_light_travel_time = (light_travel_time - + earth_distance/speed_of_light) + light_travel_time = earth_distance/speed_of_light + emitted_time = time - light_travel_time + + return get_body_barycentric(body, emitted_time, ephemeris) + + +_get_apparent_body_position.__doc__ += indent(_EPHEMERIS_NOTE)[4:] + + +def get_body(body, time, location=None, ephemeris=None): + """ + Get a `~astropy.coordinates.SkyCoord` for a solar system body as observed + from a location on Earth in the `~astropy.coordinates.GCRS` reference + system. + + Parameters + ---------- + body : str or other + The solar system body for which to calculate positions. Can also be a + kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL + kernel. + time : `~astropy.time.Time` + Time of observation. + location : `~astropy.coordinates.EarthLocation`, optional + Location of observer on the Earth. If not given, will be taken from + ``time`` (if not present, a geocentric observer will be assumed). + ephemeris : str, optional + Ephemeris to use. If not given, use the one set with + ``astropy.coordinates.solar_system_ephemeris.set`` (which is + set to 'builtin' by default). + + Returns + ------- + skycoord : `~astropy.coordinates.SkyCoord` + GCRS Coordinate for the body + + Notes + ----- + """ + if location is None: + location = time.location + + cartrep = _get_apparent_body_position(body, time, ephemeris) + icrs = ICRS(cartrep) + if location is not None: + obsgeoloc, obsgeovel = location.get_gcrs_posvel(time) + gcrs = icrs.transform_to(GCRS(obstime=time, + obsgeoloc=obsgeoloc, + obsgeovel=obsgeovel)) + else: + gcrs = icrs.transform_to(GCRS(obstime=time)) + return SkyCoord(gcrs) + + +get_body.__doc__ += indent(_EPHEMERIS_NOTE)[4:] + + +def get_moon(time, location=None, ephemeris=None): + """ + Get a `~astropy.coordinates.SkyCoord` for the Earth's Moon as observed + from a location on Earth in the `~astropy.coordinates.GCRS` reference + system. + + Parameters + ---------- + time : `~astropy.time.Time` + Time of observation + location : `~astropy.coordinates.EarthLocation` + Location of observer on the Earth. If none is supplied, taken from + ``time`` (if not present, a geocentric observer will be assumed). + ephemeris : str, optional + Ephemeris to use. If not given, use the one set with + ``astropy.coordinates.solar_system_ephemeris.set`` (which is + set to 'builtin' by default). 
+ + Returns + ------- + skycoord : `~astropy.coordinates.SkyCoord` + GCRS Coordinate for the Moon + + Notes + ----- + """ + + return get_body('moon', time, location=location, ephemeris=ephemeris) + + +get_moon.__doc__ += indent(_EPHEMERIS_NOTE)[4:] + + +def _apparent_position_in_true_coordinates(skycoord): + """ + Convert Skycoord in GCRS frame into one in which RA and Dec + are defined w.r.t to the true equinox and poles of the Earth + """ + jd1, jd2 = get_jd12(skycoord.obstime, 'tt') + _, _, _, _, _, _, _, rbpn = erfa.pn00a(jd1, jd2) + return SkyCoord(skycoord.frame.realize_frame( + skycoord.cartesian.transform(rbpn))) diff --git a/astropy/coordinates/tests/__init__.py b/astropy/coordinates/tests/__init__.py new file mode 100644 index 0000000..800d82e --- /dev/null +++ b/astropy/coordinates/tests/__init__.py @@ -0,0 +1,2 @@ +from __future__ import (absolute_import, division, print_function, + unicode_literals) diff --git a/astropy/coordinates/tests/accuracy/__init__.py b/astropy/coordinates/tests/accuracy/__init__.py new file mode 100644 index 0000000..a724ea4 --- /dev/null +++ b/astropy/coordinates/tests/accuracy/__init__.py @@ -0,0 +1,11 @@ +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +""" +The modules in the accuracy testing subpackage are primarily intended for +comparison with "known-good" (or at least "known-familiar") datasets. More +basic functionality and sanity checks are in the main ``coordinates/tests`` +testing modules. +""" + +N_ACCURACY_TESTS = 10 # the number of samples to use per accuracy test diff --git a/astropy/coordinates/tests/accuracy/fk4_no_e_fk4.csv b/astropy/coordinates/tests/accuracy/fk4_no_e_fk4.csv new file mode 100644 index 0000000..89ede4c --- /dev/null +++ b/astropy/coordinates/tests/accuracy/fk4_no_e_fk4.csv @@ -0,0 +1,202 @@ +# This file was generated with the ref_fk4_no_e_fk4.py script, and the reference values were computed using AST +obstime,ra_in,dec_in,ra_fk4ne,dec_fk4ne,ra_fk4,dec_fk4 +B1995.95,334.661793414,43.9385116594,334.661871722,43.9384643913,334.661715106,43.9385589276 +B1954.56,113.895199649,-14.1109832563,113.895104206,-14.1109806856,113.895295093,-14.110985827 +B1953.55,66.2107722038,-7.76265420193,66.2106936357,-7.76263900837,66.2108507719,-7.76266939548 +B1970.69,73.6417002791,41.7006137481,73.6415874825,41.7005905459,73.6418130758,41.7006369502 +B1960.78,204.381010469,-14.9357743223,204.381033022,-14.935790469,204.380987917,-14.9357581756 +B1975.98,214.396093073,-66.7648451487,214.39618819,-66.7649221332,214.395997956,-66.7647681643 +B1977.93,347.225227105,6.27744217753,347.225265767,6.27744057158,347.225188443,6.27744378347 +B1973.69,235.143754874,-5.59566003897,235.143821166,-5.59565879904,235.143688582,-5.59566127889 +B1960.79,269.606389512,26.7823112195,269.6064937,26.7823268289,269.606285325,26.78229561 +B1961.97,235.285153507,-14.0695156888,235.285221697,-14.0695245442,235.285085317,-14.0695068334 +B1960.84,269.177331338,42.9472695107,269.177458208,42.9472886864,269.177204468,42.947250335 +B1982.78,346.070424986,-3.51848810713,346.070465234,-3.51847491299,346.070384739,-3.51850130129 +B1992.32,3.01978725896,7.19732176646,3.0198007213,7.19731786183,3.0197737966,7.1973256711 +B1996.52,38.3199756112,18.8080489808,38.3199297604,18.8080292742,38.320021462,18.8080686874 +B1990.02,107.533336957,-4.33088623215,107.533242366,-4.3308791254,107.533431548,-4.33089333889 +B1984.04,236.30802591,14.3162535375,236.308095417,14.316277761,236.307956402,14.316229314 
+B1960.36,291.532518915,-33.7960784017,291.532631247,-33.7960622584,291.532406582,-33.7960945449 +B1987.08,313.983328941,27.7572327639,313.983419024,27.757215788,313.983238857,27.7572497397 +B1984.85,347.273135054,-13.6880685538,347.273174533,-13.6880403026,347.273095575,-13.688096805 +B1969.09,260.526724891,-37.6134342267,260.526837065,-37.6134483095,260.526612717,-37.6134201437 +B1992.51,231.291118043,-27.2371455509,231.291186922,-27.2371716878,231.291049163,-27.237119414 +B1976.41,258.283303492,-30.1025933842,258.283404615,-30.1026049901,258.28320237,-30.1025817782 +B1994.65,168.335642599,-44.084769302,168.33559145,-44.0848244927,168.335693748,-44.0847141113 +B1991.03,117.210483914,32.8708634152,117.210375337,32.8708843641,117.210592491,32.8708424662 +B1961.43,158.272058119,-29.286471988,158.271999107,-29.2865040826,158.27211713,-29.2864398934 +B1991.03,262.688069789,-48.1516431413,262.688204769,-48.1516601921,262.687934809,-48.1516260902 +B1956.93,357.845250924,19.2890677934,357.845273996,19.2890447616,357.845227852,19.2890908252 +B1974.12,243.674536239,-10.0431678136,243.67461278,-10.0431700653,243.674459697,-10.0431655619 +B1957.44,284.696106425,19.6051067047,284.696206986,19.6051121836,284.696005864,19.6051012256 +B1972.41,61.5291328053,18.6403709997,61.5290555992,18.640359185,61.5292100114,18.6403828144 +B1983.30,9.66573928438,-22.9075078717,9.66574187976,-22.9074636315,9.66573668899,-22.9075521118 +B1989.45,288.133287813,-36.6947385674,288.1334053,-36.6947252717,288.133170326,-36.694751863 +B1983.10,325.340113758,-33.7758802174,325.340195579,-33.7758368156,325.340031937,-33.7759236192 +B1985.58,8.88343575454,-49.4693354042,8.88344142656,-49.4692581619,8.88343008249,-49.4694126467 +B1994.40,177.029034641,-67.7755279684,177.028973591,-67.7756101942,177.02909569,-67.7754457425 +B1957.08,189.451860246,-68.7071945134,189.451852687,-68.707280034,189.451867805,-68.7071089929 +B1957.38,214.691763751,-32.6160600699,214.691808834,-32.6161002775,214.691718668,-32.6160198625 +B1966.30,18.7047162369,-32.9080620608,18.7047012927,-32.9080042868,18.7047311812,-32.9081198349 +B1951.59,322.232230099,14.4669345738,322.232303942,14.4669266585,322.232156257,14.466942489 +B1984.39,262.175824918,51.7319974933,262.175969881,51.7320265851,262.175679954,51.7319684013 +B1988.24,294.6060041,34.0181871087,294.606115453,34.0181812889,294.605892748,34.0181929283 +B1967.50,180.08019102,26.2892216009,180.080170768,26.2892699746,180.080211273,26.2891732273 +B1980.80,291.668187169,-22.2789167174,291.668288006,-22.2789027838,291.668086332,-22.2789306509 +B1997.92,34.548669268,-15.8924906144,34.5486300111,-15.8924591395,34.548708525,-15.8925220893 +B1964.55,78.8220157436,-37.4332268082,78.8219051397,-37.4331986299,78.8221263475,-37.4332549865 +B1984.33,93.1388621771,60.5731416456,93.1386708523,60.5731340793,93.139053502,60.5731492117 +B1952.11,168.518071423,7.09229333513,168.51803468,7.09231202586,168.518108166,7.09227464443 +B1953.13,165.374352937,39.3890686842,165.374299611,39.3891290726,165.374406263,39.3890082959 +B1990.72,255.423520875,-17.5881075751,255.423610608,-17.5881124458,255.423431143,-17.5881027044 +B1971.83,64.0990821181,36.8289797648,64.098987426,36.8289518646,64.0991768103,36.829007665 +B1969.60,191.321958369,-52.3532066605,191.321958947,-52.3532769701,191.321957792,-52.3531363511 +B1966.53,60.3872023631,25.1025882655,60.3871229238,25.1025691776,60.3872818026,25.1026073533 +B1972.88,276.773010626,56.6051138031,276.773182582,56.6051241599,276.772838671,56.6051034461 
+B1991.77,334.141397682,37.3852087993,334.141469519,37.3851690556,334.141325844,37.3852485429 +B1973.34,219.417716878,-20.2290328911,219.417764848,-20.2290543437,219.417668907,-20.2290114386 +B1971.06,54.0660580808,-29.3264933861,54.0659838918,-29.3264524474,54.06613227,-29.3265343247 +B1978.54,176.26561333,-0.572718169429,176.265589013,-0.572711155523,176.265637647,-0.572725183324 +B1986.95,135.84418338,-9.94938261687,135.844104187,-9.94938414897,135.844262573,-9.94938108476 +B1952.75,305.496508312,-8.63421746611,305.496595751,-8.63420374088,305.496420873,-8.63423119132 +B1981.21,327.995002307,-58.3471659896,327.995125925,-58.3471028456,327.994878689,-58.3472291335 +B1981.05,138.185539617,11.9337947187,138.185462216,11.9338143115,138.185617017,11.9337751259 +B1950.06,113.578525223,29.6301583121,113.578418602,29.6301753387,113.578631843,29.6301412853 +B1980.14,204.621895006,36.5235009134,204.621922605,36.5235622135,204.621867408,36.5234396134 +B1952.01,67.6144926088,-13.7094836718,67.6144111325,-13.7094635522,67.6145740851,-13.7095037914 +B1979.29,45.3029557779,36.4639084123,45.30288945,36.4638681314,45.3030221059,36.4639486932 +B1972.42,247.534489816,-3.23349952461,247.534569024,-3.23349456661,247.534410608,-3.2335044826 +B1967.69,287.858418461,26.2825631559,287.858523588,26.2825653277,287.858313334,26.2825609839 +B1996.68,206.473163472,-38.4312130715,206.473195575,-38.4312637479,206.473131368,-38.4311623951 +B1963.36,350.362793376,-7.51631961926,350.36282729,-7.51630014511,350.362759462,-7.51633909343 +B1964.06,228.259575769,40.311002157,228.259650941,40.3110571481,228.259500598,40.3109471658 +B1975.25,319.831820932,40.7337792676,319.831918659,40.7337465323,319.831723205,40.7338120029 +B1982.34,178.349313153,-38.3854710615,178.349286408,-38.3855223276,178.349339897,-38.3854197955 +B1998.53,126.58195076,-73.6980337652,126.581645487,-73.6980707198,126.582256033,-73.6979968102 +B1951.79,257.122932676,24.0154376566,257.123027615,24.0154606049,257.122837737,24.0154147083 +B1971.16,181.414481921,-17.7858263698,181.414465135,-17.7858473968,181.414498707,-17.7858053429 +B1979.42,81.2295383474,-9.26450146427,81.2294479067,-9.26448844016,81.2296287882,-9.26451448837 +B1986.59,88.1907984871,32.4238226453,88.1906888861,32.4238179627,88.1909080881,32.4238273279 +B1958.78,285.408252018,67.7826509035,285.408502334,67.7826473151,285.408001701,67.7826544915 +B1975.53,178.262069224,51.7327600597,178.262035148,51.7328376286,178.2621033,51.7326824908 +B1975.01,329.433722424,-46.8960749035,329.433814783,-46.8960177216,329.433630065,-46.8961320854 +B1994.64,340.333860195,36.5560891832,340.333920655,36.5560469817,340.333799735,36.5561313847 +B1969.13,191.963602676,21.3572019706,191.963604196,21.3572439205,191.963601156,21.3571600208 +B1983.14,90.8973340407,3.44588414281,90.897240458,3.44589104844,90.8974276234,3.44587723717 +B1952.34,259.510340943,47.0512387915,259.51047047,47.0512697696,259.510211416,47.0512078131 +B1987.56,132.277954966,30.4307232942,132.277860775,30.4307550149,132.278049157,30.4306915735 +B1968.44,179.513439448,-54.44865752,179.513406635,-54.4487285563,179.513472261,-54.4485864837 +B1997.40,81.5670170865,-19.9451944488,81.5669219294,-19.9451761627,81.5671122436,-19.9452127349 +B1967.36,127.283632829,-10.0946390302,127.283546305,-10.0946385601,127.283719352,-10.0946395003 +B1984.19,234.306643184,-86.4404274379,234.307689689,-86.4404960056,234.305596721,-86.440358869 +B1991.23,112.65584231,11.2521500479,112.655747491,11.2521615342,112.655937129,11.2521385617 
+B1974.31,276.744760981,21.4151577082,276.744862642,21.4151677292,276.74465932,21.4151476871 +B1999.21,281.461357214,-15.511897988,281.461455717,-15.5118901893,281.46125871,-15.5119057865 +B1980.19,306.867413859,-11.9467360888,306.867501237,-11.9467197906,306.86732648,-11.946752387 +B1987.98,341.966066455,-2.82477813631,341.966112735,-2.82476612903,341.966020175,-2.82479014361 +B1984.23,38.6362483924,9.3322810896,38.6362039361,9.33227526676,38.6362928487,9.33228691243 +B1996.62,327.861128148,-46.529254733,327.861222674,-46.5291991016,327.86103362,-46.5293103644 +B1997.49,120.979858288,87.22617179,120.978013685,87.226204397,120.981702849,87.2261391801 +B1999.51,297.496953653,0.839666332936,297.497044724,0.83967387104,297.496862583,0.839658794827 +B1956.31,323.316228643,-0.794522598791,323.316298957,-0.794513783928,323.316158329,-0.794531413663 +B1998.83,15.3775095611,-38.7740290611,15.3775004994,-38.7739636006,15.3775186228,-38.7740945216 +B1961.46,70.486199672,-24.0682131367,70.4861102148,-24.0681861769,70.4862891293,-24.0682400965 +B1959.30,106.020475905,36.6574903487,106.020358021,36.6575015631,106.020593788,36.6574791342 +B1975.46,225.719957006,-24.2326924255,225.720016128,-24.2327172566,225.719897883,-24.2326675945 +B1976.52,31.0403178442,23.2187819108,31.040282636,23.2187540208,31.0403530525,23.2188098008 +B1964.13,51.4602071324,-27.0058546166,51.4601381551,-27.0058147039,51.4602761098,-27.0058945294 +B1965.51,185.697546923,55.594260797,185.697531081,55.5943432416,185.697562765,55.5941783525 +B1965.49,248.162878677,-23.7609450888,248.162965707,-23.7609586287,248.162791647,-23.7609315488 +B1963.32,308.385291884,51.2349043028,308.385426622,51.2348753519,308.385157147,51.2349332534 +B1979.67,233.050205996,63.3093356498,233.050347232,63.3094022915,233.05006476,63.3092690079 +B1960.86,209.382723191,-41.4659129842,209.382762908,-41.4659667228,209.382683474,-41.4658592457 +B1970.12,256.001743835,-16.3448051664,256.001833404,-16.3448088895,256.001654267,-16.3448014432 +B1964.43,90.8700685367,21.3678694408,90.8699682366,21.3678706796,90.8701688369,21.3678682019 +B1958.69,324.057486054,57.4352750563,324.057615131,57.4352248218,324.057356976,57.4353252907 +B1961.29,159.225729446,-45.2472278228,159.225658238,-45.2472794744,159.225800655,-45.2471761712 +B1999.43,7.38749687642,-53.1540997613,7.38750715011,-53.1540192078,7.38748660267,-53.1541803148 +B1971.70,345.477965039,-10.1831007688,345.478006755,-10.1830778328,345.477923323,-10.1831237048 +B1991.41,234.801152081,71.8511934075,234.80136258,71.8512610944,234.800941584,71.8511257203 +B1978.63,184.754250038,-66.4894904918,184.754223702,-66.4895738307,184.754276373,-66.4894071529 +B1982.60,245.64829793,-38.7682176459,245.648397087,-38.7682459424,245.648198773,-38.7681893494 +B1986.49,176.234540627,12.5643501076,176.234515663,12.564377805,176.23456559,12.5643224102 +B1969.56,333.536461653,-55.645568776,333.536564215,-55.6455021935,333.53635909,-55.6456353585 +B1969.64,185.716717981,-21.5568171888,185.71670839,-21.5568445326,185.716727571,-21.556789845 +B1992.98,25.9775574253,12.7249831044,25.9775324561,12.7249706335,25.9775823945,12.7249955753 +B1990.50,204.302987352,-36.6989586206,204.303014372,-36.6990074874,204.302960331,-36.6989097538 +B1991.83,221.487546141,22.5689795999,221.487598122,22.569018351,221.487494159,22.5689408487 +B1959.40,338.956666009,-30.7135370512,338.956724763,-30.7134891887,338.956607255,-30.7135849138 +B1967.98,149.5308077,21.1458572723,149.530740161,21.1458902834,149.530875238,21.1458242612 
+B1974.10,95.1983908472,-1.61163007915,95.1982963974,-1.61162187599,95.198485297,-1.6116382823 +B1998.30,35.0615395317,-28.6207880841,35.0614956333,-28.620739571,35.0615834301,-28.6208365972 +B1978.17,174.903919876,-25.7547140538,174.903890465,-25.754746515,174.903949287,-25.7546815927 +B1991.38,167.27863063,54.1842744725,167.278565096,54.1843495205,167.278696164,54.1841994246 +B1953.81,10.7133541168,-26.6356033619,10.7133548501,-26.6355537205,10.7133533835,-26.6356530033 +B1977.66,249.939886269,43.0233288254,249.939997359,43.0233681421,249.939775179,43.0232895085 +B1977.40,258.100960451,-37.3838036503,258.101070404,-37.3838198729,258.1008505,-37.3837874275 +B1995.27,262.732112385,-19.8057986634,262.732208125,-19.8058013404,262.732016645,-19.8057959863 +B1968.47,149.166366188,63.2857703333,149.166225063,63.2858369635,149.166507312,63.2857037031 +B1995.06,5.4355841259,0.695799807062,5.43559350993,0.695806590879,5.43557474185,0.695793023234 +B1957.03,327.231056694,-11.1377396332,327.231123747,-11.137718635,327.230989642,-11.1377606314 +B1954.96,284.17633852,-71.0631656787,284.17663058,-71.0631583005,284.176046459,-71.0631730565 +B1998.66,59.4717008987,14.0960045791,59.4716277587,14.0959969126,59.4717740389,14.0960122456 +B1997.10,112.602946077,-17.7763932222,112.6028484,-17.7763914439,112.603043755,-17.7763950006 +B1979.55,219.940310095,-26.5130440909,219.940361247,-26.5130741126,219.940258944,-26.5130140693 +B1952.60,131.216503219,-60.6790709392,131.216335542,-60.6791085681,131.216670895,-60.6790333101 +B1952.51,56.1738921125,-19.3427782341,56.1738209005,-19.3427485454,56.1739633247,-19.3428079229 +B1966.23,63.8293728328,-59.8347944156,63.8292225342,-59.8347407237,63.829523132,-59.8348481073 +B1968.79,312.440281577,-82.909075449,312.440938353,-82.9090254915,312.439624792,-82.9091254056 +B1988.21,104.43408064,-66.6447299251,104.433841614,-66.6447318349,104.434319666,-66.644728015 +B1992.96,210.664663673,-17.5831928536,210.664697001,-17.5832123123,210.664630345,-17.5831733949 +B1977.29,163.438155327,-54.6954182678,163.438079056,-54.6954822858,163.438231598,-54.6953542498 +B1966.19,148.024127582,2.32865180198,148.024062692,2.32866254348,148.024192472,2.32864106049 +B1970.29,317.748400264,-34.6457182874,317.748492841,-34.6456795601,317.748307686,-34.6457570147 +B1955.48,249.374885326,79.5246095403,249.375329338,79.5246600743,249.374441319,79.5245590057 +B1956.86,100.53840787,-27.7507223648,100.538300623,-27.7507149055,100.538515118,-27.750729824 +B1987.27,23.1984832267,21.1208388177,23.1984619158,21.1208127728,23.1985045377,21.1208648626 +B1993.82,71.5045009532,3.00896662959,71.504418313,3.00897208869,71.5045835934,3.00896117048 +B1962.95,335.405788093,-6.90098238794,335.40584389,-6.90096525284,335.405732296,-6.90099952305 +B1984.28,307.588884401,18.8511389183,307.588974176,18.8511327496,307.588794626,18.851145087 +B1967.96,343.704504442,-46.9224252956,343.704568407,-46.9223583286,343.704440477,-46.9224922627 +B1950.30,18.8112053675,35.1485289159,18.8111898096,35.1484812505,18.8112209256,35.1485765813 +B1988.06,208.609805013,-46.3894275721,208.609846395,-46.3894876445,208.609763631,-46.3893674997 +B1970.70,172.978655994,15.4172636989,172.978625355,15.4172953255,172.978686632,15.4172320724 +B1966.69,7.8152324312,-34.9365736294,7.81523908357,-34.936512861,7.81522577882,-34.9366343978 +B1963.90,134.503366944,-72.4111269318,134.503104699,-72.4111743348,134.503629189,-72.4110795286 +B1979.63,149.073048424,14.7065160273,149.072982715,14.7065415958,149.073114132,14.7064904588 
+B1966.26,217.406604209,16.5186514295,217.406648071,16.518683228,217.406560347,16.518619631 +B1996.84,241.829541848,16.5114334946,241.82961848,16.5114581776,241.829465216,16.5114088117 +B1954.80,301.991652158,46.8228690265,301.991781762,46.8228497806,301.991522554,46.8228882722 +B1994.16,280.629434995,-19.0017596678,280.629535379,-19.0017524272,280.629334611,-19.0017669083 +B1978.40,144.252375855,-10.2581330338,144.252305474,-10.258136788,144.252446236,-10.2581292796 +B1953.10,286.0305233,12.7464714044,286.030620257,12.7464773437,286.030426344,12.7464654651 +B1993.75,321.524751743,61.8464645226,321.524904902,61.8464140081,321.524598583,61.846515037 +B1961.24,94.4962887092,-44.0946278203,94.4961574273,-44.0946145181,94.4964199912,-44.0946411224 +B1989.97,356.110922656,-39.1892569317,356.110954348,-39.1891928509,356.110890964,-39.1893210125 +B1990.09,307.190555646,-43.7191034979,307.190673602,-43.7190689248,307.190437689,-43.719138071 +B1951.45,263.331776174,25.1917278571,263.331876059,25.1917473693,263.331676289,25.1917083448 +B1981.35,128.003624894,58.8666544649,128.003461169,58.8666953172,128.003788619,58.8666136124 +B1980.23,317.984216655,-8.89508525523,317.984293507,-8.89506861216,317.984139802,-8.8951018983 +B1953.91,312.465272698,5.18400310772,312.465354085,5.18400654399,312.465191311,5.18399967144 +B1988.65,344.0759205,-20.8070551085,344.07596665,-20.8070176615,344.07587435,-20.8070925556 +B1957.17,0.0386123471053,-42.7336081023,0.0386371599928,-42.7335390653,0.0385875341353,-42.7336771394 +B1973.18,5.95477509083,23.9728714179,5.95478442291,23.9728402559,5.95476575873,23.97290258 +B1954.86,113.065220613,27.4191705733,113.065116003,27.4191866686,113.065325223,27.4191544779 +B1978.49,358.313822853,67.0446512684,358.313876751,67.0445691316,358.313768955,67.0447334052 +B1970.19,53.5839203362,-15.011852649,53.5838539771,-15.0118268548,53.5839866953,-15.0118784432 +B1979.33,60.2557627351,25.6833225299,60.2556830704,25.6833027692,60.2558423998,25.6833422906 +B1987.44,273.08593329,76.4393919681,273.086334137,76.439406706,273.085532444,76.4393772296 +B1994.48,25.0306798156,-51.1202356021,25.0306434336,-51.1201589045,25.0307161977,-51.1203122997 +B1968.97,253.970437895,31.094899255,253.970536535,31.0949284071,253.970339254,31.0948701027 +B1964.62,168.89950144,-43.2270950714,168.899452201,-43.2271494771,168.89955068,-43.2270406658 +B1975.46,3.66775780511,39.2622225734,3.66777368182,39.26216915,3.66774192836,39.2622759968 +B1976.64,278.936590632,6.21231840756,278.936686041,6.21232668172,278.936495223,6.21231013337 +B1955.27,285.91236301,9.40548699672,285.912458882,9.40549352262,285.912267137,9.40548047079 +B1952.30,53.8450026285,60.7259893436,53.8448709018,60.7259324097,53.8451343557,60.7260462774 +B1981.10,8.53330744443,-7.54498028811,8.5333117472,-7.54495997493,8.53330314165,-7.54500060131 +B1991.12,274.342957522,-1.24603088049,274.3430518,-1.24602319414,274.342863244,-1.24603856684 +B1952.75,80.5212647616,19.4060625392,80.5211705543,19.4060589302,80.521358969,19.4060661482 +B1989.90,94.3827831954,15.0883386826,94.382685566,15.0883434466,94.3828808249,15.0883339185 +B1962.21,164.473020999,-47.6965440186,164.472957775,-47.69660143,164.473084223,-47.6964866073 +B1990.18,89.9736906625,-16.9964263489,89.973593279,-16.9964134056,89.9737880461,-16.9964392923 +B1964.91,204.582082173,15.6789515837,204.582105142,15.678984165,204.582059203,15.6789190023 diff --git a/astropy/coordinates/tests/accuracy/fk4_no_e_fk5.csv b/astropy/coordinates/tests/accuracy/fk4_no_e_fk5.csv new file mode 100644 index 
0000000..99102a4 --- /dev/null +++ b/astropy/coordinates/tests/accuracy/fk4_no_e_fk5.csv @@ -0,0 +1,202 @@ +# This file was generated with the ref_fk4_no_e_fk5.py script, and the reference values were computed using AST +equinox_fk4,equinox_fk5,obstime,ra_in,dec_in,ra_fk5,dec_fk5,ra_fk4,dec_fk4 +B1948.36,J1992.59,B1995.95,334.661793414,43.9385116594,335.127505587,44.1614743713,334.19703321,43.7164045503 +B1971.64,J2006.23,B1954.56,113.895199649,-14.1109832563,114.294239451,-14.189617335,113.496041526,-14.0335757922 +B1970.49,J2015.57,B1953.55,66.2107722038,-7.76265420193,66.7573654302,-7.66250556575,65.6644607308,-7.86499337709 +B1931.50,J1999.69,B1970.69,73.6417002791,41.7006137481,74.8414427945,41.8037189279,72.4451689528,41.5898910005 +B1951.47,J1977.66,B1960.78,204.381010469,-14.9357743223,204.732916483,-15.0684119497,204.02947143,-14.8027671534 +B1955.96,J1999.16,B1975.98,214.396093073,-66.7648451487,215.271219746,-66.9622610907,213.531009752,-66.5653657951 +B1956.23,J2000.23,B1977.93,347.225227105,6.27744217753,347.783144277,6.51660389395,346.667337259,6.03880786927 +B1957.34,J1996.85,B1973.69,235.143754874,-5.59566003897,235.668034446,-5.72055011897,234.619987804,-5.46911905342 +B1941.60,J1993.80,B1960.79,269.606389512,26.7823112195,270.128504362,26.7816404236,269.084278188,26.7856304113 +B1930.71,J2013.89,B1961.97,235.285153507,-14.0695156888,236.447792421,-14.3293747521,234.125715822,-13.8019427393 +B1953.56,J1980.00,B1960.84,269.177331338,42.9472695107,269.379190001,42.9454157845,268.975475883,42.9496418506 +B1940.10,J1975.82,B1982.78,346.070424986,-3.51848810713,346.530942755,-3.32528640922,345.609649936,-3.71130492658 +B1934.68,J2014.12,B1992.32,3.01978725896,7.19732176646,4.04111300197,7.63872974164,1.9996375316,6.75549866988 +B1953.24,J2017.66,B1996.52,38.3199756112,18.8080489808,39.2225541698,19.0876452406,37.4201227465,18.5249551135 +B1955.52,J1986.19,B1990.02,107.533336957,-4.33088623215,107.914038138,-4.38286340945,107.152514675,-4.27999097547 +B1927.27,J2006.35,B1984.04,236.30802591,14.3162535375,237.227969566,14.0749779959,235.388744829,14.5634084162 +B1974.27,J1978.23,B1960.36,291.532518915,-33.7960784017,291.597238932,-33.7879646382,291.467788569,-33.8041689728 +B1930.19,J1986.95,B1987.08,313.983328941,27.7572327639,314.590894151,27.9778790422,313.375876285,27.5389973059 +B1945.29,J1997.99,B1984.85,347.273135054,-13.6880685538,347.963547495,-13.4015008868,346.58154003,-13.9738567052 +B1958.28,J2008.13,B1969.09,260.526724891,-37.6134342267,261.376886242,-37.6570793786,259.677433211,-37.5657291394 +B1934.85,J1985.89,B1992.51,231.291118043,-27.2371455509,232.060225806,-27.4133463836,230.524106155,-27.0579724511 +B1937.09,J1998.50,B1976.41,258.283303492,-30.1025933842,259.264766067,-30.1691519653,257.303071837,-30.0303039078 +B1956.16,J2023.91,B1994.65,168.335642599,-44.084769302,169.131984863,-44.4546574256,167.543307692,-43.7159381708 +B1964.94,J2000.65,B1991.03,117.210483914,32.8708634152,117.781943773,32.7790791562,116.63804006,32.9608828232 +B1952.23,J1998.51,B1961.43,158.272058119,-29.286471988,158.811965795,-29.5262894831,157.73289082,-29.0475527364 +B1934.88,J2008.31,B1991.03,262.688069789,-48.1516431413,264.082620089,-48.1987316304,261.295758898,-48.0946938009 +B1964.21,J2001.06,B1956.93,357.845250924,19.2890677934,358.315118415,19.4941375001,357.375940593,19.084061288 +B1965.72,J1987.86,B1974.12,243.674536239,-10.0431678136,243.97803572,-10.097540261,243.371196745,-9.98821027624 
+B1960.54,J2016.21,B1957.44,284.696106425,19.6051067047,285.302622767,19.6853290904,284.089422958,19.5280584762 +B1972.20,J1981.44,B1972.41,61.5291328053,18.6403709997,61.6630317661,18.6648463372,61.3952747433,18.6157899771 +B1967.75,J1983.60,B1983.30,9.66573928438,-22.9075078717,9.8627174508,-22.8205464878,9.46866122286,-22.9945202022 +B1973.18,J1983.75,B1989.45,288.133287813,-36.6947385674,288.310596498,-36.676339325,287.955909092,-36.712964737 +B1948.23,J1994.10,B1983.10,325.340113758,-33.7758802174,326.023797476,-33.5649649991,324.65398011,-33.9850593768 +B1949.25,J1980.08,B1985.58,8.88343575454,-49.4693354042,9.24701151693,-49.2998476535,8.51878534341,-49.6389915796 +B1954.32,J1994.49,B1994.40,177.029034641,-67.7755279684,177.517646511,-67.9988963388,176.544747657,-67.552257953 +B1972.10,J2015.50,B1957.08,189.451860246,-68.7071945134,190.114123213,-68.9453284555,188.797874924,-68.4686046268 +B1943.61,J1992.69,B1957.38,214.691763751,-32.6160600699,215.421492998,-32.8397553215,213.964722034,-32.3903875087 +B1954.91,J2018.83,B1966.30,18.7047162369,-32.9080620608,19.4489945613,-32.5717365496,17.9585532678,-33.2458719202 +B1955.68,J2022.94,B1951.59,322.232230099,14.4669345738,323.034821026,14.7645630389,321.42944541,14.1725191869 +B1953.00,J2016.94,B1984.39,262.175824918,51.7319974933,262.548281917,51.6846881399,261.803746337,51.7815981232 +B1930.93,J1980.75,B1988.24,294.6060041,34.0181871087,295.074015891,34.1347005761,294.137889278,33.9037336792 +B1945.15,J2003.12,B1967.50,180.08019102,26.2892216009,180.821706382,25.9664807149,179.336612509,26.6119683301 +B1936.07,J1980.42,B1980.80,291.668187169,-22.2789167174,292.329992922,-22.1864262743,291.005523355,-22.3687549985 +B1964.41,J2018.79,B1997.92,34.548669268,-15.8924906144,35.1967101241,-15.6441308582,33.9006331013,-16.1427921034 +B1963.20,J1992.50,B1964.55,78.8220157436,-37.4332268082,79.075079173,-37.4019554736,78.5689855058,-37.465204993 +B1933.72,J2019.89,B1984.33,93.1388621771,60.5731416456,95.0905202877,60.5387165097,91.184708092,60.591240446 +B1961.19,J1981.21,B1952.11,168.518071423,7.09229333513,168.777442158,6.98298378221,168.258596163,7.20150240716 +B1971.23,J2006.89,B1953.13,165.374352937,39.3890686842,165.87176885,39.196720756,164.875283704,39.5809806285 +B1948.80,J2018.63,B1990.72,255.423520875,-17.5881075751,256.438156117,-17.6826060848,254.410141307,-17.4869506738 +B1970.65,J1975.05,B1971.83,64.0990821181,36.8289797648,64.172215273,36.8396700703,64.0259656206,36.8182613277 +B1946.87,J1990.24,B1969.60,191.321958369,-52.3532066605,191.941068845,-52.5897148324,190.706679307,-52.1161877868 +B1928.29,J1976.44,B1966.53,60.3872023631,25.1025882655,61.1139601332,25.2335783606,59.6618880776,24.9686447968 +B1943.19,J2002.49,B1972.88,276.773010626,56.6051138031,277.035261703,56.6448029825,276.510294672,56.5669265636 +B1934.47,J1983.76,B1991.77,334.141397682,37.3852087993,334.681936673,37.63269657,333.601820309,37.1388490904 +B1932.42,J2004.50,B1973.34,219.417716878,-20.2290328911,220.436864842,-20.53677356,218.402163145,-19.9167676954 +B1935.55,J1975.26,B1971.06,54.0660580808,-29.3264933861,54.4742787759,-29.1973856015,53.6578513784,-29.4568765774 +B1968.98,J1989.10,B1978.54,176.26561333,-0.572718169429,176.523526883,-0.684515911301,176.007690571,-0.460953265257 +B1965.89,J2012.99,B1986.95,135.84418338,-9.94938261687,136.4156383,-10.1384151142,135.27243952,-9.76217234374 +B1956.58,J2018.60,B1952.75,305.496508312,-8.63421746611,306.333192119,-8.43166153129,304.658373119,-8.83266452583 
+B1972.76,J2000.27,B1981.21,327.995002307,-58.3471659896,328.478135531,-58.216943679,327.509419894,-58.4767020929 +B1930.95,J1999.19,B1981.05,138.185539617,11.9337947187,139.11218066,11.6486009656,137.256622001,12.2148869077 +B1955.11,J1977.39,B1950.06,113.578525223,29.6301583121,113.928637253,29.5801804457,113.228110262,29.6794410184 +B1941.57,J2012.54,B1980.14,204.621895006,36.5235009134,205.408269314,36.1654570594,203.833462777,36.8838069508 +B1966.08,J2016.57,B1952.01,67.6144926088,-13.7094836718,68.1982560465,-13.6037505529,67.030977723,-13.8178646409 +B1957.99,J2018.30,B1979.29,45.3029557779,36.4639084123,46.2543764288,36.6980750272,44.3559469687,36.2257877401 +B1946.13,J2016.34,B1972.42,247.534489816,-3.23349952461,248.455025871,-3.37995755876,246.615033795,-3.08124145001 +B1960.80,J1999.98,B1967.69,287.858418461,26.2825631559,288.257968726,26.350185895,287.458797059,26.2163884515 +B1935.76,J1975.44,B1996.68,206.473163472,-38.4312130715,207.060791642,-38.6284341117,205.887695173,-38.2329839969 +B1925.84,J1992.06,B1963.36,350.362793376,-7.51631961926,351.218703416,-7.15237789524,349.505768066,-7.87933870474 +B1939.04,J2012.01,B1964.06,228.259575769,40.311002157,228.937164323,40.0423286476,227.581733934,40.5832613094 +B1955.09,J2020.54,B1975.25,319.831820932,40.7337792676,320.468436705,41.0135236496,319.195878847,40.4566449257 +B1948.03,J1989.70,B1982.34,178.349313153,-38.3854710615,178.878815281,-38.6173901794,177.821462042,-38.1536135802 +B1960.53,J1984.34,B1998.53,126.58195076,-73.6980337652,126.522212859,-73.7769656974,126.639550025,-73.6189928568 +B1933.23,J2019.21,B1951.79,257.122932676,24.0154376566,258.016684748,23.9123993004,256.229480593,24.1257542269 +B1972.01,J1994.20,B1971.16,181.414481921,-17.7858263698,181.700080407,-17.9093349916,181.129088126,-17.6623025404 +B1972.77,J2005.85,B1979.42,81.2295383474,-9.26450146427,81.6239207678,-9.2370487074,80.8352159785,-9.29320699924 +B1974.04,J2004.85,B1986.59,88.1907984871,32.4238226453,88.6946934578,32.4284817102,87.6869564835,32.4176559135 +B1927.94,J1991.17,B1958.78,285.408252018,67.7826509035,285.385328422,67.8761253941,285.427216468,67.6890523656 +B1962.02,J2007.00,B1975.53,178.262069224,51.7327600597,178.846486725,51.48241739,177.67431932,51.983025032 +B1955.03,J1997.43,B1975.01,329.433722424,-46.8960749035,330.103614247,-46.692118107,328.760372892,-47.0986245326 +B1929.45,J2009.92,B1994.64,340.333860195,36.5560891832,341.254677798,36.9791399195,339.41634063,36.1354568961 +B1974.47,J1983.10,B1969.13,191.963602676,21.3572019706,192.070505327,21.3101918576,191.856675141,21.4042306751 +B1952.44,J1984.77,B1983.14,90.8973340407,3.44588414281,91.3225022889,3.4423974082,90.4721556483,3.44803542582 +B1958.72,J1999.14,B1952.34,259.510340943,47.0512387915,259.790647567,47.0108077311,259.230159931,47.0927522883 +B1961.24,J2000.00,B1987.56,132.277954966,30.4307232942,132.867785214,30.2847386621,131.686701777,30.5750623541 +B1953.42,J2013.40,B1968.44,179.513439448,-54.44865752,180.28117417,-54.7825927917,178.751891964,-54.1147599287 +B1951.37,J1984.64,B1997.40,81.5670170865,-19.9451944488,81.9269374609,-19.9186092791,81.2071330825,-19.9729303814 +B1932.54,J2024.61,B1967.36,127.283632829,-10.0946390302,128.389991343,-10.4090695278,126.176062442,-9.78808786127 +B1937.01,J1991.26,B1984.19,234.306643184,-86.4404274379,239.159196268,-86.6062091923,229.877779523,-86.2547679857 +B1945.13,J2017.30,B1991.23,112.65584231,11.2521500479,113.653522343,11.0941650144,111.656584771,11.4036739314 
+B1928.39,J2015.91,B1974.31,276.744760981,21.4151577082,277.676112471,21.4763175641,275.813216777,21.3618641896 +B1962.92,J2020.33,B1999.21,281.461357214,-15.511897988,282.28369792,-15.4461443386,280.638389569,-15.573154518 +B1942.13,J2011.97,B1980.19,306.867413859,-11.9467360888,307.826980276,-11.7108610078,305.905696925,-12.1773958306 +B1974.49,J1990.83,B1987.98,341.966066455,-2.82477813631,342.177020368,-2.73822865168,341.755054209,-2.91122392552 +B1969.43,J1976.38,B1984.23,38.6362483924,9.3322810896,38.7295143004,9.36248136387,38.5430036498,9.30204150263 +B1971.93,J2003.15,B1996.62,327.861128148,-46.529254733,328.357950708,-46.3816911362,327.362455314,-46.6760151537 +B1961.96,J2022.83,B1997.49,120.979858288,87.22617179,127.356289341,87.0356348542,113.806804821,87.3823224486 +B1926.35,J1982.80,B1999.51,297.496953653,0.839666332936,298.215707802,0.986504246823,296.777520197,0.696326958488 +B1944.12,J2012.89,B1956.31,323.316228643,-0.794522598791,324.199877999,-0.485711290555,322.43128765,-1.09980368764 +B1925.53,J1977.07,B1998.83,15.3775095611,-38.7740290611,15.975820035,-38.4977806231,14.777479512,-39.0510731102 +B1928.26,J1984.73,B1961.46,70.486199672,-24.0682131367,71.0773386429,-23.9647188389,69.8952283629,-24.1747645717 +B1959.07,J2001.01,B1959.30,106.020475905,36.6574903487,106.724489447,36.5916635763,105.315480342,36.7205573317 +B1974.33,J1998.24,B1975.46,225.719957006,-24.2326924255,226.069642,-24.3253436846,225.370713094,-24.1394598386 +B1958.31,J2014.48,B1976.52,31.0403178442,23.2187819108,31.8305300515,23.4855979353,30.252581142,22.9497454125 +B1945.76,J1981.40,B1964.13,51.4602071324,-27.0058546166,51.8377992184,-26.8827293131,51.0826217069,-27.1300027947 +B1927.06,J2019.62,B1965.51,185.697546923,55.594260797,186.80220854,55.0820030044,184.579796584,56.1075102073 +B1969.71,J1983.82,B1965.49,248.162878677,-23.7609450888,248.376028149,-23.7900358626,247.949821476,-23.7315830399 +B1960.34,J1996.74,B1963.32,308.385291884,51.2349043028,308.653885549,51.3611262862,308.116543047,51.1094272568 +B1948.94,J1982.47,B1979.67,233.050205996,63.3093356498,233.183624905,63.1972984089,232.917717277,63.4217190672 +B1935.78,J2009.44,B1960.86,209.382723191,-41.4659129842,210.508471779,-41.8212717612,208.265390379,-41.1066153061 +B1929.09,J2015.70,B1970.12,256.001743835,-16.3448051664,257.249402003,-16.4563411788,254.755864037,-16.2230882626 +B1958.66,J1984.63,B1964.43,90.8700685367,21.3678694408,91.2595104175,21.3651813051,90.4806144234,21.369574816 +B1974.74,J2003.91,B1958.69,324.057486054,57.4352750563,324.282176393,57.5669676284,323.83284781,57.3039563174 +B1954.68,J2011.04,B1961.29,159.225729446,-45.2472278228,159.836674886,-45.541209348,158.616784774,-44.9544310498 +B1967.01,J1998.76,B1999.43,7.38749687642,-53.1540997613,7.76348958513,-52.978901243,7.01015979396,-53.3294476754 +B1932.65,J1988.10,B1971.70,345.477965039,-10.1831007688,346.201723123,-9.88376000525,344.753111544,-10.4814629211 +B1968.81,J2011.71,B1991.41,234.801152081,71.8511934075,234.75819291,71.7134587916,234.849277912,71.9887729277 +B1952.24,J1992.46,B1978.63,184.754250038,-66.4894904918,185.31507512,-66.7125454196,184.198875868,-66.2662547282 +B1974.18,J2008.57,B1982.60,245.64829793,-38.7682176459,246.229734859,-38.8462753796,245.067899116,-38.6883914108 +B1961.79,J1977.75,B1986.49,176.234540627,12.5643501076,176.440478946,12.4756870596,176.028521797,12.6529921788 +B1929.65,J2019.85,B1969.56,333.536461653,-55.645568776,335.008274077,-55.1931712959,332.042112951,-56.0921730529 
+B1939.61,J2001.08,B1969.64,185.716717981,-21.5568171888,186.518819162,-21.8971031217,184.916731889,-21.2160545858 +B1938.65,J1988.76,B1992.98,25.9775574253,12.7249831044,26.6478047282,12.9750431145,25.30853404,12.4734949987 +B1928.56,J2017.18,B1990.50,204.302987352,-36.6989586206,205.594712571,-37.1462777072,203.0212666,-36.2470793085 +B1959.00,J1997.12,B1991.83,221.487546141,22.5689795999,221.917649745,22.4105410037,221.057403953,22.7284735519 +B1936.24,J2008.46,B1959.40,338.956666009,-30.7135370512,339.96511112,-30.3370017697,337.943007518,-31.0875245016 +B1952.57,J2024.63,B1967.98,149.5308077,21.1458572723,150.530972008,20.7983136142,148.526893227,21.4898432119 +B1963.49,J2017.63,B1974.10,95.1983908472,-1.61163007915,95.8836923542,-1.64073778617,94.5129553374,-1.58611303786 +B1935.59,J2021.68,B1998.30,35.0615395317,-28.6207880841,36.013756926,-28.230800891,34.108208406,-29.0153533631 +B1939.64,J2018.11,B1978.17,174.903919876,-25.7547140538,175.892230462,-26.1901662894,173.918861307,-25.3199298979 +B1942.82,J1978.35,B1991.38,167.27863063,54.1842744725,167.792865117,53.9911451576,166.761561647,54.3770117742 +B1972.82,J1989.59,B1953.81,10.7133541168,-26.6356033619,10.9196530538,-26.5438625332,10.5069242085,-26.7274067313 +B1958.01,J1984.82,B1977.66,249.939886269,43.0233288254,250.152708941,42.9723921243,249.727120549,43.0747862488 +B1972.53,J1995.55,B1977.40,258.100960451,-37.3838036503,258.492060536,-37.4098004846,257.710089764,-37.356950996 +B1929.84,J1995.96,B1995.27,262.732112385,-19.8057986634,263.710992923,-19.8492450608,261.753853467,-19.7561160195 +B1938.23,J2022.91,B1968.47,149.166366188,63.2857703333,150.716387336,62.8777259431,147.585925919,63.6872128631 +B1938.61,J2021.20,B1995.06,5.4355841259,0.695799807062,6.49453219621,1.15308515723,4.37708671511,0.237709638907 +B1966.75,J2024.18,B1957.03,327.231056694,-11.1377396332,328.000338248,-10.8677167475,326.460220195,-11.4054365616 +B1965.64,J1994.07,B1954.96,284.17633852,-71.0631656787,284.986812185,-71.023307137,283.363277683,-71.1008495735 +B1939.69,J2023.20,B1998.66,59.4717008987,14.0960045791,60.6437035945,14.328043327,58.3026637155,13.8557849823 +B1957.49,J2004.52,B1997.10,112.602946077,-17.7763932222,113.128066437,-17.8781256601,112.077653347,-17.6768763275 +B1946.05,J2000.05,B1979.55,219.940310095,-26.5130440909,220.729784133,-26.7422090278,219.153387495,-26.2812240409 +B1928.73,J1989.10,B1952.60,131.216503219,-60.6790709392,131.538762739,-60.9012667241,130.892392069,-60.458301418 +B1961.94,J1983.12,B1952.51,56.1738921125,-19.3427782341,56.4110363048,-19.277323069,55.9367786527,-19.4086389221 +B1954.27,J1997.44,B1966.23,63.8293728328,-59.8347944156,64.0119166618,-59.729130492,63.6478301692,-59.9411437379 +B1942.23,J1992.36,B1968.79,312.440281577,-82.909075449,314.686445788,-82.716709549,310.089004633,-83.0931764134 +B1937.90,J2001.19,B1988.21,104.43408064,-66.6447299251,104.453108425,-66.7326176146,104.411797906,-66.5569652662 +B1939.59,J2002.19,B1992.96,210.664663673,-17.5831928536,211.524274634,-17.8816169868,209.807499075,-17.2821058793 +B1963.49,J1975.54,B1977.29,163.438155327,-54.6954182678,163.565812054,-54.7597430346,163.310636547,-54.6311360803 +B1946.22,J1989.83,B1966.19,148.024127582,2.32865180198,148.587881848,2.12205454824,147.4598279,2.53398329697 +B1939.43,J1983.41,B1970.29,317.748400264,-34.6457182874,318.424549161,-34.4635023474,317.070001399,-34.8259880426 +B1938.21,J2022.93,B1955.48,249.374885326,79.5246095403,248.102397286,79.3535339601,250.706479605,79.6856520522 
+B1938.85,J1986.82,B1956.86,100.53840787,-27.7507223648,101.014813183,-27.8006647295,100.061930435,-27.7029635357 +B1967.12,J2016.63,B1987.27,23.1984832267,21.1208388177,23.8758085173,21.373531354,22.5228650317,20.8668643466 +B1933.48,J2010.39,B1993.82,71.5045009532,3.00896662959,72.5117326573,3.14121771682,70.4983597655,2.86958134305 +B1937.60,J2003.03,B1962.95,335.405788093,-6.90098238794,336.26168125,-6.56861752396,334.548406282,-7.23108048852 +B1954.59,J2010.06,B1984.28,307.588884401,18.8511389183,308.216046805,19.0408491208,306.961527852,18.6641076356 +B1973.77,J1985.60,B1967.96,343.704504442,-46.9224252956,343.875951545,-46.8591633148,343.532811099,-46.9856319247 +B1925.21,J2009.55,B1950.30,18.8112053675,35.1485289159,20.0024206727,35.5914321179,17.6282310843,34.702488267 +B1937.00,J2006.08,B1988.06,208.609805013,-46.3894275721,209.692670963,-46.7252945952,207.535889978,-46.0500943647 +B1961.33,J2010.87,B1970.70,172.978655994,15.4172636989,173.622309813,15.1433334088,172.333980129,15.6908149586 +B1937.54,J1982.63,B1966.69,7.8152324312,-34.9365736294,8.36836736742,-34.688026104,7.26063844242,-35.1854511343 +B1970.91,J1979.71,B1963.90,134.503366944,-72.4111269318,134.50595835,-72.4454843369,134.500551115,-72.3767711725 +B1950.41,J2022.56,B1979.63,149.073048424,14.7065160273,150.050293767,14.3602043524,148.092931537,15.0493015642 +B1950.13,J1995.20,B1966.26,217.406604209,16.5186514295,217.938935407,16.3200334608,216.874300735,16.7186856279 +B1960.62,J2007.17,B1996.84,241.829541848,16.5114334946,242.358489224,16.3901534117,241.300790354,16.6348221277 +B1956.99,J2023.02,B1954.80,301.991652158,46.8228690265,302.505263266,47.0190374374,301.477638783,46.6294966913 +B1939.22,J2016.87,B1994.16,280.629434995,-19.0017596678,281.769997009,-18.9177962123,279.487664877,-19.0772612574 +B1925.49,J1981.53,B1978.40,144.252375855,-10.2581330338,144.937158573,-10.5124362227,143.567309058,-10.006008783 +B1954.74,J2000.05,B1953.10,286.0305233,12.7464714044,286.556205108,12.8172544241,285.504676194,12.6779134146 +B1967.05,J1984.09,B1993.75,321.524751743,61.8464645226,321.632971919,61.9207781138,321.416450206,61.7722624353 +B1949.33,J1977.52,B1961.24,94.4962887092,-44.0946278203,94.7059415448,-44.1072194614,94.2866143528,-44.0826087841 +B1964.04,J2019.75,B1989.97,356.110922656,-39.1892569317,356.840327262,-38.8797072373,355.378110002,-39.4985381971 +B1945.63,J1988.30,B1990.09,307.190555646,-43.7191034979,307.916952389,-43.5743180165,306.461507156,-43.8614854707 +B1943.99,J1977.69,B1951.45,263.331776174,25.1917278571,263.675941152,25.1704984165,262.987636197,25.2140768109 +B1969.92,J1999.97,B1981.35,128.003624894,58.8666544649,128.605737687,58.7629529731,127.398829454,58.9689675244 +B1969.84,J2014.01,B1980.23,317.984216655,-8.89508525523,318.575678141,-8.71153679083,317.391922121,-9.07693345337 +B1961.02,J2002.85,B1953.91,312.465272698,5.18400310772,312.985580994,5.34203296971,311.944618588,5.02753402011 +B1939.24,J1981.75,B1988.65,344.0759205,-20.8070551085,344.644700272,-20.5791609914,343.505986239,-21.0343039973 +B1941.99,J1994.43,B1957.17,0.0386123471053,-42.7336081023,0.708747131881,-42.4416361362,359.365316955,-43.025582347 +B1939.26,J1987.88,B1973.18,5.95477509083,23.9728714179,6.5909526232,24.2419429246,5.32008332697,23.7034884386 +B1963.98,J2001.37,B1954.86,113.065220613,27.4191705733,113.643404238,27.3366368146,112.486263629,27.4997700815 +B1925.23,J2020.13,B1978.49,358.313822853,67.0446512684,359.505559565,67.5728883986,357.14681594,66.5167328688 
+B1929.23,J2017.66,B1970.19,53.5839203362,-15.011852649,54.610957277,-14.7231509285,52.5576449921,-15.3076533827 +B1947.07,J2016.84,B1979.33,60.2557627351,25.6833225299,61.3134611254,25.8729083597,59.2011689172,25.4875201301 +B1937.55,J1985.79,B1987.44,273.08593329,76.4393919681,272.591344908,76.4526927726,273.578774594,76.4237802487 +B1970.29,J1981.68,B1994.48,25.0306798156,-51.1202356021,25.1435488988,-51.0628198065,24.9177386023,-51.177704254 +B1969.04,J1981.01,B1968.97,253.970437895,31.094899255,254.085382476,31.0765584395,253.855499113,31.1133685873 +B1960.83,J2018.73,B1964.62,168.89950144,-43.2270950714,169.584603614,-43.5437889415,168.217301809,-42.9111416391 +B1936.93,J1979.00,B1975.46,3.66775780511,39.2622225734,4.22000281563,39.4958903381,3.11745305971,39.0284106254 +B1971.26,J1994.47,B1976.64,278.936590632,6.21231840756,279.220262021,6.23271017522,278.652884808,6.19255870349 +B1973.48,J1984.09,B1955.27,285.91236301,9.40548699672,286.039106102,9.42175539099,285.785609343,9.38934433416 +B1953.91,J1995.56,B1952.30,53.8450026285,60.7259893436,54.7155379279,60.8613834632,52.9800334576,60.5877592411 +B1938.45,J2016.67,B1981.10,8.53330744443,-7.54498028811,9.52668790991,-7.1149113183,7.53943191911,-7.97616983921 +B1940.05,J2023.92,B1991.12,274.342957522,-1.24603088049,275.427502795,-1.20626637794,273.258110296,-1.27698166464 +B1956.27,J1975.21,B1952.75,80.5212647616,19.4060625392,80.8007537943,19.4231777982,80.2418410241,19.3884398656 +B1963.99,J2002.99,B1989.90,94.3827831954,15.0883386826,94.9409404907,15.0706908672,93.8245150675,15.1038774277 +B1946.06,J2012.59,B1962.21,164.473020999,-47.6965440186,165.218388831,-48.0540734375,163.731379108,-47.3403011601 +B1957.85,J1994.50,B1990.18,89.9736906625,-16.9964263489,90.3810379284,-16.9970588536,89.5663435591,-16.9972444015 +B1946.18,J1990.43,B1964.91,204.582082173,15.6789515837,205.12023156,15.4553934359,204.04377436,15.9034725087 diff --git a/astropy/coordinates/tests/accuracy/galactic_fk4.csv b/astropy/coordinates/tests/accuracy/galactic_fk4.csv new file mode 100644 index 0000000..c3c8276 --- /dev/null +++ b/astropy/coordinates/tests/accuracy/galactic_fk4.csv @@ -0,0 +1,202 @@ +# This file was generated with the ref_galactic_fk4.py script, and the reference values were computed using AST +equinox_fk4,obstime,lon_in,lat_in,ra_fk4,dec_fk4,lon_gal,lat_gal +J1998.36,B1995.95,334.661793414,43.9385116594,215.729885213,-13.2119623291,95.9916336135,-10.7923599366 +J2021.64,B1954.56,113.895199649,-14.1109832563,0.0191713429163,47.9584946764,230.354307383,2.91031092906 +J2020.49,B1953.55,66.2107722038,-7.76265420193,307.0396671,25.0473933964,202.190459847,-36.2511029663 +J1981.50,B1970.69,73.6417002791,41.7006137481,249.552478408,47.490161693,163.738209835,-0.997514227815 +J2001.47,B1960.78,204.381010469,-14.9357743223,85.7262507794,0.592842446128,319.182343564,46.4865699629 +J2005.96,B1975.98,214.396093073,-66.7648451487,38.7974895634,-25.3131215325,311.259111645,-5.26093959516 +J2006.23,B1977.93,347.225227105,6.27744217753,251.681067557,-35.6975782982,82.4439145069,-48.3754431897 +J2007.34,B1973.69,235.143754874,-5.59566003897,108.194271484,-22.3032173532,0.622684927771,37.7376079889 +J1991.60,B1960.79,269.606389512,26.7823112195,159.265817549,-27.2400623832,52.4594618492,22.7351205489 +J1980.71,B1961.97,235.285153507,-14.0695156888,99.5923664647,-26.0329761781,353.421599279,31.5338685058 +J2003.56,B1960.84,269.177331338,42.9472695107,168.194363902,-13.373076419,69.4875812789,27.7142399301 
+J1990.10,B1982.78,346.070424986,-3.51848810713,260.556249219,-42.5373980474,71.1723254841,-55.2318229113 +J1984.68,B1992.32,3.01978725896,7.19732176646,261.223075691,-22.5183053503,106.371052811,-54.3443814356 +J2003.24,B1996.52,38.3199756112,18.8080489808,268.244155911,13.0679884186,153.915977612,-37.8861321281 +J2005.52,B1990.02,107.533336957,-4.33088623215,345.276777715,55.2303472065,218.881057613,2.11460956182 +J1977.27,B1984.04,236.30802591,14.3162535375,126.558516177,-13.1859909524,24.4040838917,47.3681313134 +J2024.27,B1960.36,291.532518915,-33.7960784017,65.6262288958,-78.0827780664,4.70715132794,-21.1240080657 +J1980.19,B1987.08,313.983328941,27.7572327639,204.395115343,-33.9974383642,72.5499341116,-11.3261456428 +J1995.29,B1984.85,347.273135054,-13.6880685538,273.878542915,-46.5817989568,57.2967846205,-62.636282227 +J2008.28,B1969.09,260.526724891,-37.6134342267,75.5423582477,-53.0108213216,349.993949344,-0.500521761262 +J1984.85,B1992.51,231.291118043,-27.2371455509,84.3750724965,-27.2619452007,340.394703326,24.1136027935 +J1987.09,B1976.41,258.283303492,-30.1025933842,87.5349107922,-50.9413101937,355.216758932,5.09769033822 +J2006.16,B1994.65,168.335642599,-44.084769302,44.0040901708,7.58736494962,284.861051883,15.3412175718 +J2014.94,B1991.03,117.210483914,32.8708634152,231.950026475,82.367116716,187.3264088,25.4619880653 +J2002.23,B1961.43,158.272058119,-29.286471988,46.4761761399,24.2223508812,269.917276667,24.5785034911 +J1984.88,B1991.03,262.688069789,-48.1516431413,57.7755536872,-52.5013674166,342.226051771,-7.92146528355 +J2014.21,B1956.93,357.845250924,19.2890677934,248.037990583,-19.4340699812,103.672360905,-41.3775036599 +J2015.72,B1974.12,243.674536239,-10.0431678136,108.027253494,-31.863249456,2.91619105856,28.4959537625 +J2010.54,B1957.44,284.696106425,19.6051067047,170.340404941,-40.0951306839,51.2254926849,7.33605738412 +J2022.20,B1972.41,61.5291328053,18.6403709997,277.730191309,33.3416109651,174.063892959,-24.5412790814 +J2017.75,B1983.30,9.66573928438,-22.9075078717,295.17981175,-30.0450764744,85.3259571782,-84.8466105492 +J2023.18,B1989.45,288.133287813,-36.6947385674,61.7090085882,-74.1820684991,0.817296879039,-19.4797887996 +J1998.23,B1983.10,325.340113758,-33.7758802174,307.31206399,-69.6283338955,11.6623486171,-48.8815187305 +J1999.25,B1985.58,8.88343575454,-49.4693354042,325.965770063,-35.6133692502,309.666273629,-67.4551398942 +J2004.32,B1994.40,177.029034641,-67.7755279684,31.2372626731,-12.9650951893,296.955672515,-5.62000346764 +J2022.10,B1957.08,189.451860246,-68.7071945134,33.5293665419,-17.1203080138,301.559917262,-5.75405801934 +J1993.61,B1957.38,214.691763751,-32.6160600699,73.7224767298,-15.6028544376,323.538376206,26.6926709764 +J2004.91,B1966.30,18.7047162369,-32.9080620608,308.505564328,-25.5373410674,263.547066418,-82.3338996972 +J2005.68,B1951.59,322.232230099,14.4669345738,219.553504168,-44.4049264885,66.7343979667,-25.6090866517 +J2003.00,B1984.39,262.175824918,51.7319974933,169.003247618,-3.42937646572,78.8860186239,33.5626186817 +J1980.93,B1988.24,294.6060041,34.0181871087,184.771961476,-28.2403711462,68.3561968833,5.91397226579 +J1995.15,B1967.50,180.08019102,26.2892216009,115.670140935,39.4176352042,214.406973761,78.6105433559 +J1986.07,B1980.80,291.668187169,-22.2789167174,125.910652709,-78.6378819053,16.4272341834,-17.5632578893 +J2014.41,B1997.92,34.548669268,-15.8924906144,297.966081457,-5.74276095396,188.103833481,-67.1344687124 
+J2013.20,B1964.55,78.8220157436,-37.4332268082,338.41386544,13.3803692475,241.413633182,-34.4957267196 +J1983.72,B1984.33,93.1388621771,60.5731416456,215.515242863,51.025917079,153.788670192,19.0304556569 +J2011.19,B1952.11,168.518071423,7.09229333513,86.7960140054,42.4095753728,249.125769518,59.3639239957 +J2021.23,B1953.13,165.374352937,39.3890686842,133.539016217,52.7585931969,177.607817121,63.8456065457 +J1998.80,B1990.72,255.423520875,-17.5881075751,104.861453622,-45.3027201728,3.92822669713,14.6469670383 +J2020.65,B1971.83,64.0990821181,36.8289797648,256.321772318,39.914355366,162.091739264,-10.1708006469 +J1996.87,B1969.60,191.321958369,-52.3532066605,47.9455555868,-9.26762335019,302.005131299,10.4906094438 +J1978.29,B1966.53,60.3872023631,25.1025882655,269.550425169,34.4047511633,168.612591713,-20.2821406332 +J1993.19,B1972.88,276.773010626,56.6051138031,178.703826596,-3.3434530167,85.5633134045,25.6194736799 +J1984.47,B1991.77,334.141397682,37.3852087993,218.479404766,-19.0772794069,91.968516951,-15.9701580762 +J1982.42,B1973.34,219.417716878,-20.2290328911,87.1109593231,-14.6004706289,334.420144542,35.926923956 +J1985.55,B1971.06,54.0660580808,-29.3264933861,318.139103598,3.17485146412,226.277308044,-53.6788644695 +J2018.98,B1978.54,176.26561333,-0.572718169429,83.8491889252,31.8061689325,269.795526718,58.0046040213 +J2015.89,B1986.95,135.84418338,-9.94938261687,33.531734841,50.9381624137,238.492810285,23.3637333893 +J2006.58,B1952.75,305.496508312,-8.63421746611,200.932667574,-71.3815196871,35.373262982,-23.9548001451 +J2022.76,B1981.21,327.995002307,-58.3471659896,350.834964718,-54.0901563813,335.114901978,-45.751212695 +J1980.95,B1981.05,138.185539617,11.9337947187,58.631520613,69.0711593141,218.447399995,36.9072609382 +J2005.11,B1950.06,113.578525223,29.6301583121,263.636462216,81.3987773508,189.681688921,21.6122927944 +J1991.57,B1980.14,204.621895006,36.5235009134,133.680798913,22.0411777739,78.808513096,76.2737160193 +J2016.08,B1952.01,67.6144926088,-13.7094836718,313.132620076,22.6386092017,209.654020089,-37.5533649541 +J2007.99,B1979.29,45.3029557779,36.4639084123,253.115407251,25.0961411128,150.086524604,-19.5316401959 +J1996.13,B1972.42,247.534489816,-3.23349952461,117.046928774,-32.0174103517,11.9107776158,29.166414441 +J2010.80,B1967.69,287.858418461,26.2825631559,176.528272624,-34.7137695077,58.5625336575,7.68664923227 +J1985.76,B1996.68,206.473163472,-38.4312130715,65.3730874222,-11.8626802001,314.662473159,23.1067145187 +J1975.84,B1963.36,350.362793376,-7.51631961926,267.890694432,-41.0314521815,72.0362114405,-61.1485139616 +J1989.04,B1964.06,228.259575769,40.311002157,144.950655363,6.68810736787,66.3208521242,57.9479583999 +J2005.09,B1975.25,319.831820932,40.7337792676,206.517019036,-20.3728721852,85.376518971,-6.19125657467 +J1998.03,B1982.34,178.349313153,-38.3854710615,53.3367731423,6.47139870346,290.627978018,23.0870158337 +J2010.53,B1998.53,126.58195076,-73.6980337652,14.0339286057,-10.7989965101,287.063023216,-19.6793830621 +J1983.23,B1951.79,257.122932676,24.0154376566,147.559357599,-22.3222385045,45.3959106575,32.4293515474 +J2022.01,B1971.16,181.414481921,-17.7858263698,71.6065443096,17.7205783522,287.370198984,43.7863289812 +J2022.77,B1979.42,81.2295383474,-9.26450146427,319.219354065,35.7151156735,211.413414641,-23.6522137945 +J2024.04,B1986.59,88.1907984871,32.4238226453,264.445366137,59.5075300274,177.63221018,2.82201785023 +J1977.94,B1958.78,285.408252018,67.7826509035,186.006666089,5.9317624038,98.5359798987,24.0369224888 
+J2012.02,B1975.53,178.262069224,51.7327600597,149.616027267,42.1293667525,143.189803372,62.9449441105 +J2005.03,B1975.01,329.433722424,-46.8960749035,333.526505486,-61.2774858546,350.879342803,-50.7102611905 +J1979.45,B1994.64,340.333860195,36.5560891832,223.292370195,-17.1540759917,95.9213163317,-19.343112709 +J2024.47,B1969.13,191.963602676,21.3572019706,114.434412024,27.503830072,291.719299139,84.2576478027 +J2002.44,B1983.14,90.8973340407,3.44588414281,314.995461083,51.3170142615,204.308033362,-9.04466471803 +J2008.72,B1952.34,259.510340943,47.0512387915,164.869262172,-6.16993929742,73.122316095,35.0776892287 +J2011.24,B1987.56,132.277954966,30.4307232942,127.48825526,81.1338000052,193.90517209,37.3016929434 +J2003.42,B1968.44,179.513439448,-54.44865752,42.0526470828,-4.9188999851,295.12094312,7.62776271377 +J2001.37,B1997.40,81.5670170865,-19.9451944488,328.061837334,28.1928252216,222.515151909,-27.3578081826 +J1982.54,B1967.36,127.283632829,-10.0946390302,19.6477131913,52.4470778861,233.960957731,16.5452929993 +J1987.01,B1984.19,234.306643184,-86.4404274379,16.4715901103,-28.4445608636,305.546826719,-24.5135301459 +J1995.13,B1991.23,112.65584231,11.2521500479,339.527728916,71.3900724033,207.206370175,13.7939581949 +J1978.39,B1974.31,276.744760981,21.4151577082,162.407802968,-35.1255983687,49.8499233813,14.5681779743 +J2012.92,B1999.21,281.461357214,-15.511897988,128.74174115,-66.9721136327,18.3606180783,-5.64535469428 +J1992.13,B1980.19,306.867413859,-11.9467360888,207.123394118,-74.3365109128,32.8334594598,-26.7428218865 +J2024.49,B1987.98,341.966066455,-2.82477813631,257.079269135,-45.5062849208,66.5166383458,-51.5988768204 +J2019.43,B1984.23,38.6362483924,9.3322810896,277.298399175,9.29190831823,160.679381282,-46.0098367014 +J2021.93,B1996.62,327.861128148,-46.529254733,334.498175895,-62.285753657,351.779204131,-49.5514803605 +J2011.96,B1997.49,120.979858288,87.22617179,193.112689708,29.8355211732,125.900479598,27.8877168914 +J1976.35,B1999.51,297.496953653,0.839666332936,181.085417356,-61.4154773919,40.6846762696,-12.8402998175 +J1994.12,B1956.31,323.316228643,-0.794522598791,232.744583525,-57.172811607,53.4475657454,-35.7640525677 +J1975.53,B1998.83,15.3775095611,-38.7740290611,313.687711979,-29.7532661336,292.123823728,-78.249288242 +J1978.26,B1961.46,70.486199672,-24.0682131367,323.035235152,17.9223915313,223.250351558,-38.1828370964 +J2009.07,B1959.30,106.020475905,36.6574903487,244.972778375,72.7878865437,180.327652321,18.0468441903 +J2024.33,B1975.46,225.719957006,-24.2326924255,86.0366112823,-21.5082578389,337.38182103,29.8986234954 +J2008.31,B1976.52,31.0403178442,23.2187819108,261.302299942,8.78047554861,143.772447485,-36.7511639142 +J1995.76,B1964.13,51.4602071324,-27.0058546166,315.065766587,2.55796617517,221.775446591,-55.7242078312 +J1977.06,B1965.51,185.697546923,55.594260797,153.338541661,37.440299398,131.049487592,61.2247044105 +J2019.71,B1965.49,248.162878677,-23.7609450888,94.0591617121,-40.8224910786,354.392971779,16.4900856964 +J2010.34,B1963.32,308.385291884,51.2349043028,196.476156832,-11.5444983515,88.2071202318,6.64482242886 +J1998.94,B1979.67,233.050205996,63.3093356498,166.867510536,15.6637804493,98.5156204798,45.4612418027 +J1985.78,B1960.86,209.382723191,-41.4659129842,63.4734186744,-15.1442383851,316.163237599,19.6100595169 +J1979.09,B1970.12,256.001743835,-16.3448051664,106.649451565,-45.3254094121,5.44744564064,14.6683716892 +J2008.66,B1964.43,90.8700685367,21.3678694408,287.008253726,60.3030901692,188.510594416,-0.450177420343 
+J2024.74,B1958.69,324.057486054,57.4352750563,204.387834151,-3.71156697525,98.8294418875,3.92980570683 +J2004.68,B1961.29,159.225729446,-45.2472278228,37.9943842023,10.4609560801,279.445767043,11.4162684154 +J2017.01,B1999.43,7.38749687642,-53.1540997613,330.796893447,-36.432735539,310.561617023,-63.5428435122 +J1982.65,B1971.70,345.477965039,-10.1831007688,267.939382589,-46.5465175303,61.3892410717,-59.2635781091 +J2018.81,B1991.41,234.801152081,71.8511934075,175.271116162,19.166721209,107.275353666,39.7113209648 +J2002.24,B1978.63,184.754250038,-66.4894904918,34.1841541814,-14.4958117979,299.686716649,-3.81069739954 +J2024.18,B1982.60,245.64829793,-38.7682176459,73.9547095701,-41.3059410321,341.54798842,7.97381666623 +J2011.79,B1986.49,176.234540627,12.5643501076,97.7788131928,38.1835469935,252.339039389,68.6211958128 +J1979.65,B1969.56,333.536461653,-55.645568776,343.316605394,-54.0790917446,336.652926202,-50.1180340532 +J1989.61,B1969.64,185.716717981,-21.5568171888,70.5091730065,12.162471916,294.316919615,40.7880063819 +J1988.65,B1992.98,25.9775574253,12.7249831044,268.242055984,-0.265815951959,142.501478254,-48.0628732689 +J1978.56,B1990.50,204.302987352,-36.6989586206,66.0962720642,-9.57087489783,313.310472546,25.1146611916 +J2009.00,B1991.83,221.487546141,22.5689795999,126.853873425,2.99323159891,29.3329007943,63.7844492045 +J1986.24,B1959.40,338.956666009,-30.7135370512,297.785788922,-58.4349148844,18.009575125,-60.3855752943 +J2002.57,B1967.98,149.5308077,21.1458572723,94.8524518306,65.0524091023,211.851072125,50.1739306174 +J2013.49,B1974.10,95.1983908472,-1.61163007915,325.627893674,50.8317830992,210.781640873,-7.70049786191 +J1985.59,B1998.30,35.0615395317,-28.6207880841,309.362547589,-10.9681750175,222.2815235,-70.2039555167 +J1989.64,B1978.17,174.903919876,-25.7547140538,60.6700456981,17.1698957102,283.434726362,34.3456435324 +J1992.82,B1991.38,167.27863063,54.1842744725,155.134423626,47.955156172,150.615261991,57.1592810224 +J2022.82,B1953.81,10.7133541168,-26.6356033619,299.700062775,-30.2854018223,42.0331929301,-87.8059255496 +J2008.01,B1977.66,249.939886269,43.0233288254,157.107675338,-4.65986958657,67.7233796,41.7069452763 +J2022.53,B1977.40,258.100960451,-37.3838036503,76.1621887374,-51.0815347034,348.967532389,1.36680578377 +J1979.84,B1995.27,262.732112385,-19.8057986634,105.205356822,-52.5372854117,6.11993733263,7.43500801549 +J1988.23,B1968.47,149.166366188,63.2857703333,174.688450428,50.0778473898,148.575911001,44.2672282651 +J1988.61,B1995.06,5.4355841259,0.695799807062,268.674810006,-23.9106016257,107.664837783,-61.2469672937 +J2016.75,B1957.03,327.231056694,-11.1377396332,252.951382603,-62.1498935414,44.0578705009,-44.1656816269 +J2015.64,B1954.96,284.17633852,-71.0631656787,21.4861837786,-44.7317770239,324.040057511,-25.8326289675 +J1989.69,B1998.66,59.4717008987,14.0960045791,281.495047329,29.7732755945,176.567960403,-28.6862393249 +J2007.49,B1997.10,112.602946077,-17.7763932222,359.226063676,44.0329923131,233.074018942,0.179492159778 +J1996.05,B1979.55,219.940310095,-26.5130440909,81.4465458752,-17.5043303516,331.149281423,30.3269968045 +J1978.73,B1952.60,131.216503219,-60.6790709392,16.634154156,1.81850032112,276.99056645,-10.9885663722 +J2011.94,B1952.51,56.1738921125,-19.3427782341,311.186999671,10.4337104739,211.304689863,-49.7748029903 +J2004.27,B1966.23,63.8293728328,-59.8347944156,346.99277437,-9.45023426908,270.969703277,-42.5256665899 +J1992.23,B1968.79,312.440281577,-82.909075449,11.3540896839,-34.157150499,310.093208312,-30.498791986 
+J1987.90,B1988.21,104.43408064,-66.6447299251,5.45340291535,-4.88740816946,277.212698727,-24.1397093769 +J1989.59,B1992.96,210.664663673,-17.5831928536,86.0165552167,-5.98416607071,326.19870132,41.9722447163 +J2013.49,B1977.29,163.438155327,-54.6954182678,35.0847453795,1.14728902679,286.284423558,4.38048943444 +J1996.22,B1966.19,148.024127582,2.32865180198,61.1653789554,55.4628176364,235.147968548,40.5062869945 +J1989.43,B1970.29,317.748400264,-34.6457182874,316.831643302,-75.2962612516,9.65732035894,-42.8175894028 +J1988.21,B1955.48,249.374885326,79.5246095403,183.718980722,20.6913682813,112.565520636,32.5641832214 +J1988.85,B1956.86,100.53840787,-27.7507223648,349.546329312,30.9972476643,237.218549213,-14.0500966148 +J2017.12,B1987.27,23.1984832267,21.1208388177,259.952338902,1.35554765265,135.43691251,-40.8550069313 +J1983.48,B1993.82,71.5045009532,3.00896662959,299.603452073,35.2702997539,194.634394328,-25.8112525209 +J1987.60,B1962.95,335.405788093,-6.90098238794,255.324007551,-53.1294372006,55.8410528163,-49.2268304977 +J2004.59,B1984.28,307.588884401,18.8511389183,199.036872823,-43.8235513129,61.4614776755,-11.7984990917 +J2023.77,B1967.96,343.704504442,-46.9224252956,326.142463635,-52.3943400975,344.29956144,-59.491008965 +J1975.21,B1950.30,18.8112053675,35.1485289159,245.286476487,4.93182941858,128.71934377,-27.3093230426 +J1987.00,B1988.06,208.609805013,-46.3894275721,58.5433364885,-16.5301679525,314.238428253,15.0130783715 +J2011.33,B1970.70,172.978655994,15.4172636989,99.5237786852,42.2246688539,240.910040544,68.0540051284 +J1987.54,B1966.69,7.8152324312,-34.9365736294,307.969684049,-34.8499910529,330.146180835,-81.2012202729 +J2020.91,B1963.90,134.503366944,-72.4111269318,16.6485961895,-9.74558853146,287.251430013,-17.0236932061 +J2000.41,B1979.63,149.073048424,14.7065160273,81.1217800627,62.690435672,220.791567295,47.4907829088 +J2000.13,B1966.26,217.406604209,16.5186514295,119.582956349,3.61620654175,12.9778311665,64.930795113 +J2010.62,B1996.84,241.829541848,16.5114334946,131.926345089,-16.4641006337,30.2129120083,43.7266326194 +J2006.99,B1954.80,301.991652158,46.8228690265,192.281654734,-16.0819438067,82.1831278354,7.65942031664 +J1989.22,B1994.16,280.629434995,-19.0017596678,119.763163085,-67.9434807772,15.0327362766,-6.7748272396 +J1975.49,B1978.40,144.252375855,-10.2581330338,44.2663724488,47.1780541535,244.906125318,30.0146889384 +J2004.74,B1953.10,286.0305233,12.7464714044,168.403291875,-46.9082451034,45.684929198,3.05860565516 +J2017.05,B1993.75,321.524751743,61.8464645226,201.729972147,0.142282640146,100.9998867,8.01195048206 +J1999.33,B1961.24,94.4962887092,-44.0946278203,352.197455758,14.1646347321,251.750996447,-24.2178084719 +J2014.04,B1989.97,356.110922656,-39.1892569317,312.562642311,-44.4976591581,347.918750957,-71.4003276728 +J1995.63,B1990.09,307.190555646,-43.7191034979,2.19409366458,-73.0808365619,356.907677491,-35.5140842361 +J1993.99,B1951.45,263.331776174,25.1917278571,153.169840501,-25.2115873395,48.721484719,27.5451183286 +J2019.92,B1981.35,128.003624894,58.8666544649,188.132407462,57.9560573023,158.151363881,35.6484565698 +J2019.84,B1980.23,317.984216655,-8.89508525523,234.105102561,-66.9359336144,41.2540921899,-35.0019322625 +J2011.02,B1953.91,312.465272698,5.18400310772,210.387316474,-56.4513187131,52.2045687218,-23.2783213403 +J1989.24,B1988.65,344.0759205,-20.8070551085,282.194864755,-52.0563161521,40.4226717361,-63.1239129244 +J1991.99,B1957.17,0.0386123471053,-42.7336081023,317.113101073,-41.6722671173,333.272026627,-71.3154908857 
+J1989.26,B1973.18,5.95477509083,23.9728714179,248.645776385,-10.559858406,115.039830262,-38.4244563396
+J2013.98,B1954.86,113.065220613,27.4191705733,278.378621564,81.2392172048,191.670829361,20.3160887911
+J1975.23,B1978.49,358.313822853,67.0446512684,211.780718733,12.9835628445,117.438946252,4.91999007663
+J1979.23,B1970.19,53.5839203362,-15.011852649,305.729478083,10.5974755105,203.68085793,-50.0750506428
+J1997.07,B1979.33,60.2557627351,25.6833225299,269.003573186,34.456942967,167.932574018,-20.1669832622
+J1987.55,B1987.44,273.08593329,76.4393919681,185.756212072,15.2737854446,107.678579719,28.6298222935
+J2020.29,B1994.48,25.0306798156,-51.1202356021,329.572709072,-25.482154153,285.52934787,-64.2572780699
+J2019.04,B1968.97,253.970437895,31.094899255,151.036217235,-15.4523165082,52.7995114065,37.292668285
+J2010.83,B1964.62,168.89950144,-43.2270950714,44.9384665428,8.00028298712,284.864643848,16.2980080563
+J1986.93,B1975.46,3.66775780511,39.2622225734,235.238676188,-2.72528403932,115.361704733,-23.0060112816
+J2021.26,B1976.64,278.936590632,6.21231840756,154.692659833,-49.4790598243,36.5657932055,6.53011956709
+J2023.48,B1955.27,285.91236301,9.40548699672,166.481412833,-50.0386245303,42.5308356905,1.81955485147
+J2003.91,B1952.30,53.8450026285,60.7259893436,226.147190382,33.5643817186,141.659779689,3.93637165962
+J1988.45,B1981.10,8.53330744443,-7.54498028811,278.255981158,-25.1174817952,110.761077739,-69.9571108312
+J1990.05,B1991.12,274.342957522,-1.24603088049,141.305593094,-52.2551761295,27.9749402015,6.88675256757
+J2006.27,B1952.75,80.5212647616,19.4060625392,284.864123639,50.4777772377,185.129288864,-9.72047187101
+J2013.99,B1989.90,94.3827831954,15.0883386826,301.493234394,60.9106556021,195.582917069,-0.623869405575
+J1996.06,B1962.21,164.473020999,-47.6965440186,39.4946578672,6.36223553605,283.94830707,10.9316648861
+J2007.85,B1990.18,89.9736906625,-16.9964263489,333.217747094,35.5496625256,222.809109826,-18.9265144618
+J1996.18,B1964.91,204.582082173,15.6789515837,113.317697869,14.3475448707,348.941256944,74.1578851882
diff --git a/astropy/coordinates/tests/accuracy/generate_ref_ast.py b/astropy/coordinates/tests/accuracy/generate_ref_ast.py
new file mode 100644
index 0000000..aed93c1
--- /dev/null
+++ b/astropy/coordinates/tests/accuracy/generate_ref_ast.py
@@ -0,0 +1,257 @@
+"""
+This series of functions is used to generate the reference CSV files
+used by the accuracy tests. Running this as a command-line script will
+generate them all.
+"""
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
+
+import os
+
+import numpy as np
+
+from ....table import Table, Column
+from ....extern.six.moves import range
+
+
+def ref_fk4_no_e_fk4(fnout='fk4_no_e_fk4.csv'):
+    """
+    Accuracy tests for the FK4 (with no E-terms of aberration) to/from FK4
+    conversion, with arbitrary equinoxes and epoch of observation.
+    """
+
+    import starlink.Ast as Ast
+
+    np.random.seed(12345)
+
+    N = 200
+
+    # Sample uniformly on the unit sphere. These will be either the FK4
+    # coordinates for the transformation to FK4 (no E-terms), or the FK4
+    # (no E-terms) coordinates for the transformation back to FK4.
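# --- Editorial sketch, not part of the upstream patch ---
# The sampling described above draws dec = degrees(arcsin(uniform(-1, 1))).
# The arcsin is what makes the draws uniform over the sphere rather than
# clustered at the poles: the spherical area element scales as cos(dec),
# so sin(dec), not dec itself, must be uniform in [-1, 1]. A quick
# self-contained check (assumes only NumPy):
import numpy as np

rng = np.random.RandomState(12345)
dec = np.degrees(np.arcsin(rng.uniform(-1., 1., 100000)))

# For points uniform on the sphere, sin(dec) is flat in [-1, 1], so every
# histogram bin should hold roughly the same count.
counts, _ = np.histogram(np.sin(np.radians(dec)), bins=10, range=(-1., 1.))
assert np.allclose(counts, 10000., rtol=0.05)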
+ ra = np.random.uniform(0., 360., N) + dec = np.degrees(np.arcsin(np.random.uniform(-1., 1., N))) + + # Generate random observation epoch and equinoxes + obstime = ["B{0:7.2f}".format(x) for x in np.random.uniform(1950., 2000., N)] + + ra_fk4ne, dec_fk4ne = [], [] + ra_fk4, dec_fk4 = [], [] + + for i in range(N): + + # Set up frames for AST + frame_fk4ne = Ast.SkyFrame('System=FK4-NO-E,Epoch={epoch},Equinox=B1950'.format(epoch=obstime[i])) + frame_fk4 = Ast.SkyFrame('System=FK4,Epoch={epoch},Equinox=B1950'.format(epoch=obstime[i])) + + # FK4 to FK4 (no E-terms) + frameset = frame_fk4.convert(frame_fk4ne) + coords = np.degrees(frameset.tran([[np.radians(ra[i])], [np.radians(dec[i])]])) + ra_fk4ne.append(coords[0, 0]) + dec_fk4ne.append(coords[1, 0]) + + # FK4 (no E-terms) to FK4 + frameset = frame_fk4ne.convert(frame_fk4) + coords = np.degrees(frameset.tran([[np.radians(ra[i])], [np.radians(dec[i])]])) + ra_fk4.append(coords[0, 0]) + dec_fk4.append(coords[1, 0]) + + # Write out table to a CSV file + t = Table() + t.add_column(Column(name='obstime', data=obstime)) + t.add_column(Column(name='ra_in', data=ra)) + t.add_column(Column(name='dec_in', data=dec)) + t.add_column(Column(name='ra_fk4ne', data=ra_fk4ne)) + t.add_column(Column(name='dec_fk4ne', data=dec_fk4ne)) + t.add_column(Column(name='ra_fk4', data=ra_fk4)) + t.add_column(Column(name='dec_fk4', data=dec_fk4)) + f = open(fnout, 'wb') + f.write("# This file was generated with the {0} script, and the reference " + "values were computed using AST\n".format(os.path.basename(__file__))) + t.write(f, format='ascii', delimiter=',') + + +def ref_fk4_no_e_fk5(fnout='fk4_no_e_fk5.csv'): + """ + Accuracy tests for the FK4 (with no E-terms of aberration) to/from FK5 + conversion, with arbitrary equinoxes and epoch of observation. + """ + + import starlink.Ast as Ast + + np.random.seed(12345) + + N = 200 + + # Sample uniformly on the unit sphere. These will be either the FK4 + # coordinates for the transformation to FK5, or the FK5 coordinates for the + # transformation to FK4. 
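# --- Editorial sketch, not part of the upstream patch ---
# The "B{0:7.2f}" / "J{0:7.2f}" strings built throughout this script (e.g.
# 'B1975.21', 'J2024.74') are exactly what the accompanying test modules
# later hand to astropy.time.Time, which recognises them as Besselian and
# Julian epoch strings. A minimal round trip:
from astropy.time import Time

obstime = Time("B{0:7.2f}".format(1975.21), scale='utc')
equinox = Time("J{0:7.2f}".format(2024.74), scale='utc')

# Time exposes the parsed epochs back as decimal years
assert round(obstime.byear, 2) == 1975.21
assert round(equinox.jyear, 2) == 2024.74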
+    ra = np.random.uniform(0., 360., N)
+    dec = np.degrees(np.arcsin(np.random.uniform(-1., 1., N)))
+
+    # Generate random observation epoch and equinoxes
+    obstime = ["B{0:7.2f}".format(x) for x in np.random.uniform(1950., 2000., N)]
+    equinox_fk4 = ["B{0:7.2f}".format(x) for x in np.random.uniform(1925., 1975., N)]
+    equinox_fk5 = ["J{0:7.2f}".format(x) for x in np.random.uniform(1975., 2025., N)]
+
+    ra_fk4, dec_fk4 = [], []
+    ra_fk5, dec_fk5 = [], []
+
+    for i in range(N):
+
+        # Set up frames for AST
+        frame_fk4 = Ast.SkyFrame('System=FK4-NO-E,Epoch={epoch},Equinox={equinox_fk4}'.format(epoch=obstime[i], equinox_fk4=equinox_fk4[i]))
+        frame_fk5 = Ast.SkyFrame('System=FK5,Epoch={epoch},Equinox={equinox_fk5}'.format(epoch=obstime[i], equinox_fk5=equinox_fk5[i]))
+
+        # FK4 to FK5
+        frameset = frame_fk4.convert(frame_fk5)
+        coords = np.degrees(frameset.tran([[np.radians(ra[i])], [np.radians(dec[i])]]))
+        ra_fk5.append(coords[0, 0])
+        dec_fk5.append(coords[1, 0])
+
+        # FK5 to FK4
+        frameset = frame_fk5.convert(frame_fk4)
+        coords = np.degrees(frameset.tran([[np.radians(ra[i])], [np.radians(dec[i])]]))
+        ra_fk4.append(coords[0, 0])
+        dec_fk4.append(coords[1, 0])
+
+    # Write out table to a CSV file
+    t = Table()
+    t.add_column(Column(name='equinox_fk4', data=equinox_fk4))
+    t.add_column(Column(name='equinox_fk5', data=equinox_fk5))
+    t.add_column(Column(name='obstime', data=obstime))
+    t.add_column(Column(name='ra_in', data=ra))
+    t.add_column(Column(name='dec_in', data=dec))
+    t.add_column(Column(name='ra_fk5', data=ra_fk5))
+    t.add_column(Column(name='dec_fk5', data=dec_fk5))
+    t.add_column(Column(name='ra_fk4', data=ra_fk4))
+    t.add_column(Column(name='dec_fk4', data=dec_fk4))
+    f = open(fnout, 'wb')
+    f.write("# This file was generated with the {0} script, and the reference "
+            "values were computed using AST\n".format(os.path.basename(__file__)))
+    t.write(f, format='ascii', delimiter=',')
+
+
+def ref_galactic_fk4(fnout='galactic_fk4.csv'):
+    """
+    Accuracy tests for the Galactic to/from FK4 conversion, with arbitrary
+    equinoxes and epoch of observation.
+    """
+
+    import starlink.Ast as Ast
+
+    np.random.seed(12345)
+
+    N = 200
+
+    # Sample uniformly on the unit sphere. These will be either the Galactic
+    # coordinates for the transformation to FK4, or the FK4 coordinates for the
+    # transformation to Galactic.
+    lon = np.random.uniform(0., 360., N)
+    lat = np.degrees(np.arcsin(np.random.uniform(-1., 1., N)))
+
+    # Generate random observation epoch and equinoxes
+    obstime = ["B{0:7.2f}".format(x) for x in np.random.uniform(1950., 2000., N)]
+    equinox_fk4 = ["J{0:7.2f}".format(x) for x in np.random.uniform(1975., 2025., N)]
+
+    lon_gal, lat_gal = [], []
+    ra_fk4, dec_fk4 = [], []
+
+    for i in range(N):
+
+        # Set up frames for AST
+        frame_gal = Ast.SkyFrame('System=Galactic,Epoch={epoch}'.format(epoch=obstime[i]))
+        frame_fk4 = Ast.SkyFrame('System=FK4,Epoch={epoch},Equinox={equinox_fk4}'.format(epoch=obstime[i], equinox_fk4=equinox_fk4[i]))
+
+        # Galactic to FK4
+        frameset = frame_gal.convert(frame_fk4)
+        coords = np.degrees(frameset.tran([[np.radians(lon[i])], [np.radians(lat[i])]]))
+        ra_fk4.append(coords[0, 0])
+        dec_fk4.append(coords[1, 0])
+
+        # FK4 to Galactic
+        frameset = frame_fk4.convert(frame_gal)
+        coords = np.degrees(frameset.tran([[np.radians(lon[i])], [np.radians(lat[i])]]))
+        lon_gal.append(coords[0, 0])
+        lat_gal.append(coords[1, 0])
+
+    # Write out table to a CSV file
+    t = Table()
+    t.add_column(Column(name='equinox_fk4', data=equinox_fk4))
+    t.add_column(Column(name='obstime', data=obstime))
+    t.add_column(Column(name='lon_in', data=lon))
+    t.add_column(Column(name='lat_in', data=lat))
+    t.add_column(Column(name='ra_fk4', data=ra_fk4))
+    t.add_column(Column(name='dec_fk4', data=dec_fk4))
+    t.add_column(Column(name='lon_gal', data=lon_gal))
+    t.add_column(Column(name='lat_gal', data=lat_gal))
+    f = open(fnout, 'wb')
+    f.write("# This file was generated with the {0} script, and the reference "
+            "values were computed using AST\n".format(os.path.basename(__file__)))
+    t.write(f, format='ascii', delimiter=',')
+
+
+def ref_icrs_fk5(fnout='icrs_fk5.csv'):
+    """
+    Accuracy tests for the ICRS to/from FK5 conversion, with arbitrary FK5
+    equinoxes and epoch of observation.
+    """
+
+    import starlink.Ast as Ast
+
+    np.random.seed(12345)
+
+    N = 200
+
+    # Sample uniformly on the unit sphere. These will be either the ICRS
+    # coordinates for the transformation to FK5, or the FK5 coordinates for the
+    # transformation to ICRS.
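# --- Editorial sketch, not part of the upstream patch ---
# Every loop in this script repeats the same starlink-pyast pattern: build
# two SkyFrames, derive a mapping with convert(), and push a (lon, lat)
# pair through tran() in radians. Distilled to a single conversion
# (assumes the starlink-pyast package is installed):
import numpy as np
import starlink.Ast as Ast

frame_icrs = Ast.SkyFrame('System=ICRS')
frame_fk5 = Ast.SkyFrame('System=FK5,Equinox=J2000')

# convert() returns a FrameSet whose forward transform maps ICRS -> FK5;
# tran() expects [[lon...], [lat...]] in radians.
frameset = frame_icrs.convert(frame_fk5)
ra_fk5, dec_fk5 = np.degrees(frameset.tran(np.radians([[10.68], [41.27]])))
print(ra_fk5[0], dec_fk5[0])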
+ ra = np.random.uniform(0., 360., N) + dec = np.degrees(np.arcsin(np.random.uniform(-1., 1., N))) + + # Generate random observation epoch and equinoxes + obstime = ["B{0:7.2f}".format(x) for x in np.random.uniform(1950., 2000., N)] + equinox_fk5 = ["J{0:7.2f}".format(x) for x in np.random.uniform(1975., 2025., N)] + + ra_icrs, dec_icrs = [], [] + ra_fk5, dec_fk5 = [], [] + + for i in range(N): + + # Set up frames for AST + frame_icrs = Ast.SkyFrame('System=ICRS,Epoch={epoch}'.format(epoch=obstime[i])) + frame_fk5 = Ast.SkyFrame('System=FK5,Epoch={epoch},Equinox={equinox_fk5}'.format(epoch=obstime[i], equinox_fk5=equinox_fk5[i])) + + # ICRS to FK5 + frameset = frame_icrs.convert(frame_fk5) + coords = np.degrees(frameset.tran([[np.radians(ra[i])], [np.radians(dec[i])]])) + ra_fk5.append(coords[0, 0]) + dec_fk5.append(coords[1, 0]) + + # FK5 to ICRS + frameset = frame_fk5.convert(frame_icrs) + coords = np.degrees(frameset.tran([[np.radians(ra[i])], [np.radians(dec[i])]])) + ra_icrs.append(coords[0, 0]) + dec_icrs.append(coords[1, 0]) + + # Write out table to a CSV file + t = Table() + t.add_column(Column(name='equinox_fk5', data=equinox_fk5)) + t.add_column(Column(name='obstime', data=obstime)) + t.add_column(Column(name='ra_in', data=ra)) + t.add_column(Column(name='dec_in', data=dec)) + t.add_column(Column(name='ra_fk5', data=ra_fk5)) + t.add_column(Column(name='dec_fk5', data=dec_fk5)) + t.add_column(Column(name='ra_icrs', data=ra_icrs)) + t.add_column(Column(name='dec_icrs', data=dec_icrs)) + f = open(fnout, 'wb') + f.write("# This file was generated with the {0} script, and the reference " + "values were computed using AST\n".format(os.path.basename(__file__))) + t.write(f, format='ascii', delimiter=',') + + +if __name__ == '__main__': + ref_fk4_no_e_fk4() + ref_fk4_no_e_fk5() + ref_galactic_fk4() + ref_icrs_fk5() diff --git a/astropy/coordinates/tests/accuracy/icrs_fk5.csv b/astropy/coordinates/tests/accuracy/icrs_fk5.csv new file mode 100644 index 0000000..ed6703c --- /dev/null +++ b/astropy/coordinates/tests/accuracy/icrs_fk5.csv @@ -0,0 +1,202 @@ +# This file was generated with the ref_icrs_fk5.py script, and the reference values were computed using AST +equinox_fk5,obstime,ra_in,dec_in,ra_fk5,dec_fk5,ra_icrs,dec_icrs +J1998.36,B1995.95,334.661793414,43.9385116594,334.644564717,43.9302620645,334.679023415,43.9467624314 +J2021.64,B1954.56,113.895199649,-14.1109832563,114.144749047,-14.1600275394,113.645603942,-14.0624187531 +J2020.49,B1953.55,66.2107722038,-7.76265420193,66.4590983513,-7.71687128381,65.9625042534,-7.80888947142 +J1981.50,B1970.69,73.6417002791,41.7006137481,73.3167722987,41.6713224382,73.9668646614,41.7293444168 +J2001.47,B1960.78,204.381010469,-14.9357743223,204.400749583,-14.9432299686,204.361272512,-14.9283175102 +J2005.96,B1975.98,214.396093073,-66.7648451487,214.51622501,-66.7922023737,214.276152292,-66.7374486425 +J2006.23,B1977.93,347.225227105,6.27744217753,347.304207997,6.31127500827,347.146246763,6.24361991082 +J2007.34,B1973.69,235.143754874,-5.59566003897,235.241093646,-5.61898190462,235.046433786,-5.57228120384 +J1991.60,B1960.79,269.606389512,26.7823112195,269.522379939,26.7826702924,269.690399178,26.7820207078 +J1980.71,B1961.97,235.285153507,-14.0695156888,235.015999226,-14.0081475332,235.554479961,-14.1304690349 +J2003.56,B1960.84,269.177331338,42.9472695107,269.20449399,42.9469939989,269.150168743,42.9475544195 +J1990.10,B1982.78,346.070424986,-3.51848810713,345.942775401,-3.57196685618,346.198054805,-3.46497978924 
+J1984.68,B1992.32,3.01978725896,7.19732176646,2.82298721926,7.11213924582,3.21663102538,7.28248887117 +J2003.24,B1996.52,38.3199756112,18.8080489808,38.3653094841,18.8221903901,38.2746486329,18.7938987191 +J2005.52,B1990.02,107.533336957,-4.33088623215,107.601845445,-4.34016819794,107.464824543,-4.32163930179 +J1977.27,B1984.04,236.30802591,14.3162535375,236.043743614,14.3866995821,236.572362968,14.2462932004 +J2024.27,B1960.36,291.532518915,-33.7960784017,291.927410812,-33.7460496092,291.137240582,-33.8452405537 +J1980.19,B1987.08,313.983328941,27.7572327639,313.771329108,27.6807919311,314.195342452,27.8339672537 +J1995.29,B1984.85,347.273135054,-13.6880685538,347.211387919,-13.7136412695,347.334872743,-13.662489607 +J2008.28,B1969.09,260.526724891,-37.6134342267,260.667857852,-37.6209601213,260.385615908,-37.6057963361 +J1984.85,B1992.51,231.291118043,-27.2371455509,231.063254934,-27.1842630084,231.519165836,-27.2897662439 +J1987.09,B1976.41,258.283303492,-30.1025933842,258.077147166,-30.0878669846,258.489514237,-30.1170665366 +J2006.16,B1994.65,168.335642599,-44.084769302,168.407881134,-44.1183592869,168.263437199,-44.0511880472 +J2014.94,B1991.03,117.210483914,32.8708634152,117.449614999,32.8326715727,116.971180598,32.9087464534 +J2002.23,B1961.43,158.272058119,-29.286471988,158.29805553,-29.2980114305,158.246062428,-29.2749346296 +J1984.88,B1991.03,262.688069789,-48.1516431413,262.401200048,-48.1407150038,262.975034556,-48.1621531697 +J2014.21,B1956.93,357.845250924,19.2890677934,358.026315201,19.3681291925,357.664269464,19.2100157767 +J2015.72,B1974.12,243.674536239,-10.0431678136,243.889881509,-10.0818251308,243.459271586,-10.0042157281 +J2010.54,B1957.44,284.696106425,19.6051067047,284.810926274,19.6200552,284.581280582,19.5902719604 +J2022.20,B1972.41,61.5291328053,18.6403709997,61.8503393647,18.6989763949,61.2081620218,18.581156754 +J2017.75,B1983.30,9.66573928438,-22.9075078717,9.88608757274,-22.8101292831,9.44526590432,-23.0049503113 +J2023.18,B1989.45,288.133287813,-36.6947385674,288.521507272,-36.654154333,287.744731719,-36.7344915409 +J1998.23,B1983.10,325.340113758,-33.7758802174,325.313691637,-33.783980295,325.366532233,-33.7677775537 +J1999.25,B1985.58,8.88343575454,-49.4693354042,8.87458135076,-49.4734614153,8.89228952149,-49.4652094919 +J2004.32,B1994.40,177.029034641,-67.7755279684,177.081382811,-67.7995455131,176.976736518,-67.7515115552 +J2022.10,B1957.08,189.451860246,-68.7071945134,189.787950236,-68.8284977585,189.117915692,-68.5857730927 +J1993.61,B1957.38,214.691763751,-32.6160600699,214.596970957,-32.5867949166,214.786602083,-32.6452917256 +J2004.91,B1966.30,18.7047162369,-32.9080620608,18.7619437329,-32.8821737407,18.6474776276,-32.9339591431 +J2005.68,B1951.59,322.232230099,14.4669345738,322.300004441,14.4919497078,322.164454374,14.4419423495 +J2003.00,B1984.39,262.175824918,51.7319974933,262.193291036,51.7297325887,262.15835963,51.7342674421 +J1980.93,B1988.24,294.6060041,34.0181871087,294.426858562,33.9741356521,294.78513452,34.0625403768 +J1995.15,B1967.50,180.08019102,26.2892216009,180.018069261,26.3162194666,180.142298341,26.2622237714 +J1986.07,B1980.80,291.668187169,-22.2789167174,291.460165406,-22.3074160406,291.876124294,-22.2501557708 +J2014.41,B1997.92,34.548669268,-15.8924906144,34.7203476357,-15.826491503,34.3769912557,-15.9586260582 +J2013.20,B1964.55,78.8220157436,-37.4332268082,78.9359542832,-37.4190574603,78.7080839461,-37.4475395217 +J1983.72,B1984.33,93.1388621771,60.5731416456,92.7698274429,60.5778081354,93.5078078659,60.5678923219 
+J2011.19,B1952.11,168.518071423,7.09229333513,168.662964922,7.03122231792,168.373145295,7.15333299716 +J2021.23,B1953.13,165.374352937,39.3890686842,165.670569356,39.2746286306,165.077550855,39.5033543186 +J1998.80,B1990.72,255.423520875,-17.5881075751,255.406106679,-17.5864187707,255.440935444,-17.5897944148 +J2020.65,B1971.83,64.0990821181,36.8289797648,64.4412908098,36.8788812849,63.757239339,36.77846091 +J1996.87,B1969.60,191.321958369,-52.3532066605,191.277444974,-52.3361209946,191.366491705,-52.3702896721 +J1978.29,B1966.53,60.3872023631,25.1025882655,60.0600049106,25.0425615489,60.7146932542,25.1620146503 +J1993.19,B1972.88,276.773010626,56.6051138031,276.742873164,56.6006572956,276.803141964,56.6095901107 +J1984.47,B1991.77,334.141397682,37.3852087993,333.971320286,37.3074623211,334.311570487,37.4630672642 +J1982.42,B1973.34,219.417716878,-20.2290328911,219.169713749,-20.1532857902,219.66593381,-20.3045108915 +J1985.55,B1971.06,54.0660580808,-29.3264933861,53.9175360432,-29.3737907652,54.2145819747,-29.2793648485 +J2018.98,B1978.54,176.26561333,-0.572718169429,176.5087243,-0.678171194716,176.022494179,-0.467294315659 +J2015.89,B1986.95,135.84418338,-9.94938261687,136.036951663,-10.0129567306,135.651382202,-9.88601582693 +J2006.58,B1952.75,305.496508312,-8.63421746611,305.585332083,-8.61291748186,305.407668201,-8.65547120765 +J2022.76,B1981.21,327.995002307,-58.3471659896,328.394703325,-58.2394830075,327.593625588,-58.4543795694 +J1980.95,B1981.05,138.185539617,11.9337947187,137.926465957,12.0126777715,138.444435852,11.854592026 +J2005.11,B1950.06,113.578525223,29.6301583121,113.658818144,29.6187548389,113.498216367,29.6415252375 +J1991.57,B1980.14,204.621895006,36.5235009134,204.528365616,36.5661830045,204.715395365,36.4808507277 +J2016.08,B1952.01,67.6144926088,-13.7094836718,67.8003322803,-13.675528411,67.4286781478,-13.7437074086 +J2007.99,B1979.29,45.3029557779,36.4639084123,45.4287375369,36.4951563695,45.1772514486,36.4325910517 +J1996.13,B1972.42,247.534489816,-3.23349952461,247.483791774,-3.22525417405,247.585191141,-3.24172726082 +J2010.80,B1967.69,287.858418461,26.2825631559,287.968526608,26.3010624761,287.748304904,26.2641738179 +J1985.76,B1996.68,206.473163472,-38.4312130715,206.262844929,-38.3601778797,206.683760191,-38.5021184668 +J1975.84,B1963.36,350.362793376,-7.51631961926,350.050245875,-7.64886538089,350.675192428,-7.38365103931 +J1989.04,B1964.06,228.259575769,40.311002157,228.157788783,40.3516658201,228.36135704,40.2704193663 +J2005.09,B1975.25,319.831820932,40.7337792676,319.881302594,40.7554460493,319.782343346,40.712128268 +J1998.03,B1982.34,178.349313153,-38.3854710615,178.324338212,-38.3745092745,178.374291779,-38.3964329888 +J2010.53,B1998.53,126.58195076,-73.6980337652,126.555725353,-73.7329650434,126.607757619,-73.6630811157 +J1983.23,B1951.79,257.122932676,24.0154376566,256.948650568,24.0363842696,257.297226196,23.9947678892 +J2022.01,B1971.16,181.414481921,-17.7858263698,181.697561318,-17.9083119018,181.131603746,-17.6633258663 +J2022.77,B1979.42,81.2295383474,-9.26450146427,81.5008624611,-9.24547745382,80.9582426792,-9.28411870238 +J2024.04,B1986.59,88.1907984871,32.4238226453,88.5837995469,32.4275810011,87.7978296174,32.4191468321 +J1977.94,B1958.78,285.408252018,67.7826509035,285.415288738,67.7500149744,285.400733562,67.815271794 +J2012.02,B1975.53,178.262069224,51.7327600597,178.418521574,51.6658699581,178.105379001,51.7996446322 +J2005.03,B1975.01,329.433722424,-46.8960749035,329.513358137,-46.8719488299,329.354038052,-46.9201811836 
+J1979.45,B1994.64,340.333860195,36.5560891832,340.099269221,36.4484316911,340.568666175,36.6639044187 +J2024.47,B1969.13,191.963602676,21.3572019706,192.265985395,21.2240120738,191.661020584,21.4905409785 +J2002.44,B1983.14,90.8973340407,3.44588414281,90.9294194634,3.44566140242,90.8652485585,3.44609927685 +J2008.72,B1952.34,259.510340943,47.0512387915,259.570777662,47.0424288828,259.449910071,47.060099055 +J2011.24,B1987.56,132.277954966,30.4307232942,132.449103167,30.388553739,132.106687114,30.4727545196 +J2003.42,B1968.44,179.513439448,-54.44865752,179.557050535,-54.4676997913,179.469848483,-54.4296153679 +J2001.37,B1997.40,81.5670170865,-19.9451944488,81.5818413055,-19.9440843678,81.5521929287,-19.9463064817 +J1982.54,B1967.36,127.283632829,-10.0946390302,127.073706282,-10.0359014336,127.493515779,-10.1536599704 +J1987.01,B1984.19,234.306643184,-86.4404274379,233.208246223,-86.397666282,235.429405927,-86.482050156 +J1995.13,B1991.23,112.65584231,11.2521500479,112.588477624,11.262573342,112.723199816,11.2416973345 +J1978.39,B1974.31,276.744760981,21.4151577082,276.514780435,21.4012711846,276.974729777,21.4295237953 +J2012.92,B1999.21,281.461357214,-15.511897988,281.646447197,-15.4974841762,281.27623546,-15.5260840726 +J1992.13,B1980.19,306.867413859,-11.9467360888,306.759165107,-11.9729853099,306.975635305,-11.9204206469 +J2024.49,B1987.98,341.966066455,-2.82477813631,342.281869892,-2.69502407373,341.650132043,-2.95429956154 +J2019.43,B1984.23,38.6362483924,9.3322810896,38.8963811972,9.41661462037,38.3762808891,9.24764100258 +J2021.93,B1996.62,327.861128148,-46.529254733,328.210157236,-46.4256790337,327.511186361,-46.632434339 +J2011.96,B1997.49,120.979858288,87.22617179,122.295667673,87.1912385961,119.633038513,87.2597786682 +J1976.35,B1999.51,297.496953653,0.839666332936,297.195644583,0.779185153185,297.798143461,0.9007616283 +J1994.12,B1956.31,323.316228643,-0.794522598791,323.240624027,-0.820755621072,323.391823819,-0.768263773348 +J1975.53,B1998.83,15.3775095611,-38.7740290611,15.0928652608,-38.9054807438,15.6617662484,-38.6427567079 +J1978.26,B1961.46,70.486199672,-24.0682131367,70.2586642967,-24.1088709419,70.7137598878,-24.0280083925 +J2009.07,B1959.30,106.020475905,36.6574903487,106.172780811,36.6434848171,105.868125064,36.6713668422 +J2024.33,B1975.46,225.719957006,-24.2326924255,226.075567685,-24.326948892,225.364802775,-24.1378344642 +J2008.31,B1976.52,31.0403178442,23.2187819108,31.1570536178,23.258394038,30.9236362505,23.1791211798 +J1995.76,B1964.13,51.4602071324,-27.0058546166,51.4152973853,-27.0205700299,51.5051169729,-26.991153671 +J1977.06,B1965.51,185.697546923,55.594260797,185.421779304,55.721374348,185.972510783,55.4672081659 +J2019.71,B1965.49,248.162878677,-23.7609450888,248.460344259,-23.8014906584,247.865592952,-23.7198708623 +J2010.34,B1963.32,308.385291884,51.2349043028,308.461574811,51.2706847328,308.308996421,51.1991839517 +J1998.94,B1979.67,233.050205996,63.3093356498,233.046004532,63.3128868847,233.05440839,63.3057847603 +J1985.78,B1960.86,209.382723191,-41.4659129842,209.166390198,-41.3968581581,209.599369778,-41.5348210618 +J1979.09,B1970.12,256.001743835,-16.3448051664,255.700801743,-16.3163460002,256.302789611,-16.3726709454 +J2008.66,B1964.43,90.8700685367,21.3678694408,90.9998841203,21.3670776114,90.7402515844,21.3685520416 +J2024.74,B1958.69,324.057486054,57.4352750563,324.24791254,57.5469196438,323.867096755,57.3238991167 +J2004.68,B1961.29,159.225729446,-45.2472278228,159.276379685,-45.27159791,159.175093005,-45.2228659014 
+J2017.01,B1999.43,7.38749687642,-53.1540997613,7.58899121668,-53.0602158752,7.18561693871,-53.2480265357 +J1982.65,B1971.70,345.477965039,-10.1831007688,345.251295994,-10.2765575516,345.704526845,-10.0895481742 +J2018.81,B1991.41,234.801152081,71.8511934075,234.781598949,71.7908263583,234.821698672,71.9115305128 +J2002.24,B1978.63,184.754250038,-66.4894904918,184.785352293,-66.5019187594,184.723164704,-66.4770616601 +J2024.18,B1982.60,245.64829793,-38.7682176459,246.056856393,-38.8232771335,245.240252475,-38.7122842298 +J2011.79,B1986.49,176.234540627,12.5643501076,176.386539261,12.4988499005,176.082498099,12.6298388968 +J1979.65,B1969.56,333.536461653,-55.645568776,333.201327008,-55.7468423589,333.870449248,-55.544000369 +J1989.61,B1969.64,185.716717981,-21.5568171888,185.58137022,-21.4992561733,185.852126151,-21.6143646005 +J1988.65,B1992.98,25.9775574253,12.7249831044,25.8259425625,12.6681381133,26.129235078,12.7817548524 +J1978.56,B1990.50,204.302987352,-36.6989586206,203.992013028,-36.590035009,204.614547277,-36.8076153687 +J2009.00,B1991.83,221.487546141,22.5689795999,221.589063682,22.531481696,221.386026462,22.6065363227 +J1986.24,B1959.40,338.956666009,-30.7135370512,338.763951811,-30.7849831444,339.149190984,-30.6419984779 +J2002.57,B1967.98,149.5308077,21.1458572723,149.566540611,21.1335179376,149.495070016,21.1581920836 +J2013.49,B1974.10,95.1983908472,-1.61163007915,95.3691226237,-1.61855225484,95.0276507441,-1.6049307767 +J1985.59,B1998.30,35.0615395317,-28.6207880841,34.9020739253,-28.6865248849,35.2209739544,-28.5551795263 +J1989.64,B1978.17,174.903919876,-25.7547140538,174.773704705,-25.6972724215,175.03419171,-25.8121673429 +J1992.82,B1991.38,167.27863063,54.1842744725,167.174390005,54.2232575861,167.382755502,54.1452753419 +J2022.82,B1953.81,10.7133541168,-26.6356033619,10.9937676648,-26.5108341533,10.4326984418,-26.7604882278 +J2008.01,B1977.66,249.939886269,43.0233288254,250.003422167,43.0080617632,249.876355463,43.0386423389 +J2022.53,B1977.40,258.100960451,-37.3838036503,258.483519166,-37.4092478087,257.718621316,-37.3575402646 +J1979.84,B1995.27,262.732112385,-19.8057986634,262.43378889,-19.7913038792,263.030493701,-19.8197136836 +J1988.23,B1968.47,149.166366188,63.2857703333,148.948549069,63.3419675102,149.383595682,63.2294456404 +J1988.61,B1995.06,5.4355841259,0.695799807062,5.28960631728,0.632663567066,5.5815705005,0.758920757926 +J2016.75,B1957.03,327.231056694,-11.1377396332,327.455533267,-11.0592260753,327.006447871,-11.2160554034 +J2015.64,B1954.96,284.17633852,-71.0631656787,284.622383668,-71.0415076498,283.729510587,-71.08416592 +J1989.69,B1998.66,59.4717008987,14.0960045791,59.3272356842,14.0667790018,59.6162113325,14.1251054777 +J2007.49,B1997.10,112.602946077,-17.7763932222,112.686574032,-17.7924540137,112.519313748,-17.7603886122 +J1996.05,B1979.55,219.940310095,-26.5130440909,219.882677955,-26.4961740042,219.997955886,-26.5298999803 +J1978.73,B1952.60,131.216503219,-60.6790709392,131.102499987,-60.6011373948,131.330276761,-60.7571815411 +J2011.94,B1952.51,56.1738921125,-19.3427782341,56.3074873507,-19.3058404816,56.0403066499,-19.3798447522 +J2004.27,B1966.23,63.8293728328,-59.8347944156,63.8473703919,-59.8243161934,63.8113850715,-59.8452793392 +J1992.23,B1968.79,312.440281577,-82.909075449,312.082844158,-82.9381618829,312.795193361,-82.879790561 +J1987.90,B1988.21,104.43408064,-66.6447299251,104.430099425,-66.6279457743,104.437942894,-66.6615185415 +J1989.59,B1992.96,210.664663673,-17.5831928536,210.521977043,-17.533300504,210.807417956,-17.6330115873 
+J2013.49,B1977.29,163.438155327,-54.6954182678,163.580861698,-54.7674320028,163.295621486,-54.6234578045 +J1996.22,B1966.19,148.024127582,2.32865180198,147.975248991,2.34649291874,148.073002076,2.31080117706 +J1989.43,B1970.29,317.748400264,-34.6457182874,317.585582699,-34.6892153211,317.911087895,-34.6021088555 +J1988.21,B1955.48,249.374885326,79.5246095403,249.556636954,79.5476344368,249.19427904,79.5013904045 +J1988.85,B1956.86,100.53840787,-27.7507223648,100.427671298,-27.7394319384,100.64914055,-27.7621307317 +J2017.12,B1987.27,23.1984832267,21.1208388177,23.4324436323,21.2083599648,22.9647269089,21.0331644062 +J1983.48,B1993.82,71.5045009532,3.00896662959,71.2883142486,2.97961964121,71.7207379936,3.03798447641 +J1987.60,B1962.95,335.405788093,-6.90098238794,335.243429575,-6.9637085665,335.56809315,-6.83817480211 +J2004.59,B1984.28,307.588884401,18.8511389183,307.640784808,18.8667407469,307.536982665,18.8355554286 +J2023.77,B1967.96,343.704504442,-46.9224252956,344.048269178,-46.7952999698,343.359747105,-47.0493275593 +J1975.21,B1950.30,18.8112053675,35.1485289159,18.4626544919,35.0177535414,19.1604681331,35.2790332993 +J1987.00,B1988.06,208.609805013,-46.3894275721,208.40705329,-46.3258250272,208.812873725,-46.4529073994 +J2011.33,B1970.70,172.978655994,15.4172636989,173.125918709,15.3546485543,172.831339838,15.4798590369 +J1987.54,B1966.69,7.8152324312,-34.9365736294,7.662140954,-35.0053080694,7.96821251179,-34.8678643727 +J2020.91,B1963.90,134.503366944,-72.4111269318,134.508752259,-72.4927321248,134.496713839,-72.3295304626 +J2000.41,B1979.63,149.073048424,14.7065160273,149.078614359,14.7045538676,149.067482395,14.7084780734 +J2000.13,B1966.26,217.406604209,16.5186514295,217.408141458,16.5180765377,217.40506696,16.5192263332 +J2010.62,B1996.84,241.829541848,16.5114334946,241.950169443,16.4835846733,241.708924453,16.5393920451 +J2006.99,B1954.80,301.991652158,46.8228690265,302.04602973,46.8435076393,301.937270072,46.8022617404 +J1989.22,B1994.16,280.629434995,-19.0017596678,280.47101531,-19.0127425519,280.787831403,-18.9906136966 +J1975.49,B1978.40,144.252375855,-10.2581330338,143.952794662,-10.1475953709,144.551902691,-10.3690875087 +J2004.74,B1953.10,286.0305233,12.7464714044,286.085513107,12.7537759609,285.975531683,12.739191194 +J2017.05,B1993.75,321.524751743,61.8464645226,321.632828791,61.9208329855,321.416592726,61.7722074849 +J1999.33,B1961.24,94.4962887092,-44.0946278203,94.4913067992,-44.0943400421,94.5012706073,-44.0949159215 +J2014.04,B1989.97,356.110922656,-39.1892569317,356.295020794,-39.1112673044,355.926608129,-39.2672295394 +J1995.63,B1990.09,307.190555646,-43.7191034979,307.116027145,-43.7337921796,307.265056341,-43.7043896052 +J1993.99,B1951.45,263.331776174,25.1917278571,263.270410907,25.195633174,263.393142235,25.187858127 +J2019.92,B1981.35,128.003624894,58.8666544649,128.402920612,58.7980654005,127.60315064,58.9346336939 +J2019.84,B1980.23,317.984216655,-8.89508525523,318.249905253,-8.81284951457,317.718360008,-8.97697809843 +J2011.02,B1953.91,312.465272698,5.18400310772,312.602344189,5.22548362633,312.328177207,5.1426308705 +J1989.24,B1988.65,344.0759205,-20.8070551085,343.931796087,-20.8646386849,344.219970948,-20.7494301859 +J1991.99,B1957.17,0.0386123471053,-42.7336081023,359.935984167,-42.778197083,0.141166805258,-42.6890191696 +J1989.26,B1973.18,5.95477509083,23.9728714179,5.81446857607,23.9133953285,6.09515408275,24.0323323244 +J2013.98,B1954.86,113.065220613,27.4191705733,113.281430058,27.3885381062,112.848903077,27.4495327526 
+J1975.23,B1978.49,358.313822853,67.0446512684,358.006936646,66.906817269,358.62239279,67.1825070772 +J1979.23,B1970.19,53.5839203362,-15.011852649,53.3428201185,-15.0806959511,53.8250625845,-14.9434009383 +J1997.07,B1979.33,60.2557627351,25.6833225299,60.211425166,25.6752201005,60.3001057813,25.6914140019 +J1987.55,B1987.44,273.08593329,76.4393919681,273.213340941,76.4355890802,272.958409407,76.443040877 +J2020.29,B1994.48,25.0306798156,-51.1202356021,25.2312583612,-51.0179789716,24.8298733815,-51.2226596567 +J2019.04,B1968.97,253.970437895,31.094899255,254.152950904,31.0657978691,253.787939628,31.1243251572 +J2010.83,B1964.62,168.89950144,-43.2270950714,169.027402777,-43.286276106,168.771701635,-43.167939929 +J1986.93,B1975.46,3.66775780511,39.2622225734,3.49661533708,39.1896011422,3.8390874932,39.3348301065 +J2021.26,B1976.64,278.936590632,6.21231840756,279.196246371,6.23097561081,278.676905991,6.19419108431 +J2023.48,B1955.27,285.91236301,9.40548699672,286.192352454,9.44163731007,285.632321786,9.36995103333 +J2003.91,B1952.30,53.8450026285,60.7259893436,53.9264872004,60.7388195386,53.763567111,60.7131341506 +J1988.45,B1981.10,8.53330744443,-7.54498028811,8.38660351469,-7.60858303157,8.6800005788,-7.48140196135 +J1990.05,B1991.12,274.342957522,-1.24603088049,274.214291508,-1.25015780077,274.471619291,-1.24177991998 +J2006.27,B1952.75,80.5212647616,19.4060625392,80.6137303362,19.4117801816,80.4288063349,19.4002893257 +J2013.99,B1989.90,94.3827831954,15.0883386826,94.5829613625,15.0822437507,94.1825907513,15.0941622997 +J1996.06,B1962.21,164.473020999,-47.6965440186,164.429008903,-47.6754169753,164.51704615,-47.7176755752 +J2007.85,B1990.18,89.9736906625,-16.9964263489,90.0609144086,-16.9964467144,89.8864669212,-16.9964725118 +J1996.18,B1964.91,204.582082173,15.6789515837,204.535627332,15.698292886,204.628535832,15.6596174499 diff --git a/astropy/coordinates/tests/accuracy/test_altaz_icrs.py b/astropy/coordinates/tests/accuracy/test_altaz_icrs.py new file mode 100644 index 0000000..6150cd9 --- /dev/null +++ b/astropy/coordinates/tests/accuracy/test_altaz_icrs.py @@ -0,0 +1,189 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +"""Accuracy tests for AltAz to ICRS coordinate transformations. + +We use "known good" examples computed with other coordinate libraries. + +Note that we use very low precision asserts because some people run tests on 32-bit +machines and we want the tests to pass there. +TODO: check if these tests pass on 32-bit machines and implement +higher-precision checks on 64-bit machines. +""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import pytest + +from .... import units as u +from ....time import Time +from ...builtin_frames import AltAz +from ... import EarthLocation +from ... import Angle, SkyCoord + + +def test_against_hor2eq(): + """Check that Astropy gives consistent results with an IDL hor2eq example. 
+ + See : http://idlastro.gsfc.nasa.gov/ftp/pro/astro/hor2eq.pro + + Test is against these run outputs, run at 2000-01-01T12:00:00: + + # NORMAL ATMOSPHERE CASE + IDL> hor2eq, ten(37,54,41), ten(264,55,06), 2451545.0d, ra, dec, /verb, obs='kpno', pres=781.0, temp=273.0 + Latitude = +31 57 48.0 Longitude = *** 36 00.0 + Julian Date = 2451545.000000 + Az, El = 17 39 40.4 +37 54 41 (Observer Coords) + Az, El = 17 39 40.4 +37 53 40 (Apparent Coords) + LMST = +11 15 26.5 + LAST = +11 15 25.7 + Hour Angle = +03 38 30.1 (hh:mm:ss) + Ra, Dec: 07 36 55.6 +15 25 02 (Apparent Coords) + Ra, Dec: 07 36 55.2 +15 25 08 (J2000.0000) + Ra, Dec: 07 36 55.2 +15 25 08 (J2000) + IDL> print, ra, dec + 114.23004 15.418818 + + # NO PRESSURE CASE + IDL> hor2eq, ten(37,54,41), ten(264,55,06), 2451545.0d, ra, dec, /verb, obs='kpno', pres=0.0, temp=273.0 + Latitude = +31 57 48.0 Longitude = *** 36 00.0 + Julian Date = 2451545.000000 + Az, El = 17 39 40.4 +37 54 41 (Observer Coords) + Az, El = 17 39 40.4 +37 54 41 (Apparent Coords) + LMST = +11 15 26.5 + LAST = +11 15 25.7 + Hour Angle = +03 38 26.4 (hh:mm:ss) + Ra, Dec: 07 36 59.3 +15 25 31 (Apparent Coords) + Ra, Dec: 07 36 58.9 +15 25 37 (J2000.0000) + Ra, Dec: 07 36 58.9 +15 25 37 (J2000) + IDL> print, ra, dec + 114.24554 15.427022 + """ + # Observatory position for `kpno` from here: + # http://idlastro.gsfc.nasa.gov/ftp/pro/astro/observatory.pro + location = EarthLocation(lon=Angle('-111d36.0m'), + lat=Angle('31d57.8m'), + height=2120. * u.m) + + obstime = Time(2451545.0, format='jd', scale='ut1') + + altaz_frame = AltAz(obstime=obstime, location=location, + temperature=0 * u.deg_C, pressure=0.781 * u.bar) + altaz_frame_noatm = AltAz(obstime=obstime, location=location, + temperature=0 * u.deg_C, pressure=0.0 * u.bar) + altaz = SkyCoord('264d55m06s 37d54m41s', frame=altaz_frame) + altaz_noatm = SkyCoord('264d55m06s 37d54m41s', frame=altaz_frame_noatm) + + radec_frame = 'icrs' + + radec_actual = altaz.transform_to(radec_frame) + radec_actual_noatm = altaz_noatm.transform_to(radec_frame) + + radec_expected = SkyCoord('07h36m55.2s +15d25m08s', frame=radec_frame) + distance = radec_actual.separation(radec_expected).to('arcsec') + + # this comes from running the example hor2eq but with the pressure set to 0 + radec_expected_noatm = SkyCoord('07h36m58.9s +15d25m37s', frame=radec_frame) + distance_noatm = radec_actual_noatm.separation(radec_expected_noatm).to('arcsec') + + # The baseline difference is ~2.3 arcsec with one atm of pressure. The + # difference is mainly due to the somewhat different atmospheric model that + # hor2eq assumes. This is confirmed by the second test which has the + # atmosphere "off" - the residual difference is small enough to be embedded + # in the assumptions about "J2000" or rounding errors. + assert distance < 5 * u.arcsec + assert distance_noatm < 0.4 * u.arcsec + + +def test_against_pyephem(): + """Check that Astropy gives consistent results with one PyEphem example. + + PyEphem: http://rhodesmill.org/pyephem/ + + See example input and output here: + https://gist.github.com/zonca/1672906 + https://github.com/phn/pytpm/issues/2#issuecomment-3698679 + """ + obstime = Time('2011-09-18 08:50:00') + location = EarthLocation(lon=Angle('-109d24m53.1s'), + lat=Angle('33d41m46.0s'), + height=30000. * u.m) + # We are using the default pressure and temperature in PyEphem + # relative_humidity = ? + # obswl = ? 
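# --- Editorial sketch, not part of the upstream patch ---
# In the AltAz frames used in this module, astropy applies its atmospheric
# refraction model only when the frame carries a nonzero pressure, so
# pressure=0 * u.bar is the idiom for "no atmosphere". A rough measure of
# the size of the effect (site and pointing are arbitrary illustrations):
from astropy import units as u
from astropy.coordinates import AltAz, EarthLocation, SkyCoord
from astropy.time import Time

location = EarthLocation(lon=-111.6 * u.deg, lat=32.0 * u.deg, height=2120. * u.m)
obstime = Time('2000-01-01 12:00:00')

frame_atm = AltAz(obstime=obstime, location=location,
                  temperature=0 * u.deg_C, pressure=0.781 * u.bar)
frame_vac = AltAz(obstime=obstime, location=location,
                  temperature=0 * u.deg_C, pressure=0.0 * u.bar)

radec_atm = SkyCoord('264d55m06s 37d54m41s', frame=frame_atm).transform_to('icrs')
radec_vac = SkyCoord('264d55m06s 37d54m41s', frame=frame_vac).transform_to('icrs')

# at ~38 deg altitude the refraction term is of order an arcminute
print(radec_atm.separation(radec_vac).to('arcsec'))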
+ altaz_frame = AltAz(obstime=obstime, location=location, + temperature=15 * u.deg_C, pressure=1.010 * u.bar) + + altaz = SkyCoord('6.8927d -60.7665d', frame=altaz_frame) + radec_actual = altaz.transform_to('icrs') + + radec_expected = SkyCoord('196.497518d -4.569323d', frame='icrs') # EPHEM + # radec_expected = SkyCoord('196.496220d -4.569390d', frame='icrs') # HORIZON + distance = radec_actual.separation(radec_expected).to('arcsec') + # TODO: why is this difference so large? + # It currently is: 31.45187984720655 arcsec + assert distance < 1e3 * u.arcsec + + # Add assert on current Astropy result so that we notice if something changes + radec_expected = SkyCoord('196.495372d -4.560694d', frame='icrs') + distance = radec_actual.separation(radec_expected).to('arcsec') + # Current value: 0.0031402822944751997 arcsec + assert distance < 1 * u.arcsec + + +def test_against_jpl_horizons(): + """Check that Astropy gives consistent results with the JPL Horizons example. + + The input parameters and reference results are taken from this page: + (from the first row of the Results table at the bottom of that page) + http://ssd.jpl.nasa.gov/?horizons_tutorial + """ + obstime = Time('1998-07-28 03:00') + location = EarthLocation(lon=Angle('248.405300d'), + lat=Angle('31.9585d'), + height=2.06 * u.km) + # No atmosphere + altaz_frame = AltAz(obstime=obstime, location=location) + + altaz = SkyCoord('143.2970d 2.6223d', frame=altaz_frame) + radec_actual = altaz.transform_to('icrs') + radec_expected = SkyCoord('19h24m55.01s -40d56m28.9s', frame='icrs') + distance = radec_actual.separation(radec_expected).to('arcsec') + # Current value: 0.238111 arcsec + assert distance < 1 * u.arcsec + + +@pytest.mark.xfail +def test_fk5_equinox_and_epoch_j2000_0_to_topocentric_observed(): + """ + http://phn.github.io/pytpm/conversions.html#fk5-equinox-and-epoch-j2000-0-to-topocentric-observed + """ + # Observatory position for `kpno` from here: + # http://idlastro.gsfc.nasa.gov/ftp/pro/astro/observatory.pro + location = EarthLocation(lon=Angle('-111.598333d'), + lat=Angle('31.956389d'), + height=2093.093 * u.m) # TODO: height correct? + + obstime = Time('2010-01-01 12:00:00', scale='utc') + # relative_humidity = ? + # obswl = ? + altaz_frame = AltAz(obstime=obstime, location=location, + temperature=0 * u.deg_C, pressure=0.781 * u.bar) + + radec = SkyCoord('12h22m54.899s 15d49m20.57s', frame='fk5') + + altaz_actual = radec.transform_to(altaz_frame) + + altaz_expected = SkyCoord('264d55m06s 37d54m41s', frame='altaz') + # altaz_expected = SkyCoord('343.586827647d 15.7683070508d', frame='altaz') + # altaz_expected = SkyCoord('133.498195532d 22.0162383595d', frame='altaz') + distance = altaz_actual.separation(altaz_expected) + # print(altaz_actual) + # print(altaz_expected) + # print(distance) + """TODO: Current output is completely incorrect ... xfailing this test for now. + + + + 68d02m45.732s + """ + + assert distance < 1 * u.arcsec diff --git a/astropy/coordinates/tests/accuracy/test_ecliptic.py b/astropy/coordinates/tests/accuracy/test_ecliptic.py new file mode 100644 index 0000000..b0cc59b --- /dev/null +++ b/astropy/coordinates/tests/accuracy/test_ecliptic.py @@ -0,0 +1,110 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" +Accuracy tests for Ecliptic coordinate systems. +""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import numpy as np + +from ....tests.helper import quantity_allclose +from .... import units as u +from ... 
import SkyCoord +from ...builtin_frames import FK5, ICRS, GCRS, GeocentricTrueEcliptic, BarycentricTrueEcliptic, HeliocentricTrueEcliptic +from ....constants import R_sun, R_earth + + +def test_against_pytpm_doc_example(): + """ + Check that Astropy's Ecliptic systems give answers consistent with pyTPM + + Currently this is only testing against the example given in the pytpm docs + """ + fk5_in = SkyCoord('12h22m54.899s', '15d49m20.57s', frame=FK5(equinox='J2000')) + pytpm_out = BarycentricTrueEcliptic(lon=178.78256462*u.deg, + lat=16.7597002513*u.deg, + equinox='J2000') + astropy_out = fk5_in.transform_to(pytpm_out) + + assert pytpm_out.separation(astropy_out) < (1*u.arcsec) + + +def test_ecliptic_heliobary(): + """ + Check that the ecliptic transformations for heliocentric and barycentric + at least more or less make sense + """ + icrs = ICRS(1*u.deg, 2*u.deg, distance=1.5*R_sun) + + bary = icrs.transform_to(BarycentricTrueEcliptic) + helio = icrs.transform_to(HeliocentricTrueEcliptic) + + # make sure there's a sizable distance shift - in 3d hundreds of km, but + # this is 1D so we allow it to be somewhat smaller + assert np.abs(bary.distance - helio.distance) > 1*u.km + + # now make something that's got the location of helio but in bary's frame. + # this is a convenience to allow `separation` to work as expected + helio_in_bary_frame = bary.realize_frame(helio.cartesian) + assert bary.separation(helio_in_bary_frame) > 1*u.arcmin + + +def test_ecl_geo(): + """ + Check that the geocentric version at least gets well away from GCRS. For a + true "accuracy" test we need a comparison dataset that is similar to the + geocentric/GCRS comparison we want to do here. Contributions welcome! + """ + gcrs = GCRS(10*u.deg, 20*u.deg, distance=1.5*R_earth) + gecl = gcrs.transform_to(GeocentricTrueEcliptic) + + assert quantity_allclose(gecl.distance, gcrs.distance) + + +def test_arraytransforms(): + """ + Test that transforms to/from ecliptic coordinates work on array coordinates + (not testing for accuracy.) 
+    """
+    ra = np.ones((4, ), dtype=float) * u.deg
+    dec = 2*np.ones((4, ), dtype=float) * u.deg
+    distance = np.ones((4, ), dtype=float) * u.au
+
+    test_icrs = ICRS(ra=ra, dec=dec, distance=distance)
+    test_gcrs = GCRS(test_icrs.data)
+
+    bary_arr = test_icrs.transform_to(BarycentricTrueEcliptic)
+    assert bary_arr.shape == ra.shape
+
+    helio_arr = test_icrs.transform_to(HeliocentricTrueEcliptic)
+    assert helio_arr.shape == ra.shape
+
+    geo_arr = test_gcrs.transform_to(GeocentricTrueEcliptic)
+    assert geo_arr.shape == ra.shape
+
+    # now check that we can also go back the other way without shape problems
+    bary_icrs = bary_arr.transform_to(ICRS)
+    assert bary_icrs.shape == test_icrs.shape
+
+    helio_icrs = helio_arr.transform_to(ICRS)
+    assert helio_icrs.shape == test_icrs.shape
+
+    geo_gcrs = geo_arr.transform_to(GCRS)
+    assert geo_gcrs.shape == test_gcrs.shape
+
+
+def test_roundtrip_scalar():
+    icrs = ICRS(ra=1*u.deg, dec=2*u.deg, distance=3*u.au)
+    gcrs = GCRS(icrs.cartesian)
+
+    bary = icrs.transform_to(BarycentricTrueEcliptic)
+    helio = icrs.transform_to(HeliocentricTrueEcliptic)
+    geo = gcrs.transform_to(GeocentricTrueEcliptic)
+
+    bary_icrs = bary.transform_to(ICRS)
+    helio_icrs = helio.transform_to(ICRS)
+    geo_gcrs = geo.transform_to(GCRS)
+
+    assert quantity_allclose(bary_icrs.cartesian.xyz, icrs.cartesian.xyz)
+    assert quantity_allclose(helio_icrs.cartesian.xyz, icrs.cartesian.xyz)
+    assert quantity_allclose(geo_gcrs.cartesian.xyz, gcrs.cartesian.xyz)
diff --git a/astropy/coordinates/tests/accuracy/test_fk4_no_e_fk4.py b/astropy/coordinates/tests/accuracy/test_fk4_no_e_fk4.py
new file mode 100644
index 0000000..27d398c
--- /dev/null
+++ b/astropy/coordinates/tests/accuracy/test_fk4_no_e_fk4.py
@@ -0,0 +1,64 @@
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
+
+
+import numpy as np
+
+from .... import units as u
+from ...builtin_frames import FK4NoETerms, FK4
+from ....time import Time
+from ....table import Table
+from ...angle_utilities import angular_separation
+from ....utils.data import get_pkg_data_contents
+from ....extern.six.moves import range
+
+# the number of tests to run
+from . import N_ACCURACY_TESTS
+
+# It looks as though SLALIB, which AST relies on, assumes a simplified version
+# of the e-terms correction, so we have to up the tolerance a bit to get things
+# to agree.
+TOLERANCE = 1.e-5  # arcseconds
+
+
+def test_fk4_no_e_fk4():
+    lines = get_pkg_data_contents('fk4_no_e_fk4.csv').split('\n')
+    t = Table.read(lines, format='ascii', delimiter=',', guess=False)
+
+    if N_ACCURACY_TESTS >= len(t):
+        idxs = range(len(t))
+    else:
+        idxs = np.random.randint(len(t), size=N_ACCURACY_TESTS)
+
+    diffarcsec1 = []
+    diffarcsec2 = []
+    for i in idxs:
+        # Extract row
+        r = t[int(i)]  # int here is to get around a py 3.x astropy.table bug
+
+        # FK4 to FK4NoETerms
+        c1 = FK4(ra=r['ra_in']*u.deg, dec=r['dec_in']*u.deg,
+                 obstime=Time(r['obstime'], scale='utc'))
+        c2 = c1.transform_to(FK4NoETerms)
+
+        # Find difference
+        diff = angular_separation(c2.ra.radian, c2.dec.radian,
+                                  np.radians(r['ra_fk4ne']), np.radians(r['dec_fk4ne']))
+
+        diffarcsec1.append(np.degrees(diff) * 3600.)
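# --- Editorial aside, not part of the upstream patch ---
# angular_separation takes and returns radians and implements the Vincenty
# formula, which stays numerically well-behaved for the tiny separations
# measured here. The np.degrees(diff) * 3600. spelling above is the
# plain-float equivalent of a units conversion such as
# (diff * u.rad).to(u.arcsec).value, with u as imported above.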
+ + # FK4NoETerms to FK4 + c1 = FK4NoETerms(ra=r['ra_in']*u.deg, dec=r['dec_in']*u.deg, + obstime=Time(r['obstime'], scale='utc')) + c2 = c1.transform_to(FK4) + + # Find difference + diff = angular_separation(c2.ra.radian, c2.dec.radian, + np.radians(r['ra_fk4']), + np.radians(r['dec_fk4'])) + + diffarcsec2.append(np.degrees(diff) * 3600.) + + np.testing.assert_array_less(diffarcsec1, TOLERANCE) + np.testing.assert_array_less(diffarcsec2, TOLERANCE) diff --git a/astropy/coordinates/tests/accuracy/test_fk4_no_e_fk5.py b/astropy/coordinates/tests/accuracy/test_fk4_no_e_fk5.py new file mode 100644 index 0000000..f820ec5 --- /dev/null +++ b/astropy/coordinates/tests/accuracy/test_fk4_no_e_fk5.py @@ -0,0 +1,65 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, division, print_function, + unicode_literals) + + +import numpy as np + +from .... import units as u +from ...builtin_frames import FK4NoETerms, FK5 +from ....time import Time +from ....table import Table +from ...angle_utilities import angular_separation +from ....utils.data import get_pkg_data_contents +from ....extern.six.moves import range + +# the number of tests to run +from . import N_ACCURACY_TESTS + +TOLERANCE = 0.03 # arcseconds + + +def test_fk4_no_e_fk5(): + lines = get_pkg_data_contents('fk4_no_e_fk5.csv').split('\n') + t = Table.read(lines, format='ascii', delimiter=',', guess=False) + + if N_ACCURACY_TESTS >= len(t): + idxs = range(len(t)) + else: + idxs = np.random.randint(len(t), size=N_ACCURACY_TESTS) + + diffarcsec1 = [] + diffarcsec2 = [] + for i in idxs: + # Extract row + r = t[int(i)] # int here is to get around a py 3.x astropy.table bug + + # FK4NoETerms to FK5 + c1 = FK4NoETerms(ra=r['ra_in']*u.deg, dec=r['dec_in']*u.deg, + obstime=Time(r['obstime'], scale='utc'), + equinox=Time(r['equinox_fk4'], scale='utc')) + c2 = c1.transform_to(FK5(equinox=Time(r['equinox_fk5'], scale='utc'))) + + # Find difference + diff = angular_separation(c2.ra.radian, c2.dec.radian, + np.radians(r['ra_fk5']), + np.radians(r['dec_fk5'])) + + diffarcsec1.append(np.degrees(diff) * 3600.) + + # FK5 to FK4NoETerms + c1 = FK5(ra=r['ra_in']*u.deg, dec=r['dec_in']*u.deg, + equinox=Time(r['equinox_fk5'], scale='utc')) + fk4neframe = FK4NoETerms(obstime=Time(r['obstime'], scale='utc'), + equinox=Time(r['equinox_fk4'], scale='utc')) + c2 = c1.transform_to(fk4neframe) + + # Find difference + diff = angular_separation(c2.ra.radian, c2.dec.radian, + np.radians(r['ra_fk4']), + np.radians(r['dec_fk4'])) + + diffarcsec2.append(np.degrees(diff) * 3600.) + + np.testing.assert_array_less(diffarcsec1, TOLERANCE) + np.testing.assert_array_less(diffarcsec2, TOLERANCE) diff --git a/astropy/coordinates/tests/accuracy/test_galactic_fk4.py b/astropy/coordinates/tests/accuracy/test_galactic_fk4.py new file mode 100644 index 0000000..c819f76 --- /dev/null +++ b/astropy/coordinates/tests/accuracy/test_galactic_fk4.py @@ -0,0 +1,62 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, division, print_function, + unicode_literals) + + +import numpy as np + +from .... import units as u +from ...builtin_frames import Galactic, FK4 +from ....time import Time +from ....table import Table +from ...angle_utilities import angular_separation +from ....utils.data import get_pkg_data_contents +from ....extern.six.moves import range + +# the number of tests to run +from . 
import N_ACCURACY_TESTS + +TOLERANCE = 0.3 # arcseconds + + +def test_galactic_fk4(): + lines = get_pkg_data_contents('galactic_fk4.csv').split('\n') + t = Table.read(lines, format='ascii', delimiter=',', guess=False) + + if N_ACCURACY_TESTS >= len(t): + idxs = range(len(t)) + else: + idxs = np.random.randint(len(t), size=N_ACCURACY_TESTS) + + diffarcsec1 = [] + diffarcsec2 = [] + for i in idxs: + # Extract row + r = t[int(i)] # int here is to get around a py 3.x astropy.table bug + + # Galactic to FK4 + c1 = Galactic(l=r['lon_in']*u.deg, b=r['lat_in']*u.deg) + c2 = c1.transform_to(FK4(equinox=Time(r['equinox_fk4'], scale='utc'))) + + # Find difference + diff = angular_separation(c2.ra.radian, c2.dec.radian, + np.radians(r['ra_fk4']), + np.radians(r['dec_fk4'])) + + diffarcsec1.append(np.degrees(diff) * 3600.) + + # FK4 to Galactic + c1 = FK4(ra=r['lon_in']*u.deg, dec=r['lat_in']*u.deg, + obstime=Time(r['obstime'], scale='utc'), + equinox=Time(r['equinox_fk4'], scale='utc')) + c2 = c1.transform_to(Galactic) + + # Find difference + diff = angular_separation(c2.l.radian, c2.b.radian, + np.radians(r['lon_gal']), + np.radians(r['lat_gal'])) + + diffarcsec2.append(np.degrees(diff) * 3600.) + + np.testing.assert_array_less(diffarcsec1, TOLERANCE) + np.testing.assert_array_less(diffarcsec2, TOLERANCE) diff --git a/astropy/coordinates/tests/accuracy/test_icrs_fk5.py b/astropy/coordinates/tests/accuracy/test_icrs_fk5.py new file mode 100644 index 0000000..339944b --- /dev/null +++ b/astropy/coordinates/tests/accuracy/test_icrs_fk5.py @@ -0,0 +1,61 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, division, print_function, + unicode_literals) + + +import numpy as np + +from .... import units as u +from ...builtin_frames import ICRS, FK5 +from ....time import Time +from ....table import Table +from ...angle_utilities import angular_separation +from ....utils.data import get_pkg_data_contents +from ....extern.six.moves import range + +# the number of tests to run +from . import N_ACCURACY_TESTS + +TOLERANCE = 0.03 # arcseconds + + +def test_icrs_fk5(): + lines = get_pkg_data_contents('icrs_fk5.csv').split('\n') + t = Table.read(lines, format='ascii', delimiter=',', guess=False) + + if N_ACCURACY_TESTS >= len(t): + idxs = range(len(t)) + else: + idxs = np.random.randint(len(t), size=N_ACCURACY_TESTS) + + diffarcsec1 = [] + diffarcsec2 = [] + for i in idxs: + # Extract row + r = t[int(i)] # int here is to get around a py 3.x astropy.table bug + + # ICRS to FK5 + c1 = ICRS(ra=r['ra_in']*u.deg, dec=r['dec_in']*u.deg) + c2 = c1.transform_to(FK5(equinox=Time(r['equinox_fk5'], scale='utc'))) + + # Find difference + diff = angular_separation(c2.ra.radian, c2.dec.radian, + np.radians(r['ra_fk5']), + np.radians(r['dec_fk5'])) + + diffarcsec1.append(np.degrees(diff) * 3600.) + + # FK5 to ICRS + c1 = FK5(ra=r['ra_in']*u.deg, dec=r['dec_in']*u.deg, + equinox=Time(r['equinox_fk5'], scale='utc')) + c2 = c1.transform_to(ICRS) + + # Find difference + diff = angular_separation(c2.ra.radian, c2.dec.radian, + np.radians(r['ra_icrs']), + np.radians(r['dec_icrs'])) + + diffarcsec2.append(np.degrees(diff) * 3600.) 
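# --- Editorial aside, not part of the upstream patch ---
# The asserts below compare elementwise and strictly, so a single sampled
# row exceeding TOLERANCE (0.03 arcsec here) fails the whole test; e.g.
# np.testing.assert_array_less([0.01, 0.05], 0.03) raises AssertionError
# because of the second element.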
+
+    np.testing.assert_array_less(diffarcsec1, TOLERANCE)
+    np.testing.assert_array_less(diffarcsec2, TOLERANCE)
diff --git a/astropy/coordinates/tests/test_angles.py b/astropy/coordinates/tests/test_angles.py
new file mode 100644
index 0000000..d1adc84
--- /dev/null
+++ b/astropy/coordinates/tests/test_angles.py
@@ -0,0 +1,887 @@
+# -*- coding: utf-8 -*-
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+
+# TEST_UNICODE_LITERALS
+
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
+
+"""Test initialization and other aspects of Angle and subclasses"""
+
+import pytest
+import numpy as np
+from numpy.testing.utils import assert_allclose, assert_array_equal
+
+from ..angles import Longitude, Latitude, Angle
+from ... import units as u
+from ..errors import IllegalSecondError, IllegalMinuteError, IllegalHourError
+
+
+def test_create_angles():
+    """
+    Tests creating and accessing Angle objects
+    """
+
+    ''' The "angle" is a fundamental object. The internal
+    representation is stored in radians, but this is transparent to the user.
+    Units *must* be specified rather than a default value being assumed. This
+    is as much for self-documenting code as anything else.
+
+    Angle objects simply represent a single angular coordinate. More specific
+    angular coordinates (e.g. Longitude, Latitude) are subclasses of Angle.'''
+
+    a1 = Angle(54.12412, unit=u.degree)
+    a2 = Angle("54.12412", unit=u.degree)
+    a3 = Angle("54:07:26.832", unit=u.degree)
+    a4 = Angle("54.12412 deg")
+    a5 = Angle("54.12412 degrees")
+    a6 = Angle("54.12412°")  # because we like Unicode
+    a7 = Angle((54, 7, 26.832), unit=u.degree)
+    a8 = Angle("54°07'26.832\"")
+    # (deg,min,sec) *tuples* are acceptable, but lists/arrays are *not*
+    # because of the need to eventually support arrays of coordinates
+    a9 = Angle([54, 7, 26.832], unit=u.degree)
+    assert_allclose(a9.value, [54, 7, 26.832])
+    assert a9.unit is u.degree
+
+    a10 = Angle(3.60827466667, unit=u.hour)
+    a11 = Angle("3:36:29.7888000120", unit=u.hour)
+    a12 = Angle((3, 36, 29.7888000120), unit=u.hour)  # *must* be a tuple
+    # Regression test for #5001
+    a13 = Angle((3, 36, 29.7888000120), unit='hour')
+
+    Angle(0.944644098745, unit=u.radian)
+
+    with pytest.raises(u.UnitsError):
+        Angle(54.12412)
+        # raises an exception because this is ambiguous
+
+    with pytest.raises(u.UnitsError):
+        Angle(54.12412, unit=u.m)
+
+    with pytest.raises(ValueError):
+        Angle(12.34, unit="not a unit")
+
+    a14 = Angle("03h36m29.7888000120")  # no trailing 's', but unambiguous
+
+    a15 = Angle("5h4m3s")  # single digits, no decimal
+    assert a15.unit == u.hourangle
+
+    a16 = Angle("1 d")
+    a17 = Angle("1 degree")
+
+    assert a16.degree == 1
+    assert a17.degree == 1
+
+    a18 = Angle("54 07.4472", unit=u.degree)
+    a19 = Angle("54:07.4472", unit=u.degree)
+    a20 = Angle("54d07.4472m", unit=u.degree)
+    a21 = Angle("3h36m", unit=u.hour)
+    a22 = Angle("3.6h", unit=u.hour)
+    a23 = Angle("- 3h", unit=u.hour)
+    a24 = Angle("+ 3h", unit=u.hour)
+
+    # ensure the above angles that should match do
+    assert a1 == a2 == a3 == a4 == a5 == a6 == a7 == a8 == a18 == a19 == a20
+    assert_allclose(a1.radian, a2.radian)
+    assert_allclose(a2.degree, a3.degree)
+    assert_allclose(a3.radian, a4.radian)
+    assert_allclose(a4.radian, a5.radian)
+    assert_allclose(a5.radian, a6.radian)
+    assert_allclose(a6.radian, a7.radian)
+
+    assert_allclose(a10.degree, a11.degree)
+    assert a11 == a12 == a13 == a14
+    assert a21 == a22
+    assert a23 == -a24
+
+    # check for illegal ranges / values
+    with
pytest.raises(IllegalSecondError): + a = Angle("12 32 99", unit=u.degree) + + with pytest.raises(IllegalMinuteError): + a = Angle("12 99 23", unit=u.degree) + + with pytest.raises(IllegalSecondError): + a = Angle("12 32 99", unit=u.hour) + + with pytest.raises(IllegalMinuteError): + a = Angle("12 99 23", unit=u.hour) + + with pytest.raises(IllegalHourError): + a = Angle("99 25 51.0", unit=u.hour) + + with pytest.raises(ValueError): + a = Angle("12 25 51.0xxx", unit=u.hour) + + with pytest.raises(ValueError): + a = Angle("12h34321m32.2s") + + assert a1 is not None + + +def test_angle_from_view(): + q = np.arange(3.) * u.deg + a = q.view(Angle) + assert type(a) is Angle + assert a.unit is q.unit + assert np.all(a == q) + + q2 = np.arange(4) * u.m + with pytest.raises(u.UnitTypeError): + q2.view(Angle) + + +def test_angle_ops(): + """ + Tests operations on Angle objects + """ + + # Angles can be added and subtracted. Multiplication and division by a + # scalar is also permitted. A negative operator is also valid. All of + # these operate in a single dimension. Attempting to multiply or divide two + # Angle objects will return a quantity. An exception will be raised if it + # is attempted to store output with a non-angular unit in an Angle [#2718]. + + a1 = Angle(3.60827466667, unit=u.hour) + a2 = Angle("54:07:26.832", unit=u.degree) + a1 + a2 # creates new Angle object + a1 - a2 + -a1 + + assert_allclose((a1 * 2).hour, 2 * 3.6082746666700003) + assert abs((a1 / 3.123456).hour - 3.60827466667 / 3.123456) < 1e-10 + + # commutativity + assert (2 * a1).hour == (a1 * 2).hour + + a3 = Angle(a1) # makes a *copy* of the object, but identical content as a1 + assert_allclose(a1.radian, a3.radian) + assert a1 is not a3 + + a4 = abs(-a1) + assert a4.radian == a1.radian + + a5 = Angle(5.0, unit=u.hour) + assert a5 > a1 + assert a5 >= a1 + assert a1 < a5 + assert a1 <= a5 + + # check operations with non-angular result give Quantity. + a6 = Angle(45., u.degree) + a7 = a6 * a5 + assert type(a7) is u.Quantity + + # but those with angular result yield Angle. 
+ # (a9 is regression test for #5327) + a8 = a1 + 1.*u.deg + assert type(a8) is Angle + a9 = 1.*u.deg + a1 + assert type(a9) is Angle + + with pytest.raises(TypeError): + a6 *= a5 + + with pytest.raises(TypeError): + a6 *= u.m + + with pytest.raises(TypeError): + np.sin(a6, out=a6) + + +def test_angle_convert(): + """ + Test unit conversion of Angle objects + """ + angle = Angle("54.12412", unit=u.degree) + + assert_allclose(angle.hour, 3.60827466667) + assert_allclose(angle.radian, 0.944644098745) + assert_allclose(angle.degree, 54.12412) + + assert len(angle.hms) == 3 + assert isinstance(angle.hms, tuple) + assert angle.hms[0] == 3 + assert angle.hms[1] == 36 + assert_allclose(angle.hms[2], 29.78879999999947) + # also check that the namedtuple attribute-style access works: + assert angle.hms.h == 3 + assert angle.hms.m == 36 + assert_allclose(angle.hms.s, 29.78879999999947) + + assert len(angle.dms) == 3 + assert isinstance(angle.dms, tuple) + assert angle.dms[0] == 54 + assert angle.dms[1] == 7 + assert_allclose(angle.dms[2], 26.831999999992036) + # also check that the namedtuple attribute-style access works: + assert angle.dms.d == 54 + assert angle.dms.m == 7 + assert_allclose(angle.dms.s, 26.831999999992036) + + assert isinstance(angle.dms[0], float) + assert isinstance(angle.hms[0], float) + + # now make sure dms and signed_dms work right for negative angles + negangle = Angle("-54.12412", unit=u.degree) + + assert negangle.dms.d == -54 + assert negangle.dms.m == -7 + assert_allclose(negangle.dms.s, -26.831999999992036) + assert negangle.signed_dms.sign == -1 + assert negangle.signed_dms.d == 54 + assert negangle.signed_dms.m == 7 + assert_allclose(negangle.signed_dms.s, 26.831999999992036) + + +def test_angle_formatting(): + """ + Tests string formatting for Angle objects + """ + + ''' + The string method of Angle has this signature: + def string(self, unit=DEGREE, decimal=False, sep=" ", precision=5, + pad=False): + + The "decimal" parameter defaults to False since if you need to print the + Angle as a decimal, there's no need to use the "format" method (see + above). 
+ ''' + + angle = Angle("54.12412", unit=u.degree) + + # __str__ is the default `format` + assert str(angle) == angle.to_string() + + res = 'Angle as HMS: 3h36m29.7888s' + assert "Angle as HMS: {0}".format(angle.to_string(unit=u.hour)) == res + + res = 'Angle as HMS: 3:36:29.7888' + assert "Angle as HMS: {0}".format(angle.to_string(unit=u.hour, sep=":")) == res + + res = 'Angle as HMS: 3:36:29.79' + assert "Angle as HMS: {0}".format(angle.to_string(unit=u.hour, sep=":", + precision=2)) == res + + # Note that you can provide one, two, or three separators passed as a + # tuple or list + + res = 'Angle as HMS: 3h36m29.7888s' + assert "Angle as HMS: {0}".format(angle.to_string(unit=u.hour, + sep=("h", "m", "s"), + precision=4)) == res + + res = 'Angle as HMS: 3-36|29.7888' + assert "Angle as HMS: {0}".format(angle.to_string(unit=u.hour, sep=["-", "|"], + precision=4)) == res + + res = 'Angle as HMS: 3-36-29.7888' + assert "Angle as HMS: {0}".format(angle.to_string(unit=u.hour, sep="-", + precision=4)) == res + + res = 'Angle as HMS: 03h36m29.7888s' + assert "Angle as HMS: {0}".format(angle.to_string(unit=u.hour, precision=4, + pad=True)) == res + + # Same as above, in degrees + + angle = Angle("3 36 29.78880", unit=u.degree) + + res = 'Angle as DMS: 3d36m29.7888s' + assert "Angle as DMS: {0}".format(angle.to_string(unit=u.degree)) == res + + res = 'Angle as DMS: 3:36:29.7888' + assert "Angle as DMS: {0}".format(angle.to_string(unit=u.degree, sep=":")) == res + + res = 'Angle as DMS: 3:36:29.79' + assert "Angle as DMS: {0}".format(angle.to_string(unit=u.degree, sep=":", + precision=2)) == res + + # Note that you can provide one, two, or three separators passed as a + # tuple or list + + res = 'Angle as DMS: 3d36m29.7888s' + assert "Angle as DMS: {0}".format(angle.to_string(unit=u.degree, + sep=("d", "m", "s"), + precision=4)) == res + + res = 'Angle as DMS: 3-36|29.7888' + assert "Angle as DMS: {0}".format(angle.to_string(unit=u.degree, sep=["-", "|"], + precision=4)) == res + + res = 'Angle as DMS: 3-36-29.7888' + assert "Angle as DMS: {0}".format(angle.to_string(unit=u.degree, sep="-", + precision=4)) == res + + res = 'Angle as DMS: 03d36m29.7888s' + assert "Angle as DMS: {0}".format(angle.to_string(unit=u.degree, precision=4, + pad=True)) == res + + res = 'Angle as rad: 0.0629763rad' + assert "Angle as rad: {0}".format(angle.to_string(unit=u.radian)) == res + + res = 'Angle as rad decimal: 0.0629763' + assert "Angle as rad decimal: {0}".format(angle.to_string(unit=u.radian, decimal=True)) == res + + # check negative angles + + angle = Angle(-1.23456789, unit=u.degree) + angle2 = Angle(-1.23456789, unit=u.hour) + + assert angle.to_string() == '-1d14m04.4444s' + assert angle.to_string(pad=True) == '-01d14m04.4444s' + assert angle.to_string(unit=u.hour) == '-0h04m56.2963s' + assert angle2.to_string(unit=u.hour, pad=True) == '-01h14m04.4444s' + assert angle.to_string(unit=u.radian, decimal=True) == '-0.0215473' + + +def test_to_string_vector(): + # Regression test for the fact that vectorize doesn't work with Numpy 1.6 + assert Angle([1./7., 1./7.], unit='deg').to_string()[0] == "0d08m34.2857s" + assert Angle([1./7.], unit='deg').to_string()[0] == "0d08m34.2857s" + assert Angle(1./7., unit='deg').to_string() == "0d08m34.2857s" + + +def test_angle_format_roundtripping(): + """ + Ensures that the string representation of an angle can be used to create a + new valid Angle. 
+ """ + + a1 = Angle(0, unit=u.radian) + a2 = Angle(10, unit=u.degree) + a3 = Angle(0.543, unit=u.degree) + a4 = Angle('1d2m3.4s') + + assert Angle(str(a1)).degree == a1.degree + assert Angle(str(a2)).degree == a2.degree + assert Angle(str(a3)).degree == a3.degree + assert Angle(str(a4)).degree == a4.degree + + # also check Longitude/Latitude + ra = Longitude('1h2m3.4s') + dec = Latitude('1d2m3.4s') + + assert_allclose(Angle(str(ra)).degree, ra.degree) + assert_allclose(Angle(str(dec)).degree, dec.degree) + + +def test_radec(): + """ + Tests creation/operations of Longitude and Latitude objects + """ + + ''' + Longitude and Latitude are objects that are subclassed from Angle. As with Angle, Longitude + and Latitude can parse any unambiguous format (tuples, formatted strings, etc.). + + The intention is not to create an Angle subclass for every possible + coordinate object (e.g. galactic l, galactic b). However, equatorial Longitude/Latitude + are so prevalent in astronomy that it's worth creating ones for these + units. They will be noted as "special" in the docs and use of the just the + Angle class is to be used for other coordinate systems. + ''' + + with pytest.raises(u.UnitsError): + ra = Longitude("4:08:15.162342") # error - hours or degrees? + with pytest.raises(u.UnitsError): + ra = Longitude("-4:08:15.162342") + + # the "smart" initializer allows >24 to automatically do degrees, but the + # Angle-based one does not + # TODO: adjust in 0.3 for whatever behavior is decided on + + # ra = Longitude("26:34:15.345634") # unambiguous b/c hours don't go past 24 + # assert_allclose(ra.degree, 26.570929342) + with pytest.raises(u.UnitsError): + ra = Longitude("26:34:15.345634") + + # ra = Longitude(68) + with pytest.raises(u.UnitsError): + ra = Longitude(68) + + with pytest.raises(u.UnitsError): + ra = Longitude(12) + + with pytest.raises(ValueError): + ra = Longitude("garbage containing a d and no units") + + ra = Longitude("12h43m23s") + assert_allclose(ra.hour, 12.7230555556) + + ra = Longitude((56, 14, 52.52), unit=u.degree) # can accept tuples + # TODO: again, fix based on >24 behavior + # ra = Longitude((56,14,52.52)) + with pytest.raises(u.UnitsError): + ra = Longitude((56, 14, 52.52)) + with pytest.raises(u.UnitsError): + ra = Longitude((12, 14, 52)) # ambiguous w/o units + ra = Longitude((12, 14, 52), unit=u.hour) + + ra = Longitude([56, 64, 52.2], unit=u.degree) # ...but not arrays (yet) + + # Units can be specified + ra = Longitude("4:08:15.162342", unit=u.hour) + + # TODO: this was the "smart" initializer behavior - adjust in 0.3 appropriately + # Where Longitude values are commonly found in hours or degrees, declination is + # nearly always specified in degrees, so this is the default. + # dec = Latitude("-41:08:15.162342") + with pytest.raises(u.UnitsError): + dec = Latitude("-41:08:15.162342") + dec = Latitude("-41:08:15.162342", unit=u.degree) # same as above + + +def test_negative_zero_dms(): + # Test for DMS parser + a = Angle('-00:00:10', u.deg) + assert_allclose(a.degree, -10. / 3600.) + + # Unicode minus + a = Angle('−00:00:10', u.deg) + assert_allclose(a.degree, -10. / 3600.) + + +def test_negative_zero_dm(): + # Test for DM parser + a = Angle('-00:10', u.deg) + assert_allclose(a.degree, -10. / 60.) + + +def test_negative_zero_hms(): + # Test for HMS parser + a = Angle('-00:00:10', u.hour) + assert_allclose(a.hour, -10. / 3600.) + + +def test_negative_zero_hm(): + # Test for HM parser + a = Angle('-00:10', u.hour) + assert_allclose(a.hour, -10. / 60.) 
+ + +def test_negative_sixty_hm(): + # Test for HM parser + a = Angle('-00:60', u.hour) + assert_allclose(a.hour, -1.) + + +def test_plus_sixty_hm(): + # Test for HM parser + a = Angle('00:60', u.hour) + assert_allclose(a.hour, 1.) + + +def test_negative_fifty_nine_sixty_dms(): + # Test for DMS parser + a = Angle('-00:59:60', u.deg) + assert_allclose(a.degree, -1.) + + +def test_plus_fifty_nine_sixty_dms(): + # Test for DMS parser + a = Angle('+00:59:60', u.deg) + assert_allclose(a.degree, 1.) + + +def test_negative_sixty_dms(): + # Test for DMS parser + a = Angle('-00:00:60', u.deg) + assert_allclose(a.degree, -1. / 60.) + + +def test_plus_sixty_dms(): + # Test for DMS parser + a = Angle('+00:00:60', u.deg) + assert_allclose(a.degree, 1. / 60.) + + +def test_angle_to_is_angle(): + a = Angle('00:00:60', u.deg) + assert isinstance(a, Angle) + assert isinstance(a.to(u.rad), Angle) + + +def test_angle_to_quantity(): + a = Angle('00:00:60', u.deg) + q = u.Quantity(a) + assert isinstance(q, u.Quantity) + assert q.unit is u.deg + + +def test_quantity_to_angle(): + a = Angle(1.0*u.deg) + assert isinstance(a, Angle) + with pytest.raises(u.UnitsError): + Angle(1.0*u.meter) + a = Angle(1.0*u.hour) + assert isinstance(a, Angle) + assert a.unit is u.hourangle + with pytest.raises(u.UnitsError): + Angle(1.0*u.min) + + +def test_angle_string(): + a = Angle('00:00:60', u.deg) + assert str(a) == '0d01m00s' + a = Angle('-00:00:10', u.hour) + assert str(a) == '-0h00m10s' + a = Angle(3.2, u.radian) + assert str(a) == '3.2rad' + a = Angle(4.2, u.microarcsecond) + assert str(a) == '4.2uarcsec' + a = Angle('1.0uarcsec') + assert a.value == 1.0 + assert a.unit == u.microarcsecond + a = Angle("3d") + assert_allclose(a.value, 3.0) + assert a.unit == u.degree + a = Angle('10"') + assert_allclose(a.value, 10.0) + assert a.unit == u.arcsecond + a = Angle("10'") + assert_allclose(a.value, 10.0) + assert a.unit == u.arcminute + + +def test_angle_repr(): + assert 'Angle' in repr(Angle(0, u.deg)) + assert 'Longitude' in repr(Longitude(0, u.deg)) + assert 'Latitude' in repr(Latitude(0, u.deg)) + + a = Angle(0, u.deg) + repr(a) + + +def test_large_angle_representation(): + """Test that angles above 360 degrees can be output as strings, + in repr, str, and to_string. (regression test for #1413)""" + a = Angle(350, u.deg) + Angle(350, u.deg) + a.to_string() + a.to_string(u.hourangle) + repr(a) + repr(a.to(u.hourangle)) + str(a) + str(a.to(u.hourangle)) + + +def test_wrap_at_inplace(): + a = Angle([-20, 150, 350, 360] * u.deg) + out = a.wrap_at('180d', inplace=True) + assert out is None + assert np.all(a.degree == np.array([-20., 150., -10., 0.])) + + +def test_latitude(): + with pytest.raises(ValueError): + lat = Latitude(['91d', '89d']) + with pytest.raises(ValueError): + lat = Latitude('-91d') + + lat = Latitude(['90d', '89d']) + # check that one can get items + assert lat[0] == 90 * u.deg + assert lat[1] == 89 * u.deg + # and that comparison with angles works + assert np.all(lat == Angle(['90d', '89d'])) + # check setitem works + lat[1] = 45. 
* u.deg + assert np.all(lat == Angle(['90d', '45d'])) + # but not with values out of range + with pytest.raises(ValueError): + lat[0] = 90.001 * u.deg + with pytest.raises(ValueError): + lat[0] = -90.001 * u.deg + # these should also not destroy input (#1851) + assert np.all(lat == Angle(['90d', '45d'])) + + # conserve type on unit change (closes #1423) + angle = lat.to('radian') + assert type(angle) is Latitude + # but not on calculations + angle = lat - 190 * u.deg + assert type(angle) is Angle + assert angle[0] == -100 * u.deg + + lat = Latitude('80d') + angle = lat / 2. + assert type(angle) is Angle + assert angle == 40 * u.deg + + angle = lat * 2. + assert type(angle) is Angle + assert angle == 160 * u.deg + + angle = -lat + assert type(angle) is Angle + assert angle == -80 * u.deg + + # Test errors when trying to interoperate with longitudes. + with pytest.raises(TypeError) as excinfo: + lon = Longitude(10, 'deg') + lat = Latitude(lon) + assert "A Latitude angle cannot be created from a Longitude angle" in str(excinfo) + + with pytest.raises(TypeError) as excinfo: + lon = Longitude(10, 'deg') + lat = Latitude([20], 'deg') + lat[0] = lon + assert "A Longitude angle cannot be assigned to a Latitude angle" in str(excinfo) + + # Check we can work around the Lat vs Long checks by casting explicitly to Angle. + lon = Longitude(10, 'deg') + lat = Latitude(Angle(lon)) + assert lat.value == 10.0 + # Check setitem. + lon = Longitude(10, 'deg') + lat = Latitude([20], 'deg') + lat[0] = Angle(lon) + assert lat.value[0] == 10.0 + + +def test_longitude(): + # Default wrapping at 360d with an array input + lon = Longitude(['370d', '88d']) + assert np.all(lon == Longitude(['10d', '88d'])) + assert np.all(lon == Angle(['10d', '88d'])) + + # conserve type on unit change and keep wrap_angle (closes #1423) + angle = lon.to('hourangle') + assert type(angle) is Longitude + assert angle.wrap_angle == lon.wrap_angle + angle = lon[0] + assert type(angle) is Longitude + assert angle.wrap_angle == lon.wrap_angle + angle = lon[1:] + assert type(angle) is Longitude + assert angle.wrap_angle == lon.wrap_angle + + # but not on calculations + angle = lon / 2. + assert np.all(angle == Angle(['5d', '44d'])) + assert type(angle) is Angle + assert not hasattr(angle, 'wrap_angle') + + angle = lon * 2. 
+ 400 * u.deg + assert np.all(angle == Angle(['420d', '576d'])) + assert type(angle) is Angle + + # Test setting a mutable value and having it wrap + lon[1] = -10 * u.deg + assert np.all(lon == Angle(['10d', '350d'])) + + # Test wrapping and try hitting some edge cases + lon = Longitude(np.array([0, 0.5, 1.0, 1.5, 2.0]) * np.pi, unit=u.radian) + assert np.all(lon.degree == np.array([0., 90, 180, 270, 0])) + + lon = Longitude(np.array([0, 0.5, 1.0, 1.5, 2.0]) * np.pi, unit=u.radian, wrap_angle='180d') + assert np.all(lon.degree == np.array([0., 90, -180, -90, 0])) + + # Wrap on setting wrap_angle property (also test auto-conversion of wrap_angle to an Angle) + lon = Longitude(np.array([0, 0.5, 1.0, 1.5, 2.0]) * np.pi, unit=u.radian) + lon.wrap_angle = '180d' + assert np.all(lon.degree == np.array([0., 90, -180, -90, 0])) + + lon = Longitude('460d') + assert lon == Angle('100d') + lon.wrap_angle = '90d' + assert lon == Angle('-260d') + + # check that if we initialize a longitude with another longitude, + # wrap_angle is kept by default + lon2 = Longitude(lon) + assert lon2.wrap_angle == lon.wrap_angle + # but not if we explicitly set it + lon3 = Longitude(lon, wrap_angle='180d') + assert lon3.wrap_angle == 180 * u.deg + + # check for problem reported in #2037 about Longitude initializing to -0 + lon = Longitude(0, u.deg) + lonstr = lon.to_string() + assert not lonstr.startswith('-') + + # also make sure dtype is correctly conserved + assert Longitude(0, u.deg, dtype=float).dtype == np.dtype(float) + assert Longitude(0, u.deg, dtype=int).dtype == np.dtype(int) + + # Test errors when trying to interoperate with latitudes. + with pytest.raises(TypeError) as excinfo: + lat = Latitude(10, 'deg') + lon = Longitude(lat) + assert "A Longitude angle cannot be created from a Latitude angle" in str(excinfo) + + with pytest.raises(TypeError) as excinfo: + lat = Latitude(10, 'deg') + lon = Longitude([20], 'deg') + lon[0] = lat + assert "A Latitude angle cannot be assigned to a Longitude angle" in str(excinfo) + + # Check we can work around the Lat vs Long checks by casting explicitly to Angle. + lat = Latitude(10, 'deg') + lon = Longitude(Angle(lat)) + assert lon.value == 10.0 + # Check setitem. 
+ lat = Latitude(10, 'deg') + lon = Longitude([20], 'deg') + lon[0] = Angle(lat) + assert lon.value[0] == 10.0 + + +def test_wrap_at(): + a = Angle([-20, 150, 350, 360] * u.deg) + assert np.all(a.wrap_at(360 * u.deg).degree == np.array([340., 150., 350., 0.])) + assert np.all(a.wrap_at(Angle(360, unit=u.deg)).degree == np.array([340., 150., 350., 0.])) + assert np.all(a.wrap_at('360d').degree == np.array([340., 150., 350., 0.])) + assert np.all(a.wrap_at('180d').degree == np.array([-20., 150., -10., 0.])) + assert np.all(a.wrap_at(np.pi * u.rad).degree == np.array([-20., 150., -10., 0.])) + + # Test wrapping a scalar Angle + a = Angle('190d') + assert a.wrap_at('180d') == Angle('-170d') + + a = Angle(np.arange(-1000.0, 1000.0, 0.125), unit=u.deg) + for wrap_angle in (270, 0.2, 0.0, 360.0, 500, -2000.125): + aw = a.wrap_at(wrap_angle * u.deg) + assert np.all(aw.degree >= wrap_angle - 360.0) + assert np.all(aw.degree < wrap_angle) + + aw = a.to(u.rad).wrap_at(wrap_angle * u.deg) + assert np.all(aw.degree >= wrap_angle - 360.0) + assert np.all(aw.degree < wrap_angle) + + +def test_is_within_bounds(): + a = Angle([-20, 150, 350] * u.deg) + assert a.is_within_bounds('0d', '360d') is False + assert a.is_within_bounds(None, '360d') is True + assert a.is_within_bounds(-30 * u.deg, None) is True + + a = Angle('-20d') + assert a.is_within_bounds('0d', '360d') is False + assert a.is_within_bounds(None, '360d') is True + assert a.is_within_bounds(-30 * u.deg, None) is True + + +def test_angle_mismatched_unit(): + a = Angle('+6h7m8s', unit=u.degree) + assert_allclose(a.value, 91.78333333333332) + + +def test_regression_formatting_negative(): + # Regression test for a bug that caused: + # + # >>> Angle(-1., unit='deg').to_string() + # '-1d00m-0s' + assert Angle(-0., unit='deg').to_string() == '-0d00m00s' + assert Angle(-1., unit='deg').to_string() == '-1d00m00s' + assert Angle(-0., unit='hour').to_string() == '-0h00m00s' + assert Angle(-1., unit='hour').to_string() == '-1h00m00s' + + +def test_empty_sep(): + a = Angle('05h04m31.93830s') + + assert a.to_string(sep='', precision=2, pad=True) == '050431.94' + + +def test_create_tuple(): + """ + Tests creation of an angle with a (d,m,s) or (h,m,s) tuple + """ + a1 = Angle((1, 30, 0), unit=u.degree) + assert a1.value == 1.5 + + a1 = Angle((1, 30, 0), unit=u.hourangle) + assert a1.value == 1.5 + + +def test_list_of_quantities(): + a1 = Angle([1*u.deg, 1*u.hourangle]) + assert a1.unit == u.deg + assert_allclose(a1.value, [1, 15]) + + a2 = Angle([1*u.hourangle, 1*u.deg], u.deg) + assert a2.unit == u.deg + assert_allclose(a2.value, [15, 1]) + + +def test_multiply_divide(): + # Issue #2273 + a1 = Angle([1, 2, 3], u.deg) + a2 = Angle([4, 5, 6], u.deg) + a3 = a1 * a2 + assert_allclose(a3.value, [4, 10, 18]) + assert a3.unit == (u.deg * u.deg) + + a3 = a1 / a2 + assert_allclose(a3.value, [.25, .4, .5]) + assert a3.unit == u.dimensionless_unscaled + + +def test_mixed_string_and_quantity(): + a1 = Angle(['1d', 1. 
* u.deg]) + assert_array_equal(a1.value, [1., 1.]) + assert a1.unit == u.deg + + a2 = Angle(['1d', 1 * u.rad * np.pi, '3d']) + assert_array_equal(a2.value, [1., 180., 3.]) + assert a2.unit == u.deg + + +def test_array_angle_tostring(): + aobj = Angle([1, 2], u.deg) + assert aobj.to_string().dtype.kind == 'U' + assert np.all(aobj.to_string() == ['1d00m00s', '2d00m00s']) + + +def test_wrap_at_without_new(): + """ + Regression test for subtle bugs from situations where an Angle is + created via numpy channels that don't do the standard __new__ but instead + depend on array_finalize to set state. Longitude is used because the + bug was in its _wrap_angle not getting initialized correctly + """ + l1 = Longitude([1]*u.deg) + l2 = Longitude([2]*u.deg) + + l = np.concatenate([l1, l2]) + assert l._wrap_angle is not None + + +def test_repr_latex(): + """ + Check the _repr_latex_ method, used primarily by IPython notebooks + """ + + # try with both scalar + scangle = Angle(2.1, u.deg) + rlscangle = scangle._repr_latex_() + + # and array angles + arrangle = Angle([1, 2.1], u.deg) + rlarrangle = arrangle._repr_latex_() + + assert rlscangle == r'$2^\circ06{}^\prime00{}^{\prime\prime}$' + assert rlscangle.split('$')[1] in rlarrangle + + # make sure the ... appears for large arrays + bigarrangle = Angle(np.ones(50000)/50000., u.deg) + assert '...' in bigarrangle._repr_latex_() + + +def test_angle_with_cds_units_enabled(): + """Regression test for #5350 + + Especially the example in + https://github.com/astropy/astropy/issues/5350#issuecomment-248770151 + """ + from ...units import cds + # the problem is with the parser, so remove it temporarily + from ..angle_utilities import _AngleParser + del _AngleParser._parser + with cds.enable(): + Angle('5d') + del _AngleParser._parser + Angle('5d') diff --git a/astropy/coordinates/tests/test_angular_separation.py b/astropy/coordinates/tests/test_angular_separation.py new file mode 100644 index 0000000..d94612a --- /dev/null +++ b/astropy/coordinates/tests/test_angular_separation.py @@ -0,0 +1,109 @@ +# -*- coding: utf-8 -*- +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +""" +Tests for the projected separation stuff +""" + +import pytest +import numpy as np + +from ...tests.helper import assert_quantity_allclose as assert_allclose +from ...extern.six.moves import zip +from ... import units as u +from ..builtin_frames import ICRS, FK5, Galactic +from .. import Angle, Distance + +# lon1, lat1, lon2, lat2 in degrees +coords = [(1, 0, 0, 0), + (0, 1, 0, 0), + (0, 0, 1, 0), + (0, 0, 0, 1), + (0, 0, 10, 0), + (0, 0, 90, 0), + (0, 0, 180, 0), + (0, 45, 0, -45), + (0, 60, 0, -30), + (-135, -15, 45, 15), + (100, -89, -80, 89), + (0, 0, 0, 0), + (0, 0, 1. / 60., 1. / 60.)] +correct_seps = [1, 1, 1, 1, 10, 90, 180, 90, 90, 180, 180, 0, + 0.023570225877234643] +correctness_margin = 2e-10 + + +def test_angsep(): + """ + Tests that the angular separation object also behaves correctly. 
+ """ + from ..angle_utilities import angular_separation + + # check it both works with floats in radians, Quantities, or Angles + for conv in (np.deg2rad, + lambda x: u.Quantity(x, "deg"), + lambda x: Angle(x, "deg")): + for (lon1, lat1, lon2, lat2), corrsep in zip(coords, correct_seps): + angsep = angular_separation(conv(lon1), conv(lat1), + conv(lon2), conv(lat2)) + assert np.fabs(angsep - conv(corrsep)) < conv(correctness_margin) + + +def test_fk5_seps(): + """ + This tests if `separation` works for FK5 objects. + + This is a regression test for github issue #891 + """ + a = FK5(1.*u.deg, 1.*u.deg) + b = FK5(2.*u.deg, 2.*u.deg) + a.separation(b) + + +def test_proj_separations(): + """ + Test angular separation functionality + """ + c1 = ICRS(ra=0*u.deg, dec=0*u.deg) + c2 = ICRS(ra=0*u.deg, dec=1*u.deg) + + sep = c2.separation(c1) + # returns an Angle object + assert isinstance(sep, Angle) + + assert sep.degree == 1 + assert_allclose(sep.arcminute, 60.) + + # these operations have ambiguous interpretations for points on a sphere + with pytest.raises(TypeError): + c1 + c2 + with pytest.raises(TypeError): + c1 - c2 + + ngp = Galactic(l=0*u.degree, b=90*u.degree) + ncp = ICRS(ra=0*u.degree, dec=90*u.degree) + + # if there is a defined conversion between the relevant coordinate systems, + # it will be automatically performed to get the right angular separation + assert_allclose(ncp.separation(ngp.transform_to(ICRS)).degree, + ncp.separation(ngp).degree) + + # distance from the north galactic pole to celestial pole + assert_allclose(ncp.separation(ngp.transform_to(ICRS)).degree, + 62.87174758503201) + + +def test_3d_separations(): + """ + Test 3D separation functionality + """ + c1 = ICRS(ra=1*u.deg, dec=1*u.deg, distance=9*u.kpc) + c2 = ICRS(ra=1*u.deg, dec=1*u.deg, distance=10*u.kpc) + + sep3d = c2.separation_3d(c1) + + assert isinstance(sep3d, Distance) + assert_allclose(sep3d - 1*u.kpc, 0*u.kpc, atol=1e-12*u.kpc) diff --git a/astropy/coordinates/tests/test_api_ape5.py b/astropy/coordinates/tests/test_api_ape5.py new file mode 100644 index 0000000..911642f --- /dev/null +++ b/astropy/coordinates/tests/test_api_ape5.py @@ -0,0 +1,449 @@ +# -*- coding: utf-8 -*- +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +""" +This is the APE5 coordinates API document re-written to work as a series of test +functions. + +Note that new tests for coordinates functionality should generally *not* be +added to this file - instead, add them to other appropriate test modules in +this package, like ``test_sky_coord.py``, ``test_frames.py``, or +``test_representation.py``. This file is instead meant mainly to keep track of +deviations from the original APE5 plan. +""" + +import pytest +import numpy as np +from numpy.random import randn +from numpy import testing as npt + +from ...tests.helper import (raises, quantity_allclose as allclose, + assert_quantity_allclose as assert_allclose) +from ... import units as u +from ... import time +from ... import coordinates as coords + +try: + import scipy # pylint: disable=W0611 +except ImportError: + HAS_SCIPY = False +else: + HAS_SCIPY = True + + +def test_representations_api(): + from ..representation import SphericalRepresentation, \ + UnitSphericalRepresentation, PhysicsSphericalRepresentation, \ + CartesianRepresentation + from ... 
coordinates import Angle, Longitude, Latitude, Distance + + # <-----------------Classes for representation of coordinate data--------------> + # These classes inherit from a common base class and internally contain Quantity + # objects, which are arrays (although they may act as scalars, like numpy's + # 0-dimensional "arrays") + + # They can be initialized in a variety of ways that make intuitive sense. + # Distance is optional. + UnitSphericalRepresentation(lon=8*u.hour, lat=5*u.deg) + UnitSphericalRepresentation(lon=8*u.hourangle, lat=5*u.deg) + SphericalRepresentation(lon=8*u.hourangle, lat=5*u.deg, distance=10*u.kpc) + + # In the initial implementation, the lat/lon/distance arguments to the + # initializer must be in order. A *possible* future change will be to allow + # smarter guessing of the order. E.g. `Latitude` and `Longitude` objects can be + # given in any order. + UnitSphericalRepresentation(Longitude(8, u.hour), Latitude(5, u.deg)) + SphericalRepresentation(Longitude(8, u.hour), Latitude(5, u.deg), Distance(10, u.kpc)) + + # Arrays of any of the inputs are fine + UnitSphericalRepresentation(lon=[8, 9]*u.hourangle, lat=[5, 6]*u.deg) + + # Default is to copy arrays, but optionally, it can be a reference + UnitSphericalRepresentation(lon=[8, 9]*u.hourangle, lat=[5, 6]*u.deg, copy=False) + + # strings are parsed by `Latitude` and `Longitude` constructors, so no need to + # implement parsing in the Representation classes + UnitSphericalRepresentation(lon=Angle('2h6m3.3s'), lat=Angle('0.1rad')) + + # Or, you can give `Quantity`s with keywords, and they will be internally + # converted to Angle/Distance + c1 = SphericalRepresentation(lon=8*u.hourangle, lat=5*u.deg, distance=10*u.kpc) + + # Can also give another representation object with the `reprobj` keyword. + c2 = SphericalRepresentation.from_representation(c1) + + # distance, lat, and lon typically will just match in shape + SphericalRepresentation(lon=[8, 9]*u.hourangle, lat=[5, 6]*u.deg, distance=[10, 11]*u.kpc) + # if the input shapes are not the same, they will, where possible, be broadcast + # following numpy's standard broadcasting rules. + c2 = SphericalRepresentation(lon=[8, 9]*u.hourangle, lat=[5, 6]*u.deg, distance=10*u.kpc) + assert len(c2.distance) == 2 + # when they can't be broadcast, it is a ValueError (same as Numpy) + with raises(ValueError): + c2 = UnitSphericalRepresentation(lon=[8, 9, 10]*u.hourangle, lat=[5, 6]*u.deg) + + # It's also possible to pass in scalar quantity lists with mixed units. These + # are converted to array quantities following the same rule as `Quantity`: all + # elements are converted to match the first element's units.
+ c2 = UnitSphericalRepresentation(lon=Angle([8*u.hourangle, 135*u.deg]), + lat=Angle([5*u.deg, (6*np.pi/180)*u.rad])) + assert c2.lat.unit == u.deg and c2.lon.unit == u.hourangle + npt.assert_almost_equal(c2.lon[1].value, 9) + + # The Quantity initializer itself can also be used to force the unit even if the + # first element doesn't have the right unit + lon = u.Quantity([120*u.deg, 135*u.deg], u.hourangle) + lat = u.Quantity([(5*np.pi/180)*u.rad, 0.4*u.hourangle], u.deg) + c2 = UnitSphericalRepresentation(lon, lat) + + # regardless of how input, the `lat` and `lon` come out as angle/distance + assert isinstance(c1.lat, Angle) + assert isinstance(c1.lat, Latitude) # `Latitude` is an `Angle` subclass + assert isinstance(c1.distance, Distance) + + # but they are read-only, as representations are immutable once created + with raises(AttributeError): + c1.lat = Latitude(5, u.deg) + # Note that it is still possible to modify the array in-place, but this is not + # sanctioned by the API, as this would prevent things like caching. + c2.lat[:] = [0] * u.deg # possible, but NOT SUPPORTED + + # To address the fact that there are various other conventions for how spherical + # coordinates are defined, other conventions can be included as new classes. + # Later there may be other conventions that we implement - for now just the + # physics convention, as it is one of the most common cases. + c3 = PhysicsSphericalRepresentation(phi=120*u.deg, theta=85*u.deg, r=3*u.kpc) + + # first dimension must be length-3 if a lone `Quantity` is passed in. + c1 = CartesianRepresentation(randn(3, 100) * u.kpc) + assert c1.xyz.shape[0] == 3 + assert c1.xyz.unit == u.kpc + assert c1.x.shape[0] == 100 + assert c1.y.shape[0] == 100 + assert c1.z.shape[0] == 100 + # can also give each as separate keywords + CartesianRepresentation(x=randn(100)*u.kpc, y=randn(100)*u.kpc, z=randn(100)*u.kpc) + # if the units don't match but are all distances, they will automatically be + # converted to match `x` + xarr, yarr, zarr = randn(3, 100) + c1 = CartesianRepresentation(x=xarr*u.kpc, y=yarr*u.kpc, z=zarr*u.kpc) + c2 = CartesianRepresentation(x=xarr*u.kpc, y=yarr*u.kpc, z=zarr*u.pc) + assert c1.xyz.unit == c2.xyz.unit == u.kpc + assert_allclose((c1.z / 1000) - c2.z, 0*u.kpc, atol=1e-10*u.kpc) + + # representations convert into other representations via `represent_as` + srep = SphericalRepresentation(lon=90*u.deg, lat=0*u.deg, distance=1*u.pc) + crep = srep.represent_as(CartesianRepresentation) + assert_allclose(crep.x, 0*u.pc, atol=1e-10*u.pc) + assert_allclose(crep.y, 1*u.pc, atol=1e-10*u.pc) + assert_allclose(crep.z, 0*u.pc, atol=1e-10*u.pc) + # The functions that actually do the conversion are defined via methods on the + # representation classes. This may later be expanded into a full registerable + # transform graph like the coordinate frames, but initially it will be a simpler + # method system + + +def test_frame_api(): + from ..representation import SphericalRepresentation, \ + UnitSphericalRepresentation + from ..builtin_frames import ICRS, FK5 + # <--------------------Reference Frame/"Low-level" classes---------------------> + # The low-level classes have a dual role: they act as specifiers of coordinate + # frames and they *may* also contain data as one of the representation objects, + # in which case they are the actual coordinate objects themselves. 
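+ + # An editorial sketch, not in the upstream file, making that dual role + # concrete with the ICRS class imported above: a bare frame instance + # carries no data, while one constructed with coordinates does; `has_data` + # is the public attribute that distinguishes the two cases. + frame_only = ICRS() + assert not frame_only.has_data + frame_with_data = ICRS(ra=1*u.deg, dec=2*u.deg) + assert frame_with_data.has_data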
+ + # They can always accept a representation as a first argument + icrs = ICRS(UnitSphericalRepresentation(lon=8*u.hour, lat=5*u.deg)) + + # which is stored as the `data` attribute + assert icrs.data.lat == 5*u.deg + assert icrs.data.lon == 8*u.hourangle + + # Frames that require additional information like equinoxes or obstimes get them + # as keyword parameters to the frame constructor. Where sensible, defaults are + # used. E.g., FK5 is almost always J2000 equinox + fk5 = FK5(UnitSphericalRepresentation(lon=8*u.hour, lat=5*u.deg)) + J2000 = time.Time('J2000', scale='utc') + fk5_2000 = FK5(UnitSphericalRepresentation(lon=8*u.hour, lat=5*u.deg), equinox=J2000) + assert fk5.equinox == fk5_2000.equinox + + # the information required to specify the frame is immutable + J2001 = time.Time('J2001', scale='utc') + with raises(AttributeError): + fk5.equinox = J2001 + + # Similar for the representation data. + with raises(AttributeError): + fk5.data = UnitSphericalRepresentation(lon=8*u.hour, lat=5*u.deg) + + # There is also a class-level attribute that lists the attributes needed to + # identify the frame. These include attributes like `equinox` shown above. + assert all(nm in ('equinox', 'obstime') for nm in fk5.get_frame_attr_names()) + + # the result of `get_frame_attr_names` is used, in particular, by the + # high-level class (discussed below) to allow round-tripping between various + # frames. It is also part of the public API for other similar developer / + # advanced uses. + + # The actual position information is accessed via the representation objects + assert_allclose(icrs.represent_as(SphericalRepresentation).lat, 5*u.deg) + # shorthand for the above + assert_allclose(icrs.spherical.lat, 5*u.deg) + assert icrs.cartesian.z.value > 0 + + # Many frames have a "default" representation, the one in which they are + # conventionally described, often with a special name for some of the + # coordinates. E.g., most equatorial coordinate systems are spherical with RA and + # Dec.
This works simply as a shorthand for the longer form above + + assert_allclose(icrs.dec, 5*u.deg) + assert_allclose(fk5.ra, 8*u.hourangle) + + assert icrs.representation == SphericalRepresentation + + # low-level classes can also be initialized with names valid for that representation + # and frame: + icrs_2 = ICRS(ra=8*u.hour, dec=5*u.deg, distance=1*u.kpc) + assert_allclose(icrs.ra, icrs_2.ra) + + # and these are taken as the default if keywords are not given: + # icrs_nokwarg = ICRS(8*u.hour, 5*u.deg, distance=1*u.kpc) + # assert icrs_nokwarg.ra == icrs_2.ra and icrs_nokwarg.dec == icrs_2.dec + + # they also are capable of computing on-sky or 3d separations from each other, + # which will be a direct port of the existing methods: + coo1 = ICRS(ra=0*u.hour, dec=0*u.deg) + coo2 = ICRS(ra=0*u.hour, dec=1*u.deg) + # `separation` is the on-sky separation + assert coo1.separation(coo2).degree == 1.0 + + # while `separation_3d` includes the 3D distance information + coo3 = ICRS(ra=0*u.hour, dec=0*u.deg, distance=1*u.kpc) + coo4 = ICRS(ra=0*u.hour, dec=0*u.deg, distance=2*u.kpc) + assert coo3.separation_3d(coo4).kpc == 1.0 + + # The next example fails because `coo1` and `coo2` don't have distances + with raises(ValueError): + assert coo1.separation_3d(coo2).kpc == 1.0 + + # repr/str also shows info, with frame and data + # assert repr(fk5) == '<FK5 Coordinate (equinox=J2000.000): (ra, dec) in deg\n    (120., 5.)>' + + +def test_transform_api(): + from ..representation import UnitSphericalRepresentation + from ..builtin_frames import ICRS, FK5 + from ..baseframe import frame_transform_graph, BaseCoordinateFrame + from ..transformations import DynamicMatrixTransform + # <------------------------Transformations-------------------------------------> + # Transformation functionality is the key to the whole scheme: they transform + # low-level classes from one frame to another. + + # (used below but defined above in the API) + fk5 = FK5(ra=8*u.hour, dec=5*u.deg) + + # If no data (or `None`) is given, the class acts as a specifier of a frame, but + # without any stored data. + J2001 = time.Time('J2001', scale='utc') + fk5_J2001_frame = FK5(equinox=J2001) + + # if they do not have data, the string instead is the frame specification + assert repr(fk5_J2001_frame) == "<FK5 Frame (equinox=J2001.000)>" + + # Note that, although a frame object is immutable and can't have data added, it + # can be used to create a new object that does have data by giving the + # `realize_frame` method a representation: + srep = UnitSphericalRepresentation(lon=8*u.hour, lat=5*u.deg) + fk5_j2001_with_data = fk5_J2001_frame.realize_frame(srep) + assert fk5_j2001_with_data.data is not None + # Now `fk5_j2001_with_data` is in the same frame as `fk5_J2001_frame`, but it + # is an actual low-level coordinate, rather than a frame without data. + + # These frames are primarily useful for specifying what a coordinate should be + # transformed *into*, as they are used by the `transform_to` method + # E.g., this snippet precesses the point to the new equinox + newfk5 = fk5.transform_to(fk5_J2001_frame) + assert newfk5.equinox == J2001 + + # classes can also be given to `transform_to`, which then uses the defaults for + # the frame information: + samefk5 = fk5.transform_to(FK5) + # `fk5` was initialized using default `obstime` and `equinox`, so: + assert_allclose(samefk5.ra, fk5.ra, atol=1e-10*u.deg) + assert_allclose(samefk5.dec, fk5.dec, atol=1e-10*u.deg) + + # transforming to a new frame necessarily loses framespec information if that + # information is not applicable to the new frame.
This means transforms are not + # always round-trippable: + fk5_2 = FK5(ra=8*u.hour, dec=5*u.deg, equinox=J2001) + ic_trans = fk5_2.transform_to(ICRS) + + # `ic_trans` does not have an `equinox`, so now when we transform back to FK5, + # it's a *different* RA and Dec + fk5_trans = ic_trans.transform_to(FK5) + assert not allclose(fk5_2.ra, fk5_trans.ra, rtol=0, atol=1e-10*u.deg) + + # But if you explicitly give the right equinox, all is fine + fk5_trans_2 = fk5_2.transform_to(FK5(equinox=J2001)) + assert_allclose(fk5_2.ra, fk5_trans_2.ra, rtol=0, atol=1e-10*u.deg) + + # Trying to transforming a frame with no data is of course an error: + with raises(ValueError): + FK5(equinox=J2001).transform_to(ICRS) + + # To actually define a new transformation, the same scheme as in the + # 0.2/0.3 coordinates framework can be re-used - a graph of transform functions + # connecting various coordinate classes together. The main changes are: + # 1) The transform functions now get the frame object they are transforming the + # current data into. + # 2) Frames with additional information need to have a way to transform between + # objects of the same class, but with different framespecinfo values + + # An example transform function: + class SomeNewSystem(BaseCoordinateFrame): + pass + + @frame_transform_graph.transform(DynamicMatrixTransform, SomeNewSystem, FK5) + def new_to_fk5(newobj, fk5frame): + ot = newobj.obstime + eq = fk5frame.equinox + # ... build a *cartesian* transform matrix using `eq` that transforms from + # the `newobj` frame as observed at `ot` to FK5 an equinox `eq` + matrix = np.eye(3) + return matrix + + # Other options for transform functions include one that simply returns the new + # coordinate object, and one that returns a cartesian matrix but does *not* + # require `newobj` or `fk5frame` - this allows optimization of the transform. + + +def test_highlevel_api(): + J2001 = time.Time('J2001', scale='utc') + + # <--------------------------"High-level" class--------------------------------> + # The "high-level" class is intended to wrap the lower-level classes in such a + # way that they can be round-tripped, as well as providing a variety of + # convenience functionality. This document is not intended to show *all* of the + # possible high-level functionality, rather how the high-level classes are + # initialized and interact with the low-level classes + + # this creates an object that contains an `ICRS` low-level class, initialized + # identically to the first ICRS example further up. + + sc = coords.SkyCoord(coords.SphericalRepresentation(lon=8 * u.hour, + lat=5 * u.deg, distance=1 * u.kpc), frame='icrs') + + # Other representations and `system` keywords delegate to the appropriate + # low-level class. The already-existing registry for user-defined coordinates + # will be used by `SkyCoordinate` to figure out what various the `system` + # keyword actually means. + + sc = coords.SkyCoord(ra=8 * u.hour, dec=5 * u.deg, frame='icrs') + sc = coords.SkyCoord(l=120 * u.deg, b=5 * u.deg, frame='galactic') + + # High-level classes can also be initialized directly from low-level objects + sc = coords.SkyCoord(coords.ICRS(ra=8 * u.hour, dec=5 * u.deg)) + + # The next example raises an error because the high-level class must always + # have position data. 
+ with pytest.raises(ValueError): + sc = coords.SkyCoord(coords.FK5(equinox=J2001)) # raises ValueError + + # similarly, the low-level object can always be accessed + + # this is how it's supposed to look, but sometimes the numbers get rounded in + # funny ways + # assert repr(sc.frame) == '<ICRS Coordinate: (ra, dec, distance) in (deg, deg, kpc)\n    (120., 5., 1.)>' + rscf = repr(sc.frame) + assert rscf.startswith('<ICRS Coordinate: (ra, dec, distance) in') + rsc = repr(sc) + assert rsc.startswith('<SkyCoord (ICRS): (ra, dec, distance) in') + + if HAS_SCIPY: + cat1 = coords.SkyCoord(ra=[1, 2]*u.hr, dec=[3, 4.01]*u.deg, distance=[5, 6]*u.kpc, frame='icrs') + cat2 = coords.SkyCoord(ra=[1, 2, 2.01]*u.hr, dec=[3, 4, 5]*u.deg, distance=[5, 200, 6]*u.kpc, frame='icrs') + idx1, sep2d1, dist3d1 = cat1.match_to_catalog_sky(cat2) + idx2, sep2d2, dist3d2 = cat1.match_to_catalog_3d(cat2) + + assert np.any(idx1 != idx2) + + # additional convenience functionality for the future should be added as methods + # on `SkyCoord`, *not* the low-level classes. + + +@pytest.mark.remote_data +def test_highlevel_api_remote(): + m31icrs = coords.SkyCoord.from_name('M31', frame='icrs') + + m31str = str(m31icrs) + assert m31str.startswith('<SkyCoord (ICRS): (ra, dec) in deg') + assert '10.68' in m31str + assert '41.26' in m31str + # The above is essentially a replacement of the below, but tweaked so that + # small/moderate changes in what `from_name` returns don't cause the tests + # to fail + # assert str(m31icrs) == '<SkyCoord (ICRS): (ra, dec) in deg\n    (10.6847083, 41.26875)>' + + m31fk4 = coords.SkyCoord.from_name('M31', frame='fk4') + + assert m31icrs.frame != m31fk4.frame + assert np.abs(m31icrs.ra - m31fk4.ra) > .5*u.deg diff --git a/astropy/coordinates/tests/test_arrays.py b/astropy/coordinates/tests/test_arrays.py new file mode 100644 index 0000000..d5c0ef8 --- /dev/null +++ b/astropy/coordinates/tests/test_arrays.py @@ -0,0 +1,272 @@ +# -*- coding: utf-8 -*- +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +# TEST_UNICODE_LITERALS + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import pytest +import numpy as np +from numpy import testing as npt + +from ... import units as u +from ...time import Time +from ...tests.helper import assert_quantity_allclose as assert_allclose + +from .. import (Angle, ICRS, FK4, FK5, Galactic, SkyCoord, + CartesianRepresentation) +from ..angle_utilities import dms_to_degrees, hms_to_hours + + +def test_angle_arrays(): + """ + Test array values with Angle objects. + """ + # Tests incomplete + a1 = Angle([0, 45, 90, 180, 270, 360, 720.], unit=u.degree) + npt.assert_almost_equal([0., 45., 90., 180., 270., 360., 720.], a1.value) + + a2 = Angle(np.array([-90, -45, 0, 45, 90, 180, 270, 360]), unit=u.degree) + npt.assert_almost_equal([-90, -45, 0, 45, 90, 180, 270, 360], + a2.value) + + a3 = Angle(["12 degrees", "3 hours", "5 deg", "4rad"]) + npt.assert_almost_equal([12., 45., 5., 229.18311805], + a3.value) + assert a3.unit == u.degree + + a4 = Angle(["12 degrees", "3 hours", "5 deg", "4rad"], u.radian) + npt.assert_almost_equal(a4.degree, a3.value) + assert a4.unit == u.radian + + a5 = Angle([0, 45, 90, 180, 270, 360], unit=u.degree) + a6 = a5.sum() + npt.assert_almost_equal(a6.value, 945.0) + assert a6.unit is u.degree + + with pytest.raises(TypeError): + # Arrays where the elements are Angle objects are not supported -- it's + # really tricky to do correctly, if at all, due to the possibility of + # nesting.
+ a7 = Angle([a1, a2, a3], unit=u.degree) + + a8 = Angle(["04:02:02", "03:02:01", "06:02:01"], unit=u.degree) + npt.assert_almost_equal(a8.value, [4.03388889, 3.03361111, 6.03361111]) + + a9 = Angle(np.array(["04:02:02", "03:02:01", "06:02:01"]), unit=u.degree) + npt.assert_almost_equal(a9.value, a8.value) + + with pytest.raises(u.UnitsError): + a10 = Angle(["04:02:02", "03:02:01", "06:02:01"]) + + +def test_dms(): + a1 = Angle([0, 45.5, -45.5], unit=u.degree) + d, m, s = a1.dms + npt.assert_almost_equal(d, [0, 45, -45]) + npt.assert_almost_equal(m, [0, 30, -30]) + npt.assert_almost_equal(s, [0, 0, -0]) + + dms = a1.dms + degrees = dms_to_degrees(*dms) + npt.assert_almost_equal(a1.degree, degrees) + + a2 = Angle(dms, unit=u.degree) + + npt.assert_almost_equal(a2.radian, a1.radian) + + +def test_hms(): + a1 = Angle([0, 11.5, -11.5], unit=u.hour) + h, m, s = a1.hms + npt.assert_almost_equal(h, [0, 11, -11]) + npt.assert_almost_equal(m, [0, 30, -30]) + npt.assert_almost_equal(s, [0, 0, -0]) + + hms = a1.hms + hours = hms_to_hours(*hms) + npt.assert_almost_equal(a1.hour, hours) + + a2 = Angle(hms, unit=u.hour) + + npt.assert_almost_equal(a2.radian, a1.radian) + + +def test_array_coordinates_creation(): + """ + Test creating coordinates from arrays. + """ + c = ICRS(np.array([1, 2])*u.deg, np.array([3, 4])*u.deg) + assert not c.ra.isscalar + + with pytest.raises(ValueError): + c = ICRS(np.array([1, 2])*u.deg, np.array([3, 4, 5])*u.deg) + with pytest.raises(ValueError): + c = ICRS(np.array([1, 2, 4, 5])*u.deg, np.array([[3, 4], [5, 6]])*u.deg) + + # make sure cartesian initialization also works + cart = CartesianRepresentation(x=[1., 2.]*u.kpc, y=[3., 4.]*u.kpc, z=[5., 6.]*u.kpc) + c = ICRS(cart) + + # also ensure strings can be arrays + c = SkyCoord(['1d0m0s', '2h02m00.3s'], ['3d', '4d']) + + # but invalid strings cannot + with pytest.raises(ValueError): + c = SkyCoord(Angle(['10m0s', '2h02m00.3s']), Angle(['3d', '4d'])) + with pytest.raises(ValueError): + c = SkyCoord(Angle(['1d0m0s', '2h02m00.3s']), Angle(['3x', '4d'])) + + +def test_array_coordinates_distances(): + """ + Test creating coordinates from arrays and distances. + """ + # correct way + ICRS(ra=np.array([1, 2])*u.deg, dec=np.array([3, 4])*u.deg, distance=[.1, .2] * u.kpc) + + with pytest.raises(ValueError): + # scalar distance and mismatched array coordinates + ICRS(ra=np.array([1, 2, 3])*u.deg, dec=np.array([[3, 4], [5, 6]])*u.deg, distance=2. * u.kpc) + with pytest.raises(ValueError): + # more distance values than coordinates + ICRS(ra=np.array([1, 2])*u.deg, dec=np.array([3, 4])*u.deg, distance=[.1, .2, 3.] 
* u.kpc) + + +@pytest.mark.parametrize(('arrshape', 'distance'), [((2, ), None), ((4, 2, 5), None), ((4, 2, 5), 2 * u.kpc)]) +def test_array_coordinates_transformations(arrshape, distance): + """ + Test transformation on coordinates with array content (first length-2 1D, then a 3D array) + """ + # M31 coordinates from test_transformations + raarr = np.ones(arrshape) * 10.6847929 + decarr = np.ones(arrshape) * 41.2690650 + if distance is not None: + distance = np.ones(arrshape) * distance + + print(raarr, decarr, distance) + c = ICRS(ra=raarr*u.deg, dec=decarr*u.deg, distance=distance) + g = c.transform_to(Galactic) + + assert g.l.shape == arrshape + + npt.assert_array_almost_equal(g.l.degree, 121.17440967) + npt.assert_array_almost_equal(g.b.degree, -21.57299631) + + if distance is not None: + assert g.distance.unit == c.distance.unit + + # now make sure round-tripping works through FK5 + c2 = c.transform_to(FK5).transform_to(ICRS) + npt.assert_array_almost_equal(c.ra.radian, c2.ra.radian) + npt.assert_array_almost_equal(c.dec.radian, c2.dec.radian) + + assert c2.ra.shape == arrshape + + if distance is not None: + assert c2.distance.unit == c.distance.unit + + # also make sure it's possible to get to FK4, which uses a direct transform function. + fk4 = c.transform_to(FK4) + + npt.assert_array_almost_equal(fk4.ra.degree, 10.0004, decimal=4) + npt.assert_array_almost_equal(fk4.dec.degree, 40.9953, decimal=4) + + assert fk4.ra.shape == arrshape + if distance is not None: + assert fk4.distance.unit == c.distance.unit + + # now check the reverse transforms run + cfk4 = fk4.transform_to(ICRS) + assert cfk4.ra.shape == arrshape + + +def test_array_precession(): + """ + Ensures that FK5 coordinates as arrays precess their equinoxes + """ + j2000 = Time('J2000', scale='utc') + j1975 = Time('J1975', scale='utc') + + fk5 = FK5([1, 1.1]*u.radian, [0.5, 0.6]*u.radian) + assert fk5.equinox.jyear == j2000.jyear + fk5_2 = fk5.transform_to(FK5(equinox=j1975)) + assert fk5_2.equinox.jyear == j1975.jyear + + npt.assert_array_less(0.05, np.abs(fk5.ra.degree - fk5_2.ra.degree)) + npt.assert_array_less(0.05, np.abs(fk5.dec.degree - fk5_2.dec.degree)) + + +def test_array_separation(): + c1 = ICRS([0, 0]*u.deg, [0, 0]*u.deg) + c2 = ICRS([1, 2]*u.deg, [0, 0]*u.deg) + + npt.assert_array_almost_equal(c1.separation(c2).degree, [1, 2]) + + c3 = ICRS([0, 3.]*u.deg, [0., 0]*u.deg, distance=[1, 1.] * u.kpc) + c4 = ICRS([1, 1.]*u.deg, [0., 0]*u.deg, distance=[1, 1.] 
* u.kpc) + + # the 3-1 separation should be twice the 0-1 separation, but not *exactly* the same + sep = c3.separation_3d(c4) + sepdiff = sep[1] - (2 * sep[0]) + + assert abs(sepdiff.value) < 1e-5 + assert sepdiff != 0 + + +def test_array_indexing(): + ra = np.linspace(0, 360, 10) + dec = np.linspace(-90, 90, 10) + j1975 = Time(1975, format='jyear', scale='utc') + + c1 = FK5(ra*u.deg, dec*u.deg, equinox=j1975) + + c2 = c1[4] + assert c2.ra.degree == 160 + assert c2.dec.degree == -10 + + c3 = c1[2:5] + assert_allclose(c3.ra, [80, 120, 160] * u.deg) + assert_allclose(c3.dec, [-50, -30, -10] * u.deg) + + c4 = c1[np.array([2, 5, 8])] + + assert_allclose(c4.ra, [80, 200, 320] * u.deg) + assert_allclose(c4.dec, [-50, 10, 70] * u.deg) + + # now make sure the equinox is preserved + assert c2.equinox == c1.equinox + assert c3.equinox == c1.equinox + assert c4.equinox == c1.equinox + + +def test_array_len(): + input_length = [1, 5] + for length in input_length: + ra = np.linspace(0, 360, length) + dec = np.linspace(0, 90, length) + + c = ICRS(ra*u.deg, dec*u.deg) + + assert len(c) == length + + assert c.shape == (length,) + + with pytest.raises(TypeError): + c = ICRS(0*u.deg, 0*u.deg) + len(c) + + assert c.shape == tuple() + + +def test_array_eq(): + c1 = ICRS([1, 2]*u.deg, [3, 4]*u.deg) + c2 = ICRS([1, 2]*u.deg, [3, 5]*u.deg) + c3 = ICRS([1, 3]*u.deg, [3, 4]*u.deg) + c4 = ICRS([1, 2]*u.deg, [3, 4.2]*u.deg) + + assert c1 == c1 + assert c1 != c2 + assert c1 != c3 + assert c1 != c4 diff --git a/astropy/coordinates/tests/test_atc_replacements.py b/astropy/coordinates/tests/test_atc_replacements.py new file mode 100644 index 0000000..2a7d7da --- /dev/null +++ b/astropy/coordinates/tests/test_atc_replacements.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +# TEST_UNICODE_LITERALS + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +"""Test replacements for ERFA functions atciqz and aticq.""" + +from itertools import product + +import pytest + +from ...tests.helper import assert_quantity_allclose as assert_allclose +from ...time import Time +from ... import _erfa as erfa +from .utils import randomly_sample_sphere +from ..builtin_frames.utils import get_jd12, atciqz, aticq + +times = [Time("2014-06-25T00:00"), Time(["2014-06-25T00:00", "2014-09-24"])] +ra, dec, _ = randomly_sample_sphere(2) +positions = ((ra[0], dec[0]), (ra, dec)) +spacetimes = product(times, positions) + + +@pytest.mark.parametrize('st', spacetimes) +def test_atciqz_aticq(st): + """Check replacements against erfa versions for consistency.""" + t, pos = st + jd1, jd2 = get_jd12(t, 'tdb') + astrom, _ = erfa.apci13(jd1, jd2) + + ra, dec = pos + ra = ra.value + dec = dec.value + assert_allclose(erfa.atciqz(ra, dec, astrom), atciqz(ra, dec, astrom)) + assert_allclose(erfa.aticq(ra, dec, astrom), aticq(ra, dec, astrom)) diff --git a/astropy/coordinates/tests/test_celestial_transformations.py b/astropy/coordinates/tests/test_celestial_transformations.py new file mode 100644 index 0000000..afd110f --- /dev/null +++ b/astropy/coordinates/tests/test_celestial_transformations.py @@ -0,0 +1,295 @@ +# -*- coding: utf-8 -*- +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import pytest +import numpy as np + +from ... 
import units as u +from ..distances import Distance +from ..builtin_frames import (ICRS, FK5, FK4, FK4NoETerms, Galactic, + Supergalactic, Galactocentric, HCRS, GCRS, LSR) +from .. import SkyCoord +from ...tests.helper import (quantity_allclose as allclose, + assert_quantity_allclose as assert_allclose) +from .. import EarthLocation, CartesianRepresentation +from ...time import Time + +from ...extern.six.moves import range + +# used below in the next parametrized test +m31_sys = [ICRS, FK5, FK4, Galactic] +m31_coo = [(10.6847929, 41.2690650), (10.6847929, 41.2690650), (10.0004738, 40.9952444), (121.1744050, -21.5729360)] +m31_dist = Distance(770, u.kpc) +convert_precision = 1 * u.arcsec +roundtrip_precision = 1e-4 * u.degree +dist_precision = 1e-9 * u.kpc + +m31_params = [] +for i in range(len(m31_sys)): + for j in range(len(m31_sys)): + if i < j: + m31_params.append((m31_sys[i], m31_sys[j], m31_coo[i], m31_coo[j])) + + +@pytest.mark.parametrize(('fromsys', 'tosys', 'fromcoo', 'tocoo'), m31_params) +def test_m31_coord_transforms(fromsys, tosys, fromcoo, tocoo): + """ + This tests a variety of coordinate conversions for the Chandra point-source + catalog location of M31 from NED. + """ + coo1 = fromsys(ra=fromcoo[0]*u.deg, dec=fromcoo[1]*u.deg, distance=m31_dist) + coo2 = coo1.transform_to(tosys) + if tosys is FK4: + coo2_prec = coo2.transform_to(FK4(equinox=Time('B1950', scale='utc'))) + assert (coo2_prec.spherical.lon - tocoo[0]*u.deg) < convert_precision # <1 arcsec + assert (coo2_prec.spherical.lat - tocoo[1]*u.deg) < convert_precision + else: + assert (coo2.spherical.lon - tocoo[0]*u.deg) < convert_precision # <1 arcsec + assert (coo2.spherical.lat - tocoo[1]*u.deg) < convert_precision + assert coo1.distance.unit == u.kpc + assert coo2.distance.unit == u.kpc + assert m31_dist.unit == u.kpc + assert (coo2.distance - m31_dist) < dist_precision + + # check round-tripping + coo1_2 = coo2.transform_to(fromsys) + assert (coo1_2.spherical.lon - fromcoo[0]*u.deg) < roundtrip_precision + assert (coo1_2.spherical.lat - fromcoo[1]*u.deg) < roundtrip_precision + assert (coo1_2.distance - m31_dist) < dist_precision + + +def test_precession(): + """ + Ensures that FK4 and FK5 coordinates precess their equinoxes + """ + j2000 = Time('J2000', scale='utc') + b1950 = Time('B1950', scale='utc') + j1975 = Time('J1975', scale='utc') + b1975 = Time('B1975', scale='utc') + + fk4 = FK4(ra=1*u.radian, dec=0.5*u.radian) + assert fk4.equinox.byear == b1950.byear + fk4_2 = fk4.transform_to(FK4(equinox=b1975)) + assert fk4_2.equinox.byear == b1975.byear + + fk5 = FK5(ra=1*u.radian, dec=0.5*u.radian) + assert fk5.equinox.jyear == j2000.jyear + fk5_2 = fk5.transform_to(FK5(equinox=j1975)) + assert fk5_2.equinox.jyear == j1975.jyear + + +def test_fk5_galactic(): + """ + Check that FK5 -> Galactic gives the same as FK5 -> FK4 -> Galactic.
+ """ + + fk5 = FK5(ra=1*u.deg, dec=2*u.deg) + + direct = fk5.transform_to(Galactic) + indirect = fk5.transform_to(FK4).transform_to(Galactic) + + assert direct.separation(indirect).degree < 1.e-10 + + direct = fk5.transform_to(Galactic) + indirect = fk5.transform_to(FK4NoETerms).transform_to(Galactic) + + assert direct.separation(indirect).degree < 1.e-10 + + +def test_galactocentric(): + # when z_sun=0, transformation should be very similar to Galactic + icrs_coord = ICRS(ra=np.linspace(0, 360, 10)*u.deg, + dec=np.linspace(-90, 90, 10)*u.deg, + distance=1.*u.kpc) + + g_xyz = icrs_coord.transform_to(Galactic).cartesian.xyz + gc_xyz = icrs_coord.transform_to(Galactocentric(z_sun=0*u.kpc)).cartesian.xyz + diff = np.abs(g_xyz - gc_xyz) + + assert allclose(diff[0], 8.3*u.kpc, atol=1E-5*u.kpc) + assert allclose(diff[1:], 0*u.kpc, atol=1E-5*u.kpc) + + # generate some test coordinates + g = Galactic(l=[0, 0, 45, 315]*u.deg, b=[-45, 45, 0, 0]*u.deg, + distance=[np.sqrt(2)]*4*u.kpc) + xyz = g.transform_to(Galactocentric(galcen_distance=1.*u.kpc, z_sun=0.*u.pc)).cartesian.xyz + true_xyz = np.array([[0, 0, -1.], [0, 0, 1], [0, 1, 0], [0, -1, 0]]).T*u.kpc + assert allclose(xyz.to(u.kpc), true_xyz.to(u.kpc), atol=1E-5*u.kpc) + + # check that ND arrays work + + # from Galactocentric to Galactic + x = np.linspace(-10., 10., 100) * u.kpc + y = np.linspace(-10., 10., 100) * u.kpc + z = np.zeros_like(x) + + g1 = Galactocentric(x=x, y=y, z=z) + g2 = Galactocentric(x=x.reshape(100, 1, 1), y=y.reshape(100, 1, 1), + z=z.reshape(100, 1, 1)) + + g1t = g1.transform_to(Galactic) + g2t = g2.transform_to(Galactic) + + assert_allclose(g1t.cartesian.xyz, g2t.cartesian.xyz[:, :, 0, 0]) + + # from Galactic to Galactocentric + l = np.linspace(15, 30., 100) * u.deg + b = np.linspace(-10., 10., 100) * u.deg + d = np.ones_like(l.value) * u.kpc + + g1 = Galactic(l=l, b=b, distance=d) + g2 = Galactic(l=l.reshape(100, 1, 1), b=b.reshape(100, 1, 1), + distance=d.reshape(100, 1, 1)) + + g1t = g1.transform_to(Galactocentric) + g2t = g2.transform_to(Galactocentric) + + np.testing.assert_almost_equal(g1t.cartesian.xyz.value, + g2t.cartesian.xyz.value[:, :, 0, 0]) + + +def test_supergalactic(): + """ + Check Galactic<->Supergalactic and Galactic<->ICRS conversion. + """ + # Check supergalactic North pole. + npole = Galactic(l=47.37*u.degree, b=+6.32*u.degree) + assert allclose(npole.transform_to(Supergalactic).sgb.deg, +90, atol=1e-9) + + # Check the origin of supergalactic longitude. + lon0 = Supergalactic(sgl=0*u.degree, sgb=0*u.degree) + lon0_gal = lon0.transform_to(Galactic) + assert allclose(lon0_gal.l.deg, 137.37, atol=1e-9) + assert allclose(lon0_gal.b.deg, 0, atol=1e-9) + + # Test Galactic<->ICRS with some positions that appear in Foley et al. 2008 + # (http://adsabs.harvard.edu/abs/2008A%26A...484..143F) + + # GRB 021219 + supergalactic = Supergalactic(sgl=29.91*u.degree, sgb=+73.72*u.degree) + icrs = SkyCoord('18h50m27s +31d57m17s') + assert supergalactic.separation(icrs) < 0.005 * u.degree + + # GRB 030320 + supergalactic = Supergalactic(sgl=-174.44*u.degree, sgb=+46.17*u.degree) + icrs = SkyCoord('17h51m36s -25d18m52s') + assert supergalactic.separation(icrs) < 0.005 * u.degree + + +class TestHCRS(): + """ + Check HCRS<->ICRS coordinate conversions. + + Uses ICRS Solar positions predicted by get_body_barycentric; with `t1` and + `tarr` as defined below, the ICRS Solar positions were predicted using, e.g. + coord.ICRS(coord.get_body_barycentric(tarr, 'sun')). 
+ """ + + def setup(self): + self.t1 = Time("2013-02-02T23:00") + self.t2 = Time("2013-08-02T23:00") + self.tarr = Time(["2013-02-02T23:00", "2013-08-02T23:00"]) + + self.sun_icrs_scalar = ICRS(ra=244.52984668*u.deg, + dec=-22.36943723*u.deg, + distance=406615.66347377*u.km) + # array of positions corresponds to times in `tarr` + self.sun_icrs_arr = ICRS(ra=[244.52989062, 271.40976248]*u.deg, + dec=[-22.36943605, -25.07431079]*u.deg, + distance=[406615.66347377, 375484.13558956]*u.km) + + # corresponding HCRS positions + self.sun_hcrs_t1 = HCRS(CartesianRepresentation([0.0, 0.0, 0.0] * u.km), + obstime=self.t1) + twod_rep = CartesianRepresentation([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]] * u.km) + self.sun_hcrs_tarr = HCRS(twod_rep, obstime=self.tarr) + self.tolerance = 5*u.km + + def test_from_hcrs(self): + # test scalar transform + transformed = self.sun_hcrs_t1.transform_to(ICRS()) + separation = transformed.separation_3d(self.sun_icrs_scalar) + assert_allclose(separation, 0*u.km, atol=self.tolerance) + + # test non-scalar positions and times + transformed = self.sun_hcrs_tarr.transform_to(ICRS()) + separation = transformed.separation_3d(self.sun_icrs_arr) + assert_allclose(separation, 0*u.km, atol=self.tolerance) + + def test_from_icrs(self): + # scalar positions + transformed = self.sun_icrs_scalar.transform_to(HCRS(obstime=self.t1)) + separation = transformed.separation_3d(self.sun_hcrs_t1) + assert_allclose(separation, 0*u.km, atol=self.tolerance) + # nonscalar positions + transformed = self.sun_icrs_arr.transform_to(HCRS(obstime=self.tarr)) + separation = transformed.separation_3d(self.sun_hcrs_tarr) + assert_allclose(separation, 0*u.km, atol=self.tolerance) + + +class TestHelioBaryCentric(): + """ + Check GCRS<->Heliocentric and Barycentric coordinate conversions. + + Uses the WHT observing site (information grabbed from data/sites.json). + """ + + def setup(self): + wht = EarthLocation(342.12*u.deg, 28.758333333333333*u.deg, 2327*u.m) + self.obstime = Time("2013-02-02T23:00") + self.wht_itrs = wht.get_itrs(obstime=self.obstime) + + def test_heliocentric(self): + gcrs = self.wht_itrs.transform_to(GCRS(obstime=self.obstime)) + helio = gcrs.transform_to(HCRS(obstime=self.obstime)) + # Check it doesn't change from previous times. + previous = [-1.02597256e+11, 9.71725820e+10, 4.21268419e+10] * u.m + assert_allclose(helio.cartesian.xyz, previous) + + # And that it agrees with SLALIB to within 14km + helio_slalib = [-0.685820296, 0.6495585893, 0.2816005464] * u.au + assert np.sqrt(((helio.cartesian.xyz - + helio_slalib)**2).sum()) < 14. * u.km + + def test_barycentric(self): + gcrs = self.wht_itrs.transform_to(GCRS(obstime=self.obstime)) + bary = gcrs.transform_to(ICRS()) + previous = [-1.02758958e+11, 9.68331109e+10, 4.19720938e+10] * u.m + assert_allclose(bary.cartesian.xyz, previous) + + # And that it agrees with SLALIB answer to within 14km + bary_slalib = [-0.6869012079, 0.6472893646, 0.2805661191] * u.au + assert np.sqrt(((bary.cartesian.xyz - + bary_slalib)**2).sum()) < 14. 
* u.km + + +def test_lsr_sanity(): + + # random numbers, but zero velocity in ICRS frame + icrs = ICRS(ra=15.1241*u.deg, dec=17.5143*u.deg, distance=150.12*u.pc, + pm_ra_cosdec=0*u.mas/u.yr, pm_dec=0*u.mas/u.yr, + radial_velocity=0*u.km/u.s) + lsr = icrs.transform_to(LSR) + + lsr_diff = lsr.data.differentials['s'] + cart_lsr_vel = lsr_diff.represent_as(CartesianRepresentation, base=lsr.data) + lsr_vel = ICRS(cart_lsr_vel) + gal_lsr = lsr_vel.transform_to(Galactic).cartesian.xyz + assert allclose(gal_lsr.to(u.km/u.s, u.dimensionless_angles()), + lsr.v_bary.d_xyz) + + # moving with LSR velocity + lsr = LSR(ra=15.1241*u.deg, dec=17.5143*u.deg, distance=150.12*u.pc, + pm_ra_cosdec=0*u.mas/u.yr, pm_dec=0*u.mas/u.yr, + radial_velocity=0*u.km/u.s) + icrs = lsr.transform_to(ICRS) + + icrs_diff = icrs.data.differentials['s'] + cart_vel = icrs_diff.represent_as(CartesianRepresentation, base=icrs.data) + vel = ICRS(cart_vel) + gal_icrs = vel.transform_to(Galactic).cartesian.xyz + assert allclose(gal_icrs.to(u.km/u.s, u.dimensionless_angles()), + -lsr.v_bary.d_xyz) diff --git a/astropy/coordinates/tests/test_distance.py b/astropy/coordinates/tests/test_distance.py new file mode 100644 index 0000000..4c50177 --- /dev/null +++ b/astropy/coordinates/tests/test_distance.py @@ -0,0 +1,256 @@ +# -*- coding: utf-8 -*- +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +""" +This includes tests for the Distance class and related calculations +""" + +import pytest +import numpy as np +from numpy import testing as npt + +from ... import units as u +from .. import Longitude, Latitude, Distance, CartesianRepresentation +from ..builtin_frames import ICRS, Galactic + +try: + import scipy # pylint: disable=W0611 +except ImportError: + HAS_SCIPY = False +else: + HAS_SCIPY = True + + +def test_distances(): + """ + Tests functionality for Coordinate class distances and cartesian + transformations. + """ + + ''' + Distances can also be specified, and allow for a full 3D definition of a + coordinate. + ''' + + # try all the different ways to initialize a Distance + distance = Distance(12, u.parsec) + Distance(40, unit=u.au) + Distance(value=5, unit=u.kpc) + + # need to provide a unit + with pytest.raises(u.UnitsError): + Distance(12) + + # standard units are pre-defined + npt.assert_allclose(distance.lyr, 39.138765325702551) + npt.assert_allclose(distance.km, 370281309776063.0) + + # Coordinate objects can be assigned a distance object, giving them a full + # 3D position + c = Galactic(l=158.558650*u.degree, b=-43.350066*u.degree, + distance=Distance(12, u.parsec)) + + # or initialize distances via redshifts - this is actually tested in the + # function below that checks for scipy. 
This is kept here as an example
+    # c.distance = Distance(z=0.2)  # uses current cosmology
+    # with whatever your preferred cosmology may be
+    # c.distance = Distance(z=0.2, cosmology=WMAP5)
+
+    # Coordinate objects can be initialized with a distance using special
+    # syntax
+    c1 = Galactic(l=158.558650*u.deg, b=-43.350066*u.deg, distance=12 * u.kpc)
+
+    # Coordinate objects can be instantiated with cartesian coordinates
+    # Internally they will immediately be converted to two angles + a distance
+    cart = CartesianRepresentation(x=2 * u.pc, y=4 * u.pc, z=8 * u.pc)
+    c2 = Galactic(cart)
+
+    sep12 = c1.separation_3d(c2)
+    # returns a *3d* distance between the c1 and c2 coordinates; note that
+    # this is *not* the on-sky angular separation
+    assert isinstance(sep12, Distance)
+    npt.assert_allclose(sep12.pc, 12005.784163916317, 10)
+
+    '''
+    All spherical coordinate systems with distances can be converted to
+    cartesian coordinates.
+    '''
+
+    cartrep2 = c2.cartesian
+    assert isinstance(cartrep2.x, u.Quantity)
+    npt.assert_allclose(cartrep2.x.value, 2)
+    npt.assert_allclose(cartrep2.y.value, 4)
+    npt.assert_allclose(cartrep2.z.value, 8)
+
+    # with no distance, the unit sphere is assumed when converting to cartesian
+    c3 = Galactic(l=158.558650*u.degree, b=-43.350066*u.degree, distance=None)
+    unitcart = c3.cartesian
+    npt.assert_allclose(((unitcart.x**2 + unitcart.y**2 +
+                          unitcart.z**2)**0.5).value, 1.0)
+
+    # TODO: choose between these when CartesianRepresentation gets a definite
+    # decision on whether or not it gets __add__
+    #
+    # CartesianRepresentation objects can be added and subtracted, which are
+    # vector/elementwise they can also be given as arguments to a coordinate
+    # system
+    # csum = ICRS(c1.cartesian + c2.cartesian)
+    csumrep = CartesianRepresentation(c1.cartesian.xyz + c2.cartesian.xyz)
+    csum = ICRS(csumrep)
+
+    npt.assert_allclose(csumrep.x.value, -8.12016610185)
+    npt.assert_allclose(csumrep.y.value, 3.19380597435)
+    npt.assert_allclose(csumrep.z.value, -8.2294483707)
+    npt.assert_allclose(csum.ra.degree, 158.529401774)
+    npt.assert_allclose(csum.dec.degree, -43.3235825777)
+    npt.assert_allclose(csum.distance.kpc, 11.9942200501)
+
+
+@pytest.mark.skipif(str('not HAS_SCIPY'))
+def test_distances_scipy():
+    """
+    The distance-related tests that require scipy due to the cosmology
+    module needing scipy integration routines
+    """
+    from ...cosmology import WMAP5
+
+    # try different ways to initialize a Distance
+    d4 = Distance(z=0.23)  # uses default cosmology - as of writing, WMAP7
+    npt.assert_allclose(d4.z, 0.23, rtol=1e-8)
+
+    d5 = Distance(z=0.23, cosmology=WMAP5)
+    npt.assert_allclose(d5.compute_z(WMAP5), 0.23, rtol=1e-8)
+
+    d6 = Distance(z=0.23, cosmology=WMAP5, unit=u.km)
+    npt.assert_allclose(d6.value, 3.5417046898762366e+22)
+
+
+def test_distance_change():
+
+    ra = Longitude("4:08:15.162342", unit=u.hour)
+    dec = Latitude("-41:08:15.162342", unit=u.degree)
+    c1 = ICRS(ra, dec, Distance(1, unit=u.kpc))
+
+    oldx = c1.cartesian.x.value
+    assert (oldx - 0.35284083171901953) < 1e-10
+
+    # first make sure distances are immutable
+    with pytest.raises(AttributeError):
+        c1.distance = Distance(2, unit=u.kpc)
+
+    # now x should increase as the distance increases
+    c2 = ICRS(ra, dec, Distance(2, unit=u.kpc))
+    assert c2.cartesian.x.value == oldx * 2
+
+
+def test_distance_is_quantity():
+    """
+    test that distance behaves like a proper quantity
+    """
+
+    Distance(2 * u.kpc)
+
+    d = Distance([2, 3.1], u.kpc)
+
+    assert d.shape == (2,)
+
+    a = d.view(np.ndarray)
+    q = d.view(u.Quantity)
+    a[0] = 1.2
+    q.value[1] = 5.4
+
+    assert d[0].value == 1.2
+    assert d[1].value == 5.4
+
+    q = u.Quantity(d, copy=True)
+    q.value[1] = 0
+    assert q.value[1] == 0
+    assert d.value[1] != 0
+
+    # regression test against #2261
+    d = Distance([2 * u.kpc, 250. * u.pc])
+    assert d.unit is u.kpc
+    assert np.all(d.value == np.array([2., 0.25]))
+
+
+def test_distmod():
+
+    d = Distance(10, u.pc)
+    assert d.distmod.value == 0
+
+    d = Distance(distmod=20)
+    assert d.distmod.value == 20
+    assert d.kpc == 100
+
+    d = Distance(distmod=-1., unit=u.au)
+    npt.assert_allclose(d.value, 1301442.9440836983)
+
+    with pytest.raises(ValueError):
+        d = Distance(value=d, distmod=20)
+
+    with pytest.raises(ValueError):
+        d = Distance(z=.23, distmod=20)
+
+    # check the Mpc/kpc/pc behavior
+    assert Distance(distmod=1).unit == u.pc
+    assert Distance(distmod=11).unit == u.kpc
+    assert Distance(distmod=26).unit == u.Mpc
+    assert Distance(distmod=-21).unit == u.AU
+
+    # if an array, uses the mean of the log of the distances
+    assert Distance(distmod=[1, 11, 26]).unit == u.kpc
+
+
+def test_distance_in_coordinates():
+    """
+    test that distances can be created from quantities and that cartesian
+    representations come out right
+    """
+
+    ra = Longitude("4:08:15.162342", unit=u.hour)
+    dec = Latitude("-41:08:15.162342", unit=u.degree)
+    coo = ICRS(ra, dec, distance=2*u.kpc)
+
+    cart = coo.cartesian
+
+    assert isinstance(cart.xyz, u.Quantity)
+
+
+def test_negative_distance():
+    """ Test optional kwarg allow_negative """
+
+    with pytest.raises(ValueError):
+        Distance([-2, 3.1], u.kpc)
+
+    with pytest.raises(ValueError):
+        Distance([-2, -3.1], u.kpc)
+
+    with pytest.raises(ValueError):
+        Distance(-2, u.kpc)
+
+    d = Distance(-2, u.kpc, allow_negative=True)
+    assert d.value == -2
+
+
+def test_distance_comparison():
+    """Ensure comparisons of distances work (#2206, #2250)"""
+    a = Distance(15*u.kpc)
+    b = Distance(15*u.kpc)
+    assert a == b
+    c = Distance(1.*u.Mpc)
+    assert a < c
+
+
+def test_distance_to_quantity_when_not_units_of_length():
+    """Any operation that leaves units other than those of length
+    should turn a distance into a quantity (#2206, #2250)"""
+    d = Distance(15*u.kpc)
+    twice = 2.*d
+    assert isinstance(twice, Distance)
+    area = 4.*np.pi*d**2
+    assert area.unit.is_equivalent(u.m**2)
+    assert not isinstance(area, Distance)
+    assert type(area) is u.Quantity
diff --git a/astropy/coordinates/tests/test_earth.py b/astropy/coordinates/tests/test_earth.py
new file mode 100644
index 0000000..a200ea0
--- /dev/null
+++ b/astropy/coordinates/tests/test_earth.py
@@ -0,0 +1,322 @@
+# -*- coding: utf-8 -*-
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+
+# TEST_UNICODE_LITERALS
+
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
+
+"""Test initialization of EarthLocation not already covered by the API tests"""
+
+import pickle
+
+import pytest
+import numpy as np
+
+from ..earth import EarthLocation, ELLIPSOIDS
+from ..angles import Longitude, Latitude
+from ...tests.helper import quantity_allclose, remote_data
+from ...extern.six.moves import zip
+from ... import units as u
+from ..name_resolve import NameResolveError
+
+
+def allclose_m14(a, b, rtol=1.e-14, atol=None):
+    if atol is None:
+        atol = 1.e-14 * getattr(a, 'unit', 1)
+    return quantity_allclose(a, b, rtol, atol)
+
+
+def allclose_m8(a, b, rtol=1.e-8, atol=None):
+    if atol is None:
+        atol = 1.e-8 * getattr(a, 'unit', 1)
+    return quantity_allclose(a, b, rtol, atol)
+
+
+def isclose_m14(val, ref):
+    return np.array([allclose_m14(v, r) for (v, r) in zip(val, ref)])
+
+
+def isclose_m8(val, ref):
+    return np.array([allclose_m8(v, r) for (v, r) in zip(val, ref)])
+
+
+def vvd(val, valok, dval, func, test, status):
+    """Mimic routine of erfa/src/t_erfa_c.c (to help copy & paste)"""
+    assert quantity_allclose(val, valok * val.unit, atol=dval * val.unit)
+
+
+def test_gc2gd():
+    """Test that we reproduce erfa/src/t_erfa_c.c t_gc2gd"""
+    x, y, z = (2e6, 3e6, 5.244e6)
+
+    status = 0  # help for copy & paste of vvd
+
+    location = EarthLocation.from_geocentric(x, y, z, u.m)
+    e, p, h = location.to_geodetic('WGS84')
+    e, p, h = e.to(u.radian), p.to(u.radian), h.to(u.m)
+    vvd(e, 0.98279372324732907, 1e-14, "eraGc2gd", "e1", status)
+    vvd(p, 0.97160184819075459, 1e-14, "eraGc2gd", "p1", status)
+    vvd(h, 331.41724614260599, 1e-8, "eraGc2gd", "h1", status)
+
+    e, p, h = location.to_geodetic('GRS80')
+    e, p, h = e.to(u.radian), p.to(u.radian), h.to(u.m)
+    vvd(e, 0.98279372324732907, 1e-14, "eraGc2gd", "e2", status)
+    vvd(p, 0.97160184820607853, 1e-14, "eraGc2gd", "p2", status)
+    vvd(h, 331.41731754844348, 1e-8, "eraGc2gd", "h2", status)
+
+    e, p, h = location.to_geodetic('WGS72')
+    e, p, h = e.to(u.radian), p.to(u.radian), h.to(u.m)
+    vvd(e, 0.98279372324732907, 1e-14, "eraGc2gd", "e3", status)
+    vvd(p, 0.97160181811015119, 1e-14, "eraGc2gd", "p3", status)
+    vvd(h, 333.27707261303181, 1e-8, "eraGc2gd", "h3", status)
+
+
+def test_gd2gc():
+    """Test that we reproduce erfa/src/t_erfa_c.c t_gd2gc"""
+    e = 3.1 * u.rad
+    p = -0.5 * u.rad
+    h = 2500.0 * u.m
+
+    status = 0  # help for copy & paste of vvd
+
+    location = EarthLocation.from_geodetic(e, p, h, ellipsoid='WGS84')
+    xyz = tuple(v.to(u.m) for v in location.to_geocentric())
+    vvd(xyz[0], -5599000.5577049947, 1e-7, "eraGd2gc", "0/1", status)
+    vvd(xyz[1], 233011.67223479203, 1e-7, "eraGd2gc", "1/1", status)
+    vvd(xyz[2], -3040909.4706983363, 1e-7, "eraGd2gc", "2/1", status)
+
+    location = EarthLocation.from_geodetic(e, p, h, ellipsoid='GRS80')
+    xyz = tuple(v.to(u.m) for v in location.to_geocentric())
+    vvd(xyz[0], -5599000.5577260984, 1e-7, "eraGd2gc", "0/2", status)
+    vvd(xyz[1], 233011.6722356703, 1e-7, "eraGd2gc", "1/2", status)
+    vvd(xyz[2], -3040909.4706095476, 1e-7, "eraGd2gc", "2/2", status)
+
+    location = EarthLocation.from_geodetic(e, p, h, ellipsoid='WGS72')
+    xyz = tuple(v.to(u.m) for v in location.to_geocentric())
+    vvd(xyz[0], -5598998.7626301490, 1e-7, "eraGd2gc", "0/3", status)
+    vvd(xyz[1], 233011.5975297822, 1e-7, "eraGd2gc", "1/3", status)
+    vvd(xyz[2], -3040908.6861467111, 1e-7, "eraGd2gc", "2/3", status)
+
+
+class TestInput():
+    def setup(self):
+        self.lon = Longitude([0., 45., 90., 135., 180., -180, -90, -45], u.deg,
+                             wrap_angle=180*u.deg)
+        self.lat = Latitude([+0., 30., 60., +90., -90., -60., -30., 0.], u.deg)
+        self.h = u.Quantity([0.1, 0.5, 1.0, -0.5, -1.0, +4.2, -11., -.1], u.m)
+        self.location = EarthLocation.from_geodetic(self.lon, self.lat, self.h)
+        self.x, self.y, self.z = self.location.to_geocentric()
+
+    def test_default_ellipsoid(self):
+        assert self.location.ellipsoid == EarthLocation._ellipsoid
+
+    def
test_geo_attributes(self): + assert all(np.all(_1 == _2) + for _1, _2 in zip(self.location.geodetic, + self.location.to_geodetic())) + assert all(np.all(_1 == _2) + for _1, _2 in zip(self.location.geocentric, + self.location.to_geocentric())) + + def test_attribute_classes(self): + """Test that attribute classes are correct (and not EarthLocation)""" + assert type(self.location.x) is u.Quantity + assert type(self.location.y) is u.Quantity + assert type(self.location.z) is u.Quantity + assert type(self.location.lon) is Longitude + assert type(self.location.lat) is Latitude + assert type(self.location.height) is u.Quantity + + def test_input(self): + """Check input is parsed correctly""" + + # units of length should be assumed geocentric + geocentric = EarthLocation(self.x, self.y, self.z) + assert np.all(geocentric == self.location) + geocentric2 = EarthLocation(self.x.value, self.y.value, self.z.value, + self.x.unit) + assert np.all(geocentric2 == self.location) + geodetic = EarthLocation(self.lon, self.lat, self.h) + assert np.all(geodetic == self.location) + geodetic2 = EarthLocation(self.lon.to_value(u.degree), + self.lat.to_value(u.degree), + self.h.to_value(u.m)) + assert np.all(geodetic2 == self.location) + geodetic3 = EarthLocation(self.lon, self.lat) + assert allclose_m14(geodetic3.lon.value, + self.location.lon.value) + assert allclose_m14(geodetic3.lat.value, + self.location.lat.value) + assert not np.any(isclose_m14(geodetic3.height.value, + self.location.height.value)) + geodetic4 = EarthLocation(self.lon, self.lat, self.h[-1]) + assert allclose_m14(geodetic4.lon.value, + self.location.lon.value) + assert allclose_m14(geodetic4.lat.value, + self.location.lat.value) + assert allclose_m14(geodetic4.height[-1].value, + self.location.height[-1].value) + assert not np.any(isclose_m14(geodetic4.height[:-1].value, + self.location.height[:-1].value)) + # check length unit preservation + geocentric5 = EarthLocation(self.x, self.y, self.z, u.pc) + assert geocentric5.unit is u.pc + assert geocentric5.x.unit is u.pc + assert geocentric5.height.unit is u.pc + assert allclose_m14(geocentric5.x.to_value(self.x.unit), self.x.value) + geodetic5 = EarthLocation(self.lon, self.lat, self.h.to(u.pc)) + assert geodetic5.unit is u.pc + assert geodetic5.x.unit is u.pc + assert geodetic5.height.unit is u.pc + assert allclose_m14(geodetic5.x.to_value(self.x.unit), self.x.value) + + def test_invalid_input(self): + """Check invalid input raises exception""" + # incomprehensible by either raises TypeError + with pytest.raises(TypeError): + EarthLocation(self.lon, self.y, self.z) + + # wrong units + with pytest.raises(u.UnitsError): + EarthLocation.from_geocentric(self.lon, self.lat, self.lat) + # inconsistent units + with pytest.raises(u.UnitsError): + EarthLocation.from_geocentric(self.h, self.lon, self.lat) + # floats without a unit + with pytest.raises(TypeError): + EarthLocation.from_geocentric(self.x.value, self.y.value, + self.z.value) + # inconsistent shape + with pytest.raises(ValueError): + EarthLocation.from_geocentric(self.x, self.y, self.z[:5]) + + # inconsistent units + with pytest.raises(u.UnitsError): + EarthLocation.from_geodetic(self.x, self.y, self.z) + # inconsistent shape + with pytest.raises(ValueError): + EarthLocation.from_geodetic(self.lon, self.lat, self.h[:5]) + + def test_slicing(self): + # test on WGS72 location, so we can check the ellipsoid is passed on + locwgs72 = EarthLocation.from_geodetic(self.lon, self.lat, self.h, + ellipsoid='WGS72') + loc_slice1 = locwgs72[4] + assert 
isinstance(loc_slice1, EarthLocation) + assert loc_slice1.unit is locwgs72.unit + assert loc_slice1.ellipsoid == locwgs72.ellipsoid == 'WGS72' + assert not loc_slice1.shape + with pytest.raises(TypeError): + loc_slice1[0] + with pytest.raises(IndexError): + len(loc_slice1) + + loc_slice2 = locwgs72[4:6] + assert isinstance(loc_slice2, EarthLocation) + assert len(loc_slice2) == 2 + assert loc_slice2.unit is locwgs72.unit + assert loc_slice2.ellipsoid == locwgs72.ellipsoid + assert loc_slice2.shape == (2,) + loc_x = locwgs72['x'] + assert type(loc_x) is u.Quantity + assert loc_x.shape == locwgs72.shape + assert loc_x.unit is locwgs72.unit + + def test_invalid_ellipsoid(self): + # unknown ellipsoid + with pytest.raises(ValueError): + EarthLocation.from_geodetic(self.lon, self.lat, self.h, + ellipsoid='foo') + with pytest.raises(TypeError): + EarthLocation(self.lon, self.lat, self.h, ellipsoid='foo') + + with pytest.raises(ValueError): + self.location.ellipsoid = 'foo' + + with pytest.raises(ValueError): + self.location.to_geodetic('foo') + + @pytest.mark.parametrize('ellipsoid', ELLIPSOIDS) + def test_ellipsoid(self, ellipsoid): + """Test that different ellipsoids are understood, and differ""" + # check that heights differ for different ellipsoids + # need different tolerance, since heights are relative to ~6000 km + lon, lat, h = self.location.to_geodetic(ellipsoid) + if ellipsoid == self.location.ellipsoid: + assert allclose_m8(h.value, self.h.value) + else: + # Some heights are very similar for some; some lon, lat identical. + assert not np.all(isclose_m8(h.value, self.h.value)) + + # given lon, lat, height, check that x,y,z differ + location = EarthLocation.from_geodetic(self.lon, self.lat, self.h, + ellipsoid=ellipsoid) + if ellipsoid == self.location.ellipsoid: + assert allclose_m14(location.z.value, self.z.value) + else: + assert not np.all(isclose_m14(location.z.value, self.z.value)) + + def test_to_value(self): + loc = self.location + loc_ndarray = loc.view(np.ndarray) + assert np.all(loc.value == loc_ndarray) + loc2 = self.location.to(u.km) + loc2_ndarray = np.empty_like(loc_ndarray) + for coo in 'x', 'y', 'z': + loc2_ndarray[coo] = loc_ndarray[coo] / 1000. + assert np.all(loc2.value == loc2_ndarray) + loc2_value = self.location.to_value(u.km) + assert np.all(loc2_value == loc2_ndarray) + + +def test_pickling(): + """Regression test against #4304.""" + el = EarthLocation(0.*u.m, 6000*u.km, 6000*u.km) + s = pickle.dumps(el) + el2 = pickle.loads(s) + assert el == el2 + + +def test_repr_latex(): + """ + Regression test for issue #4542 + """ + somelocation = EarthLocation(lon='149:3:57.9', lat='-31:16:37.3') + somelocation._repr_latex_() + somelocation2 = EarthLocation(lon=[1., 2.]*u.deg, lat=[-1., 9.]*u.deg) + somelocation2._repr_latex_() + + +@remote_data +def test_of_address(): + # no match + with pytest.raises(NameResolveError): + EarthLocation.of_address("lkjasdflkja") + + # just a location + loc = EarthLocation.of_address("New York, NY") + assert quantity_allclose(loc.lat, 40.7128*u.degree) + assert quantity_allclose(loc.lon, -74.0059*u.degree) + assert np.allclose(loc.height.value, 0.) 
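+
+    # (of_address does a remote geocoder lookup - hence the @remote_data
+    # marker above - so the asserted values reflect that service's response)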
+
+    # a location and height
+    loc = EarthLocation.of_address("New York, NY", get_height=True)
+    assert quantity_allclose(loc.lat, 40.7128*u.degree)
+    assert quantity_allclose(loc.lon, -74.0059*u.degree)
+    assert quantity_allclose(loc.height, 10.438659669*u.meter, atol=1.*u.cm)
+
+
+def test_geodetic_tuple():
+    lat = 2*u.deg
+    lon = 10*u.deg
+    height = 100*u.m
+
+    el = EarthLocation.from_geodetic(lat=lat, lon=lon, height=height)
+
+    res1 = el.to_geodetic()
+    res2 = el.geodetic
+
+    assert res1.lat == res2.lat and quantity_allclose(res1.lat, lat)
+    assert res1.lon == res2.lon and quantity_allclose(res1.lon, lon)
+    assert res1.height == res2.height and quantity_allclose(res1.height, height)
diff --git a/astropy/coordinates/tests/test_finite_difference_velocities.py b/astropy/coordinates/tests/test_finite_difference_velocities.py
new file mode 100644
index 0000000..5c295dd
--- /dev/null
+++ b/astropy/coordinates/tests/test_finite_difference_velocities.py
@@ -0,0 +1,229 @@
+# -*- coding: utf-8 -*-
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
+
+import pytest
+import numpy as np
+from ...tests.helper import quantity_allclose
+
+from ... import units as u
+from ... import constants
+from ...time import Time
+from ..builtin_frames import ICRS, AltAz, LSR, GCRS, Galactic, FK5
+from ..baseframe import frame_transform_graph
+from ..sites import get_builtin_sites
+from .. import (TimeAttribute,
+                FunctionTransformWithFiniteDifference, get_sun,
+                CartesianRepresentation, SphericalRepresentation,
+                CartesianDifferential, SphericalDifferential,
+                DynamicMatrixTransform)
+
+J2000 = Time('J2000')
+
+
+@pytest.mark.parametrize("dt, symmetric", [(1*u.second, True),
+                                           (1*u.year, True),
+                                           (1*u.second, False),
+                                           (1*u.year, False)])
+def test_faux_lsr(dt, symmetric):
+    class LSR2(LSR):
+        obstime = TimeAttribute(default=J2000)
+
+    @frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
+                                     ICRS, LSR2, finite_difference_dt=dt,
+                                     symmetric_finite_difference=symmetric)
+    def icrs_to_lsr(icrs_coo, lsr_frame):
+        dt = lsr_frame.obstime - J2000
+        offset = lsr_frame.v_bary * dt.to(u.second)
+        return lsr_frame.realize_frame(icrs_coo.data.without_differentials() + offset)
+
+    @frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
+                                     LSR2, ICRS, finite_difference_dt=dt,
+                                     symmetric_finite_difference=symmetric)
+    def lsr_to_icrs(lsr_coo, icrs_frame):
+        dt = lsr_coo.obstime - J2000
+        offset = lsr_coo.v_bary * dt.to(u.second)
+        return icrs_frame.realize_frame(lsr_coo.data - offset)
+
+    ic = ICRS(ra=12.3*u.deg, dec=45.6*u.deg, distance=7.8*u.au,
+              pm_ra_cosdec=0*u.marcsec/u.yr, pm_dec=0*u.marcsec/u.yr,
+              radial_velocity=0*u.km/u.s)
+    lsrc = ic.transform_to(LSR2())
+
+    assert quantity_allclose(ic.cartesian.xyz, lsrc.cartesian.xyz)
+
+    idiff = ic.cartesian.differentials['s']
+    ldiff = lsrc.cartesian.differentials['s']
+    change = (ldiff.d_xyz - idiff.d_xyz).to(u.km/u.s)
+    totchange = np.sum(change**2)**0.5
+    assert quantity_allclose(totchange, np.sum(lsrc.v_bary.d_xyz**2)**0.5)
+
+    ic2 = ICRS(ra=120.3*u.deg, dec=45.6*u.deg, distance=7.8*u.au,
+               pm_ra_cosdec=0*u.marcsec/u.yr, pm_dec=10*u.marcsec/u.yr,
+               radial_velocity=1000*u.km/u.s)
+    lsrc2 = ic2.transform_to(LSR2())
+
+    tot = np.sum(lsrc2.cartesian.differentials['s'].d_xyz**2)**0.5
+    assert np.abs(tot.to('km/s') - 1000*u.km/u.s) < 20*u.km/u.s
+
+
+def test_faux_fk5_galactic():
+
+    from ..builtin_frames.galactic_transforms import fk5_to_gal, _gal_to_fk5
+
+    class Galactic2(Galactic):
+        pass
+
+    dt = 1000*u.s
+
+    @frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
+                                     FK5, Galactic2, finite_difference_dt=dt,
+                                     symmetric_finite_difference=True,
+                                     finite_difference_frameattr_name=None)
+    def fk5_to_gal2(fk5_coo, gal_frame):
+        trans = DynamicMatrixTransform(fk5_to_gal, FK5, Galactic2)
+        return trans(fk5_coo, gal_frame)
+
+    @frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
+                                     Galactic2, FK5, finite_difference_dt=dt,
+                                     symmetric_finite_difference=True,
+                                     finite_difference_frameattr_name=None)
+    def gal2_to_fk5(gal_coo, fk5_frame):
+        trans = DynamicMatrixTransform(_gal_to_fk5, Galactic2, FK5)
+        return trans(gal_coo, fk5_frame)
+
+    c1 = FK5(ra=150*u.deg, dec=-17*u.deg, radial_velocity=83*u.km/u.s,
+             pm_ra_cosdec=-41*u.mas/u.yr, pm_dec=16*u.mas/u.yr,
+             distance=150*u.pc)
+    c2 = c1.transform_to(Galactic2)
+    c3 = c1.transform_to(Galactic)
+
+    # compare the matrix and finite-difference calculations
+    assert quantity_allclose(c2.pm_l_cosb, c3.pm_l_cosb, rtol=1e-4)
+    assert quantity_allclose(c2.pm_b, c3.pm_b, rtol=1e-4)
+
+
+def test_gcrs_diffs():
+    time = Time('J2017')
+    gf = GCRS(obstime=time)
+    sung = get_sun(time)  # should have very little vhelio
+
+    # qtr-year off sun location should be the direction of ~ maximal vhelio
+    qtrsung = get_sun(time-.25*u.year)
+
+    # now we use those essentially as directions where the velocities should
+    # be either maximal or minimal - parallel with or perpendicular to the
+    # Earth's orbit
+    msungr = CartesianRepresentation(-sung.cartesian.xyz).represent_as(SphericalRepresentation)
+    suni = ICRS(ra=msungr.lon, dec=msungr.lat, distance=100*u.au,
+                pm_ra_cosdec=0*u.marcsec/u.yr, pm_dec=0*u.marcsec/u.yr,
+                radial_velocity=0*u.km/u.s)
+    qtrsuni = ICRS(ra=qtrsung.ra, dec=qtrsung.dec, distance=100*u.au,
+                   pm_ra_cosdec=0*u.marcsec/u.yr, pm_dec=0*u.marcsec/u.yr,
+                   radial_velocity=0*u.km/u.s)
+
+    # Now we transform those parallel- and perpendicular-to Earth's orbit
+    # directions to GCRS, which should shift the velocity to either include
+    # the Earth's velocity vector, or not (for parallel and perpendicular,
+    # respectively).
+    sung = suni.transform_to(gf)
+    qtrsung = qtrsuni.transform_to(gf)
+
+    # should be high along the ecliptic (not-sun) axis and
+    # low along the sun axis
+    assert np.abs(qtrsung.radial_velocity) > 30*u.km/u.s
+    assert np.abs(qtrsung.radial_velocity) < 40*u.km/u.s
+    assert np.abs(sung.radial_velocity) < 1*u.km/u.s
+
+    suni2 = sung.transform_to(ICRS)
+    assert np.all(np.abs(suni2.data.differentials['s'].d_xyz) < 3e-5*u.km/u.s)
+    qtrisun2 = qtrsung.transform_to(ICRS)
+    assert np.all(np.abs(qtrisun2.data.differentials['s'].d_xyz) < 3e-5*u.km/u.s)
+
+
+def test_altaz_diffs():
+    time = Time('J2015') + np.linspace(-1, 1, 1000)*u.day
+    loc = get_builtin_sites()['greenwich']
+    aa = AltAz(obstime=time, location=loc)
+
+    icoo = ICRS(np.zeros_like(time)*u.deg, 10*u.deg, 100*u.au,
+                pm_ra_cosdec=np.zeros_like(time)*u.marcsec/u.yr,
+                pm_dec=0*u.marcsec/u.yr,
+                radial_velocity=0*u.km/u.s)
+
+    acoo = icoo.transform_to(aa)
+
+    # Make sure the change in radial velocity over ~2 days isn't too much
+    # more than the rotation speed of the Earth - some excess is expected
+    # because the orbit also shifts the RV, but it should be pretty small
+    # over this short a time.
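+    # (2*pi*R_earth/day below is the Earth's equatorial rotation speed,
+    # roughly 0.46 km/s; the factor of 1.2 leaves ~20% headroom for the
+    # orbital contribution mentioned above.)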
+    assert np.ptp(acoo.radial_velocity)/2 < (2*np.pi*constants.R_earth/u.day)*1.2  # MAGIC NUMBER
+
+    cdiff = acoo.data.differentials['s'].represent_as(CartesianDifferential,
+                                                      acoo.data)
+
+    # The "total" velocity should be > c, because the *tangential* velocity
+    # isn't a true velocity, but rather an induced velocity due to the Earth's
+    # rotation at a distance of 100 AU
+    assert np.all(np.sum(cdiff.d_xyz**2, axis=0)**0.5 > constants.c)
+
+
+_xfail = pytest.mark.xfail
+
+
+@pytest.mark.parametrize('distance', [1000*u.au,
+                                      10*u.pc,
+                                      10*u.kpc,
+                                      100*u.kpc])
+def test_numerical_limits(distance):
+    """
+    Tests the numerical stability of the default settings for the finite
+    difference transformation calculation.  This is *known* to fail at
+    distances of >~1 kpc, but this may be improved in future versions.
+    """
+
+    if distance.unit == u.kpc:
+        # pytest.mark.parametrize syntax changed in pytest 3.1 to handle
+        # directly marking xfails, thus the workaround below to support
+        # pytest <3.1 for the 2.0.x LTS
+        pytest.xfail()
+
+    time = Time('J2017') + np.linspace(-.5, .5, 100)*u.year
+
+    icoo = ICRS(ra=0*u.deg, dec=10*u.deg, distance=distance,
+                pm_ra_cosdec=0*u.marcsec/u.yr, pm_dec=0*u.marcsec/u.yr,
+                radial_velocity=0*u.km/u.s)
+    gcoo = icoo.transform_to(GCRS(obstime=time))
+    rv = gcoo.radial_velocity.to('km/s')
+
+    # if it's a lot bigger than this - ~the maximal velocity shift along
+    # the direction above with a small allowance for noise - finite-difference
+    # rounding errors have ruined the calculation
+    assert np.ptp(rv) < 65*u.km/u.s
+
+
+def diff_info_plot(frame, time):
+    """
+    Useful for plotting a frame with multiple times. *Not* used in the testing
+    suite per se, but extremely useful for interactive plotting of results from
+    tests in this module.
+    """
+    from matplotlib import pyplot as plt
+
+    fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(20, 12))
+    ax1.plot_date(time.plot_date, frame.data.differentials['s'].d_xyz.to(u.km/u.s).T, fmt='-')
+    ax1.legend(['x', 'y', 'z'])
+
+    ax2.plot_date(time.plot_date, np.sum(frame.data.differentials['s'].d_xyz.to(u.km/u.s)**2, axis=0)**0.5, fmt='-')
+    ax2.set_title('total')
+
+    sd = frame.data.differentials['s'].represent_as(SphericalDifferential, frame.data)
+
+    ax3.plot_date(time.plot_date, sd.d_distance.to(u.km/u.s), fmt='-')
+    ax3.set_title('radial')
+
+    ax4.plot_date(time.plot_date, sd.d_lat.to(u.marcsec/u.yr), fmt='-', label='lat')
+    ax4.plot_date(time.plot_date, sd.d_lon.to(u.marcsec/u.yr), fmt='-', label='lon')
+
+    return fig
diff --git a/astropy/coordinates/tests/test_formatting.py b/astropy/coordinates/tests/test_formatting.py
new file mode 100644
index 0000000..7d169f5
--- /dev/null
+++ b/astropy/coordinates/tests/test_formatting.py
@@ -0,0 +1,135 @@
+# -*- coding: utf-8 -*-
+
+# TEST_UNICODE_LITERALS
+"""
+Tests the Angle string formatting capabilities. SkyCoord formatting is in
+test_sky_coord
+"""
+
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
+
+from ...extern import six
+
+from ..angles import Angle
+from ...
import units as u + + +def test_to_string_precision(): + # There are already some tests in test_api.py, but this is a regression + # test for the bug in issue #1319 which caused incorrect formatting of the + # seconds for precision=0 + + angle = Angle(-1.23456789, unit=u.degree) + + assert angle.to_string(precision=3) == '-1d14m04.444s' + assert angle.to_string(precision=1) == '-1d14m04.4s' + assert angle.to_string(precision=0) == '-1d14m04s' + + angle2 = Angle(-1.23456789, unit=u.hourangle) + + assert angle2.to_string(precision=3, unit=u.hour) == '-1h14m04.444s' + assert angle2.to_string(precision=1, unit=u.hour) == '-1h14m04.4s' + assert angle2.to_string(precision=0, unit=u.hour) == '-1h14m04s' + + +def test_to_string_decimal(): + + # There are already some tests in test_api.py, but this is a regression + # test for the bug in issue #1323 which caused decimal formatting to not + # work + + angle1 = Angle(2., unit=u.degree) + + assert angle1.to_string(decimal=True, precision=3) == '2.000' + assert angle1.to_string(decimal=True, precision=1) == '2.0' + assert angle1.to_string(decimal=True, precision=0) == '2' + + angle2 = Angle(3., unit=u.hourangle) + + assert angle2.to_string(decimal=True, precision=3) == '3.000' + assert angle2.to_string(decimal=True, precision=1) == '3.0' + assert angle2.to_string(decimal=True, precision=0) == '3' + + angle3 = Angle(4., unit=u.radian) + + assert angle3.to_string(decimal=True, precision=3) == '4.000' + assert angle3.to_string(decimal=True, precision=1) == '4.0' + assert angle3.to_string(decimal=True, precision=0) == '4' + + +def test_to_string_formats(): + a = Angle(1.113355, unit=u.deg) + assert a.to_string(format='latex') == r'$1^\circ06{}^\prime48.078{}^{\prime\prime}$' + assert a.to_string(format='unicode') == '1°06′48.078″' + + a = Angle(1.113355, unit=u.hour) + assert a.to_string(format='latex') == r'$1^\mathrm{h}06^\mathrm{m}48.078^\mathrm{s}$' + assert a.to_string(format='unicode') == '1ʰ06ᵐ48.078ˢ' + + a = Angle(1.113355, unit=u.radian) + assert a.to_string(format='latex') == r'$1.11336\mathrm{rad}$' + assert a.to_string(format='unicode') == '1.11336rad' + + +def test_to_string_fields(): + a = Angle(1.113355, unit=u.deg) + assert a.to_string(fields=1) == r'1d' + assert a.to_string(fields=2) == r'1d07m' + assert a.to_string(fields=3) == r'1d06m48.078s' + + +def test_to_string_padding(): + a = Angle(0.5653, unit=u.deg) + assert a.to_string(unit='deg', sep=':', pad=True) == r'00:33:55.08' + + # Test to make sure negative angles are padded correctly + a = Angle(-0.5653, unit=u.deg) + assert a.to_string(unit='deg', sep=':', pad=True) == r'-00:33:55.08' + + +def test_sexagesimal_rounding_up(): + a = Angle(359.9999999999, unit=u.deg) + + assert a.to_string(precision=None) == '360d00m00s' + assert a.to_string(precision=4) == '360d00m00.0000s' + assert a.to_string(precision=5) == '360d00m00.00000s' + assert a.to_string(precision=6) == '360d00m00.000000s' + assert a.to_string(precision=7) == '359d59m59.9999996s' + + a = Angle(3.999999, unit=u.deg) + assert a.to_string(fields=2, precision=None) == '4d00m' + assert a.to_string(fields=2, precision=1) == '4d00m' + assert a.to_string(fields=2, precision=5) == '4d00m' + assert a.to_string(fields=1, precision=1) == '4d' + assert a.to_string(fields=1, precision=5) == '4d' + + +def test_to_string_scalar(): + a = Angle(1.113355, unit=u.deg) + assert isinstance(a.to_string(), six.text_type) + + +def test_to_string_radian_with_precision(): + """ + Regression test for a bug that caused ``to_string`` to crash for 
angles in
+    radians when specifying the precision.
+    """
+
+    # Check that specifying the precision works
+    a = Angle(3., unit=u.rad)
+    assert a.to_string(precision=3, sep='fromunit') == '3.000rad'
+
+
+def test_sexagesimal_round_down():
+    a1 = Angle(1, u.deg).to(u.hourangle)
+    a2 = Angle(2, u.deg)
+    assert a1.to_string() == '0h04m00s'
+    assert a2.to_string() == '2d00m00s'
+
+
+def test_to_string_fields_colon():
+    a = Angle(1.113355, unit=u.deg)
+    assert a.to_string(fields=2, sep=':') == '1:07'
+    assert a.to_string(fields=3, sep=':') == '1:06:48.078'
+    assert a.to_string(fields=1, sep=':') == '1'
diff --git a/astropy/coordinates/tests/test_frames.py b/astropy/coordinates/tests/test_frames.py
new file mode 100644
index 0000000..3657b09
--- /dev/null
+++ b/astropy/coordinates/tests/test_frames.py
@@ -0,0 +1,904 @@
+# -*- coding: utf-8 -*-
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
+
+from copy import deepcopy
+import numpy as np
+
+from ... import units as u
+from ...extern import six
+from ...tests.helper import (catch_warnings,
+                             pytest, quantity_allclose as allclose,
+                             assert_quantity_allclose as assert_allclose)
+from ...utils import OrderedDescriptorContainer
+from ...utils.compat import NUMPY_LT_1_14
+from ...utils.exceptions import AstropyWarning
+from .. import representation as r
+from ..representation import REPRESENTATION_CLASSES
+
+
+def setup_function(func):
+    func.REPRESENTATION_CLASSES_ORIG = deepcopy(REPRESENTATION_CLASSES)
+
+
+def teardown_function(func):
+    REPRESENTATION_CLASSES.clear()
+    REPRESENTATION_CLASSES.update(func.REPRESENTATION_CLASSES_ORIG)
+
+
+def test_frame_attribute_descriptor():
+    """ Unit tests of the Attribute descriptor """
+    from ..attributes import Attribute
+
+    @six.add_metaclass(OrderedDescriptorContainer)
+    class TestAttributes(object):
+        attr_none = Attribute()
+        attr_2 = Attribute(default=2)
+        attr_3_attr2 = Attribute(default=3, secondary_attribute='attr_2')
+        attr_none_attr2 = Attribute(default=None, secondary_attribute='attr_2')
+        attr_none_nonexist = Attribute(default=None, secondary_attribute='nonexist')
+
+    t = TestAttributes()
+
+    # Defaults
+    assert t.attr_none is None
+    assert t.attr_2 == 2
+    assert t.attr_3_attr2 == 3
+    assert t.attr_none_attr2 == t.attr_2
+    assert t.attr_none_nonexist is None  # No default and non-existent secondary attr
+
+    # Setting values via '_'-prefixed internal vars
+    # (as would normally be done in __init__)
+    t._attr_none = 10
+    assert t.attr_none == 10
+
+    t._attr_2 = 20
+    assert t.attr_2 == 20
+    assert t.attr_3_attr2 == 3
+    assert t.attr_none_attr2 == t.attr_2
+
+    t._attr_none_attr2 = 40
+    assert t.attr_none_attr2 == 40
+
+    # Make sure setting values via public attribute fails
+    with pytest.raises(AttributeError) as err:
+        t.attr_none = 5
+    assert 'Cannot set frame attribute' in str(err)
+
+
+def test_frame_subclass_attribute_descriptor():
+    from ..builtin_frames import FK4
+    from ..attributes import Attribute, TimeAttribute
+    from astropy.time import Time
+
+    _EQUINOX_B1980 = Time('B1980', scale='tai')
+
+    class MyFK4(FK4):
+        # equinox inherited from FK4, obstime overridden, and newattr is new
+        obstime = TimeAttribute(default=_EQUINOX_B1980)
+        newattr = Attribute(default='newattr')
+
+    mfk4 = MyFK4()
+    assert mfk4.equinox.value == 'B1950.000'
+    assert mfk4.obstime.value == 'B1980.000'
+    assert mfk4.newattr == 'newattr'
+
+    assert set(mfk4.get_frame_attr_names()) == set(['equinox', 'obstime', 'newattr'])
+
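+    # explicit keyword values should override both the MyFK4 defaults and the
+    # values inherited from FK4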
+    mfk4 = MyFK4(equinox='J1980.0', obstime='J1990.0', newattr='world')
+    assert mfk4.equinox.value == 'J1980.000'
+    assert mfk4.obstime.value == 'J1990.000'
+    assert mfk4.newattr == 'world'
+
+
+def test_create_data_frames():
+    from ..builtin_frames import ICRS
+
+    # from repr
+    i1 = ICRS(r.SphericalRepresentation(1*u.deg, 2*u.deg, 3*u.kpc))
+    i2 = ICRS(r.UnitSphericalRepresentation(lon=1*u.deg, lat=2*u.deg))
+
+    # from preferred name
+    i3 = ICRS(ra=1*u.deg, dec=2*u.deg, distance=3*u.kpc)
+    i4 = ICRS(ra=1*u.deg, dec=2*u.deg)
+
+    assert i1.data.lat == i3.data.lat
+    assert i1.data.lon == i3.data.lon
+    assert i1.data.distance == i3.data.distance
+
+    assert i2.data.lat == i4.data.lat
+    assert i2.data.lon == i4.data.lon
+
+    # now make sure the preferred names work as properties
+    assert_allclose(i1.ra, i3.ra)
+    assert_allclose(i2.ra, i4.ra)
+    assert_allclose(i1.distance, i3.distance)
+
+    with pytest.raises(AttributeError):
+        i1.ra = [11.]*u.deg
+
+
+def test_create_orderered_data():
+    from ..builtin_frames import ICRS, Galactic, AltAz
+
+    TOL = 1e-10*u.deg
+
+    i = ICRS(1*u.deg, 2*u.deg)
+    assert (i.ra - 1*u.deg) < TOL
+    assert (i.dec - 2*u.deg) < TOL
+
+    g = Galactic(1*u.deg, 2*u.deg)
+    assert (g.l - 1*u.deg) < TOL
+    assert (g.b - 2*u.deg) < TOL
+
+    a = AltAz(1*u.deg, 2*u.deg)
+    assert (a.az - 1*u.deg) < TOL
+    assert (a.alt - 2*u.deg) < TOL
+
+    with pytest.raises(TypeError):
+        ICRS(1*u.deg, 2*u.deg, 1*u.deg, 2*u.deg)
+
+    with pytest.raises(TypeError):
+        sph = r.SphericalRepresentation(1*u.deg, 2*u.deg, 3*u.kpc)
+        ICRS(sph, 1*u.deg, 2*u.deg)
+
+
+def test_create_nodata_frames():
+    from ..builtin_frames import ICRS, FK4, FK5
+
+    i = ICRS()
+    assert len(i.get_frame_attr_names()) == 0
+
+    f5 = FK5()
+    assert f5.equinox == FK5.get_frame_attr_names()['equinox']
+
+    f4 = FK4()
+    assert f4.equinox == FK4.get_frame_attr_names()['equinox']
+
+    # obstime is special because it's a property that uses equinox if obstime is not set
+    assert f4.obstime in (FK4.get_frame_attr_names()['obstime'],
+                          FK4.get_frame_attr_names()['equinox'])
+
+
+def test_no_data_nonscalar_frames():
+    from ..builtin_frames import AltAz
+    from astropy.time import Time
+    a1 = AltAz(obstime=Time('2012-01-01') + np.arange(10.) * u.day,
+               temperature=np.ones((3, 1)) * u.deg_C)
+    assert a1.obstime.shape == (3, 10)
+    assert a1.temperature.shape == (3, 10)
+    assert a1.shape == (3, 10)
+    with pytest.raises(ValueError) as exc:
+        AltAz(obstime=Time('2012-01-01') + np.arange(10.) * u.day,
+              temperature=np.ones((3,)) * u.deg_C)
+    assert 'inconsistent shapes' in str(exc)
+
+
+def test_frame_repr():
+    from ..builtin_frames import ICRS, FK5
+
+    i = ICRS()
+    assert repr(i) == '<ICRS Frame>'
+
+    f5 = FK5()
+    assert repr(f5).startswith('<FK5 Frame (equinox=')
+
+    i2 = ICRS(ra=1*u.deg, dec=2*u.deg)
+    i3 = ICRS(ra=1*u.deg, dec=2*u.deg, distance=3*u.kpc)
+
+    assert repr(i2) == ('<ICRS Coordinate: (ra, dec) in deg\n'
+                        '    ({})>').format(' 1.,  2.' if NUMPY_LT_1_14
+                                            else '1., 2.')
+    assert repr(i3) == ('<ICRS Coordinate: (ra, dec, distance) in (deg, deg, kpc)\n'
+                        '    ({})>').format(' 1.,  2.,  3.' if NUMPY_LT_1_14
+                                            else '1., 2., 3.')
+
+    # try with arrays
+    i2 = ICRS(ra=[1.1, 2.1]*u.deg, dec=[2.1, 3.1]*u.deg)
+    i3 = ICRS(ra=[1.1, 2.1]*u.deg, dec=[-15.6, 17.1]*u.deg, distance=[11., 21.]*u.kpc)
+
+    assert repr(i2) == ('<ICRS Coordinate: (ra, dec) in deg\n'
+                        '    [{}]>').format('( 1.1,  2.1), ( 2.1,  3.1)'
+                                            if NUMPY_LT_1_14 else
+                                            '(1.1, 2.1), (2.1, 3.1)')
+
+    if NUMPY_LT_1_14:
+        assert repr(i3) == ('<ICRS Coordinate: (ra, dec, distance) in (deg, deg, kpc)\n'
+                            '    [( 1.1, -15.6,  11.), ( 2.1,  17.1,  21.)]>')
+    else:
+        assert repr(i3) == ('<ICRS Coordinate: (ra, dec, distance) in (deg, deg, kpc)\n'
+                            '    [(1.1, -15.6, 11.), (2.1, 17.1, 21.)]>')
+
+
+def test_frame_repr_vels():
+    from ..builtin_frames import ICRS
+
+    i = ICRS(ra=1*u.deg, dec=2*u.deg,
+             pm_ra_cosdec=1*u.marcsec/u.yr, pm_dec=2*u.marcsec/u.yr)
+
+    # unit comes out as mas/yr because of the preferred units defined in the
+    # frame RepresentationMapping
+    assert repr(i) == ('<ICRS Coordinate: (ra, dec) in deg\n'
+                       '    ({0})\n'
+                       ' (pm_ra_cosdec, pm_dec) in mas / yr\n'
+                       '    ({0})>').format(' 1.,  2.' if NUMPY_LT_1_14 else
+                                            '1., 2.')
+
+
+def test_converting_units():
+    import re
+    from ..baseframe import RepresentationMapping
+    from ..builtin_frames import ICRS, FK5
+
+    # this is a regular expression that with split (see below) removes what's
+    # after the decimal point to fix rounding problems
+    rexrepr = re.compile(r'(.*?=\d\.).*?( .*?=\d\.).*?( .*)')
+
+    # Use values that aren't subject to rounding down to X.9999...
+    i2 = ICRS(ra=2.*u.deg, dec=2.*u.deg)
+    i2_many = ICRS(ra=[2., 4.]*u.deg, dec=[2., -8.1]*u.deg)
+
+    # converting from FK5 to ICRS and back changes the *internal* representation,
+    # but it should still come out in the preferred form
+
+    i4 = i2.transform_to(FK5).transform_to(ICRS)
+    i4_many = i2_many.transform_to(FK5).transform_to(ICRS)
+
+    ri2 = ''.join(rexrepr.split(repr(i2)))
+    ri4 = ''.join(rexrepr.split(repr(i4)))
+    assert ri2 == ri4
+    assert i2.data.lon.unit != i4.data.lon.unit  # Internal repr changed
+
+    ri2_many = ''.join(rexrepr.split(repr(i2_many)))
+    ri4_many = ''.join(rexrepr.split(repr(i4_many)))
+
+    assert ri2_many == ri4_many
+    assert i2_many.data.lon.unit != i4_many.data.lon.unit  # Internal repr changed
+
+    # but that *shouldn't* hold if we turn off units for the representation
+    class FakeICRS(ICRS):
+        frame_specific_representation_info = {
+            'spherical': [RepresentationMapping('lon', 'ra', u.hourangle),
+                          RepresentationMapping('lat', 'dec', None),
+                          RepresentationMapping('distance', 'distance')]  # should fall back to default of None unit
+        }
+        frame_specific_representation_info['unitspherical'] = \
+            frame_specific_representation_info['spherical']
+
+    fi = FakeICRS(i4.data)
+    ri2 = ''.join(rexrepr.split(repr(i2)))
+    rfi = ''.join(rexrepr.split(repr(fi)))
+    rfi = re.sub('FakeICRS', 'ICRS', rfi)  # Force frame name to match
+    assert ri2 != rfi
+
+    # the attributes should also get the right units
+    assert i2.dec.unit == i4.dec.unit
+    # unless no/explicitly given units
+    assert i2.dec.unit != fi.dec.unit
+    assert i2.ra.unit != fi.ra.unit
+    assert fi.ra.unit == u.hourangle
+
+
+def test_realizing():
+    from ..builtin_frames import ICRS, FK5
+    from ...time import Time
+
+    rep = r.SphericalRepresentation(1*u.deg, 2*u.deg, 3*u.kpc)
+
+    i = ICRS()
+    i2 = i.realize_frame(rep)
+
+    assert not i.has_data
+    assert i2.has_data
+
+    f = FK5(equinox=Time('J2001', scale='utc'))
+    f2 = f.realize_frame(rep)
+
+    assert not f.has_data
+    assert f2.has_data
+
+    assert f2.equinox == f.equinox
+    assert f2.equinox != FK5.get_frame_attr_names()['equinox']
+
+    # Check that a nicer error message is returned:
+    with pytest.raises(TypeError) as excinfo:
+        f.realize_frame(f.representation)
+
+    assert ('Class passed as data instead of a representation' in
+            excinfo.value.args[0])
+
+
+def test_replicating():
+    from ..builtin_frames import ICRS, AltAz
+    from ...time import Time
+
+    i = ICRS(ra=[1]*u.deg, dec=[2]*u.deg)
+
+    icopy = i.replicate(copy=True)
+    irepl = i.replicate(copy=False)
+    i.data._lat[:] = 0*u.deg
+    assert np.all(i.data.lat == irepl.data.lat)
+    assert np.all(i.data.lat != icopy.data.lat)
+
+    iclone = i.replicate_without_data()
+    assert i.has_data
+    assert not iclone.has_data
+
+    aa = AltAz(alt=1*u.deg, az=2*u.deg, obstime=Time('J2000'))
+    aaclone = aa.replicate_without_data(obstime=Time('J2001'))
+    assert not aaclone.has_data
+    assert aa.obstime != aaclone.obstime
+    assert aa.pressure
== aaclone.pressure + assert aa.obswl == aaclone.obswl + + +def test_getitem(): + from ..builtin_frames import ICRS + + rep = r.SphericalRepresentation( + [1, 2, 3]*u.deg, [4, 5, 6]*u.deg, [7, 8, 9]*u.kpc) + + i = ICRS(rep) + assert len(i.ra) == 3 + + iidx = i[1:] + assert len(iidx.ra) == 2 + + iidx2 = i[0] + assert iidx2.ra.isscalar + + +def test_transform(): + """ + This test just makes sure the transform architecture works, but does *not* + actually test all the builtin transforms themselves are accurate + """ + from ..builtin_frames import ICRS, FK4, FK5, Galactic + from ...time import Time + + i = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg) + f = i.transform_to(FK5) + i2 = f.transform_to(ICRS) + + assert i2.data.__class__ == r.UnitSphericalRepresentation + + assert_allclose(i.ra, i2.ra) + assert_allclose(i.dec, i2.dec) + + i = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg, distance=[5, 6]*u.kpc) + f = i.transform_to(FK5) + i2 = f.transform_to(ICRS) + + assert i2.data.__class__ != r.UnitSphericalRepresentation + + f = FK5(ra=1*u.deg, dec=2*u.deg, equinox=Time('J2001', scale='utc')) + f4 = f.transform_to(FK4) + f4_2 = f.transform_to(FK4(equinox=f.equinox)) + + # make sure attributes are copied over correctly + assert f4.equinox == FK4.get_frame_attr_names()['equinox'] + assert f4_2.equinox == f.equinox + + # make sure self-transforms also work + i = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg) + i2 = i.transform_to(ICRS) + + assert_allclose(i.ra, i2.ra) + assert_allclose(i.dec, i2.dec) + + f = FK5(ra=1*u.deg, dec=2*u.deg, equinox=Time('J2001', scale='utc')) + f2 = f.transform_to(FK5) # default equinox, so should be *different* + assert f2.equinox == FK5().equinox + with pytest.raises(AssertionError): + assert_allclose(f.ra, f2.ra) + with pytest.raises(AssertionError): + assert_allclose(f.dec, f2.dec) + + # finally, check Galactic round-tripping + i1 = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg) + i2 = i1.transform_to(Galactic).transform_to(ICRS) + + assert_allclose(i1.ra, i2.ra) + assert_allclose(i1.dec, i2.dec) + + +def test_transform_to_nonscalar_nodata_frame(): + # https://github.com/astropy/astropy/pull/5254#issuecomment-241592353 + from ..builtin_frames import ICRS, FK5 + from ...time import Time + times = Time('2016-08-23') + np.linspace(0, 10, 12)*u.day + coo1 = ICRS(ra=[[0.], [10.], [20.]]*u.deg, + dec=[[-30.], [30.], [60.]]*u.deg) + coo2 = coo1.transform_to(FK5(equinox=times)) + assert coo2.shape == (3, 12) + + +def test_sep(): + from ..builtin_frames import ICRS + + i1 = ICRS(ra=0*u.deg, dec=1*u.deg) + i2 = ICRS(ra=0*u.deg, dec=2*u.deg) + + sep = i1.separation(i2) + assert sep.deg == 1 + + i3 = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg, distance=[5, 6]*u.kpc) + i4 = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg, distance=[4, 5]*u.kpc) + + sep3d = i3.separation_3d(i4) + assert_allclose(sep3d.to(u.kpc), np.array([1, 1])*u.kpc) + + # check that it works even with velocities + i5 = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg, distance=[5, 6]*u.kpc, + pm_ra_cosdec=[1, 2]*u.mas/u.yr, pm_dec=[3, 4]*u.mas/u.yr, + radial_velocity=[5, 6]*u.km/u.s) + i6 = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg, distance=[7, 8]*u.kpc, + pm_ra_cosdec=[1, 2]*u.mas/u.yr, pm_dec=[3, 4]*u.mas/u.yr, + radial_velocity=[5, 6]*u.km/u.s) + + sep3d = i5.separation_3d(i6) + assert_allclose(sep3d.to(u.kpc), np.array([2, 2])*u.kpc) + +def test_time_inputs(): + """ + Test validation and conversion of inputs for equinox and obstime attributes. 
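+    Strings are parsed with `~astropy.time.Time`, so both epoch strings like
+    'J2001.5' and ISO timestamps like '2000-01-01 12:00:00' are accepted.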
+ """ + from ...time import Time + from ..builtin_frames import FK4 + + c = FK4(1 * u.deg, 2 * u.deg, equinox='J2001.5', obstime='2000-01-01 12:00:00') + assert c.equinox == Time('J2001.5') + assert c.obstime == Time('2000-01-01 12:00:00') + + with pytest.raises(ValueError) as err: + c = FK4(1 * u.deg, 2 * u.deg, equinox=1.5) + assert 'Invalid time input' in str(err) + + with pytest.raises(ValueError) as err: + c = FK4(1 * u.deg, 2 * u.deg, obstime='hello') + assert 'Invalid time input' in str(err) + + # A vector time should work if the shapes match, but we don't automatically + # broadcast the basic data (just like time). + FK4([1, 2] * u.deg, [2, 3] * u.deg, obstime=['J2000', 'J2001']) + with pytest.raises(ValueError) as err: + FK4(1 * u.deg, 2 * u.deg, obstime=['J2000', 'J2001']) + assert 'shape' in str(err) + + +def test_is_frame_attr_default(): + """ + Check that the `is_frame_attr_default` machinery works as expected + """ + from ...time import Time + from ..builtin_frames import FK5 + + c1 = FK5(ra=1*u.deg, dec=1*u.deg) + c2 = FK5(ra=1*u.deg, dec=1*u.deg, equinox=FK5.get_frame_attr_names()['equinox']) + c3 = FK5(ra=1*u.deg, dec=1*u.deg, equinox=Time('J2001.5')) + + assert c1.equinox == c2.equinox + assert c1.equinox != c3.equinox + + assert c1.is_frame_attr_default('equinox') + assert not c2.is_frame_attr_default('equinox') + assert not c3.is_frame_attr_default('equinox') + + c4 = c1.realize_frame(r.UnitSphericalRepresentation(3*u.deg, 4*u.deg)) + c5 = c2.realize_frame(r.UnitSphericalRepresentation(3*u.deg, 4*u.deg)) + + assert c4.is_frame_attr_default('equinox') + assert not c5.is_frame_attr_default('equinox') + + +def test_altaz_attributes(): + from ...time import Time + from .. import EarthLocation, AltAz + + aa = AltAz(1*u.deg, 2*u.deg) + assert aa.obstime is None + assert aa.location is None + + aa2 = AltAz(1*u.deg, 2*u.deg, obstime='J2000') + assert aa2.obstime == Time('J2000') + + aa3 = AltAz(1*u.deg, 2*u.deg, location=EarthLocation(0*u.deg, 0*u.deg, 0*u.m)) + assert isinstance(aa3.location, EarthLocation) + + +def test_representation(): + """ + Test the getter and setter properties for `representation` + """ + from ..builtin_frames import ICRS + + # Create the frame object. + icrs = ICRS(ra=1*u.deg, dec=1*u.deg) + data = icrs.data + + # Create some representation objects. + icrs_cart = icrs.cartesian + icrs_spher = icrs.spherical + + # Testing when `_representation` set to `CartesianRepresentation`. + icrs.representation = r.CartesianRepresentation + + assert icrs.representation == r.CartesianRepresentation + assert icrs_cart.x == icrs.x + assert icrs_cart.y == icrs.y + assert icrs_cart.z == icrs.z + assert icrs.data == data + + # Testing that an ICRS object in CartesianRepresentation must not have spherical attributes. + for attr in ('ra', 'dec', 'distance'): + with pytest.raises(AttributeError) as err: + getattr(icrs, attr) + assert 'object has no attribute' in str(err) + + # Testing when `_representation` set to `CylindricalRepresentation`. + icrs.representation = r.CylindricalRepresentation + + assert icrs.representation == r.CylindricalRepresentation + assert icrs.data == data + + # Testing setter input using text argument for spherical. + icrs.representation = 'spherical' + + assert icrs.representation is r.SphericalRepresentation + assert icrs_spher.lat == icrs.dec + assert icrs_spher.lon == icrs.ra + assert icrs_spher.distance == icrs.distance + assert icrs.data == data + + # Testing that an ICRS object in SphericalRepresentation must not have cartesian attributes. 
+
+    for attr in ('x', 'y', 'z'):
+        with pytest.raises(AttributeError) as err:
+            getattr(icrs, attr)
+        assert 'object has no attribute' in str(err)
+
+    # Testing setter input using text argument for cylindrical.
+    icrs.representation = 'cylindrical'
+
+    assert icrs.representation is r.CylindricalRepresentation
+    assert icrs.data == data
+
+    with pytest.raises(ValueError) as err:
+        icrs.representation = 'WRONG'
+    assert 'but must be a BaseRepresentation class' in str(err)
+
+    with pytest.raises(ValueError) as err:
+        icrs.representation = ICRS
+    assert 'but must be a BaseRepresentation class' in str(err)
+
+
+def test_represent_as():
+    from ..builtin_frames import ICRS
+
+    icrs = ICRS(ra=1*u.deg, dec=1*u.deg)
+
+    cart1 = icrs.represent_as('cartesian')
+    cart2 = icrs.represent_as(r.CartesianRepresentation)
+
+    assert cart1.x == cart2.x
+    assert cart1.y == cart2.y
+    assert cart1.z == cart2.z
+
+    # now try with velocities
+    icrs = ICRS(ra=0*u.deg, dec=0*u.deg, distance=10*u.kpc,
+                pm_ra_cosdec=0*u.mas/u.yr, pm_dec=0*u.mas/u.yr,
+                radial_velocity=1*u.km/u.s)
+
+    # single string
+    rep2 = icrs.represent_as('cylindrical')
+    assert isinstance(rep2, r.CylindricalRepresentation)
+    assert isinstance(rep2.differentials['s'], r.CylindricalDifferential)
+
+    # single class with positional in_frame_units, verify that warning raised
+    with catch_warnings() as w:
+        icrs.represent_as(r.CylindricalRepresentation, False)
+        assert len(w) == 1
+        assert w[0].category == AstropyWarning
+        assert 'argument position' in str(w[0].message)
+
+    # TODO: this should probably fail in the future once we figure out a better
+    # workaround for dealing with UnitSphericalRepresentation's with
+    # RadialDifferential's
+    # two classes
+    # rep2 = icrs.represent_as(r.CartesianRepresentation,
+    #                          r.SphericalCosLatDifferential)
+    # assert isinstance(rep2, r.CartesianRepresentation)
+    # assert isinstance(rep2.differentials['s'], r.SphericalCosLatDifferential)
+
+    with pytest.raises(ValueError):
+        icrs.represent_as('odaigahara')
+
+
+def test_shorthand_representations():
+    from ..builtin_frames import ICRS
+
+    rep = r.CartesianRepresentation([1, 2, 3]*u.pc)
+    dif = r.CartesianDifferential([1, 2, 3]*u.km/u.s)
+    rep = rep.with_differentials(dif)
+
+    icrs = ICRS(rep)
+
+    sph = icrs.spherical
+    assert isinstance(sph, r.SphericalRepresentation)
+    assert isinstance(sph.differentials['s'], r.SphericalDifferential)
+
+    sph = icrs.sphericalcoslat
+    assert isinstance(sph, r.SphericalRepresentation)
+    assert isinstance(sph.differentials['s'], r.SphericalCosLatDifferential)
+
+
+def test_dynamic_attrs():
+    from ..builtin_frames import ICRS
+    c = ICRS(1*u.deg, 2*u.deg)
+    assert 'ra' in dir(c)
+    assert 'dec' in dir(c)
+
+    with pytest.raises(AttributeError) as err:
+        c.blahblah
+    assert "object has no attribute 'blahblah'" in str(err)
+
+    with pytest.raises(AttributeError) as err:
+        c.ra = 1
+    assert "Cannot set any frame attribute" in str(err)
+
+    c.blahblah = 1
+    assert c.blahblah == 1
+
+
+def test_nodata_error():
+    from ..builtin_frames import ICRS
+
+    i = ICRS()
+    with pytest.raises(ValueError) as excinfo:
+        i.data
+
+    assert 'does not have associated data' in str(excinfo.value)
+
+
+def test_len0_data():
+    from ..builtin_frames import ICRS
+
+    i = ICRS([]*u.deg, []*u.deg)
+    assert i.has_data
+    repr(i)
+
+
+def test_quantity_attributes():
+    from ..builtin_frames import GCRS
+
+    # make sure we can create a GCRS frame with valid inputs
+    GCRS(obstime='J2002', obsgeoloc=[1, 2, 3]*u.km, obsgeovel=[4, 5, 6]*u.km/u.s)
+
+    # make sure it fails for invalid locs or vels
+        GCRS(obsgeoloc=[1, 2, 3])  # no unit
+    with pytest.raises(u.UnitsError):
+        GCRS(obsgeoloc=[1, 2, 3]*u.km/u.s)  # incorrect unit
+    with pytest.raises(ValueError):
+        GCRS(obsgeoloc=[1, 3]*u.km)  # incorrect shape
+
+
+def test_eloc_attributes():
+    from .. import AltAz, ITRS, GCRS, EarthLocation
+
+    el = EarthLocation(lon=12.3*u.deg, lat=45.6*u.deg, height=1*u.km)
+    it = ITRS(r.SphericalRepresentation(lon=12.3*u.deg, lat=45.6*u.deg, distance=1*u.km))
+    gc = GCRS(ra=12.3*u.deg, dec=45.6*u.deg, distance=6375*u.km)
+
+    el1 = AltAz(location=el).location
+    assert isinstance(el1, EarthLocation)
+    # these should match *exactly* because the EarthLocation is passed straight through
+    assert el1.lat == el.lat
+    assert el1.lon == el.lon
+    assert el1.height == el.height
+
+    el2 = AltAz(location=it).location
+    assert isinstance(el2, EarthLocation)
+    # these should *not* match because giving something in Spherical ITRS is
+    # *not* the same as giving it as an EarthLocation: EarthLocation is on an
+    # elliptical geoid. So the longitude should match (because flattening is
+    # only along the z-axis), but latitude should not. Also, height is relative
+    # to the *surface* in EarthLocation, but the ITRS distance is relative to
+    # the center of the Earth
+    assert not allclose(el2.lat, it.spherical.lat)
+    assert allclose(el2.lon, it.spherical.lon)
+    assert el2.height < -6000*u.km
+
+    el3 = AltAz(location=gc).location
+    # GCRS inputs implicitly get transformed to ITRS and then onto
+    # EarthLocation's elliptical geoid. So both lat and lon shouldn't match
+    assert isinstance(el3, EarthLocation)
+    assert not allclose(el3.lat, gc.dec)
+    assert not allclose(el3.lon, gc.ra)
+    assert np.abs(el3.height) < 500*u.km
+
+
+def test_equivalent_frames():
+    from .. import SkyCoord
+    from ..builtin_frames import ICRS, FK4, FK5, AltAz
+
+    i = ICRS()
+    i2 = ICRS(1*u.deg, 2*u.deg)
+    assert i.is_equivalent_frame(i)
+    assert i.is_equivalent_frame(i2)
+    with pytest.raises(TypeError):
+        assert i.is_equivalent_frame(10)
+    with pytest.raises(TypeError):
+        assert i2.is_equivalent_frame(SkyCoord(i2))
+
+    f1 = FK5()
+    f2 = FK5(1*u.deg, 2*u.deg, equinox='J2000')
+    f3 = FK5(equinox='J2010')
+    f4 = FK4(equinox='J2010')
+
+    assert f1.is_equivalent_frame(f1)
+    assert not i.is_equivalent_frame(f1)
+    assert f1.is_equivalent_frame(f2)
+    assert not f1.is_equivalent_frame(f3)
+    assert not f3.is_equivalent_frame(f4)
+
+    aa1 = AltAz()
+    aa2 = AltAz(obstime='J2010')
+
+    assert aa2.is_equivalent_frame(aa2)
+    assert not aa1.is_equivalent_frame(i)
+    assert not aa1.is_equivalent_frame(aa2)
+
+
+def test_representation_subclass():
+
+    # Regression test for #3354
+
+    from ..builtin_frames import FK5
+
+    # Normally when instantiating a frame without a distance the frame will try
+    # to use UnitSphericalRepresentation internally instead of
+    # SphericalRepresentation.
+    frame = FK5(representation=r.SphericalRepresentation, ra=32 * u.deg, dec=20 * u.deg)
+    assert type(frame._data) == r.UnitSphericalRepresentation
+    assert frame.representation == r.SphericalRepresentation
+
+    # If using a SphericalRepresentation class this used to not work, so we
+    # test here that this is now fixed.
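+    # A custom representation only needs to subclass and copy ``attr_classes``
+    # to be usable here, as the two classes defined below illustrate.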
+    class NewSphericalRepresentation(r.SphericalRepresentation):
+        attr_classes = r.SphericalRepresentation.attr_classes
+
+    frame = FK5(representation=NewSphericalRepresentation, lon=32 * u.deg, lat=20 * u.deg)
+    assert type(frame._data) == r.UnitSphericalRepresentation
+    assert frame.representation == NewSphericalRepresentation
+
+    # A similar issue then happened in __repr__ with subclasses of
+    # SphericalRepresentation.
+    assert repr(frame) == ("<FK5 Coordinate (equinox=J2000.000): (lon, lat) in deg\n"
+                           "    ({0})>").format(' 32., 20.' if NUMPY_LT_1_14
+                                                else '32., 20.')
+
+    # A more subtle issue is when specifying a custom
+    # UnitSphericalRepresentation subclass for the data and
+    # SphericalRepresentation or a subclass for the representation.
+
+    class NewUnitSphericalRepresentation(r.UnitSphericalRepresentation):
+        attr_classes = r.UnitSphericalRepresentation.attr_classes
+
+        def __repr__(self):
+            return "<NewUnitSphericalRepresentation: spam spam spam>"
+
+    frame = FK5(NewUnitSphericalRepresentation(lon=32 * u.deg, lat=20 * u.deg),
+                representation=NewSphericalRepresentation)
+
+    assert repr(frame) == "<FK5 Coordinate (equinox=J2000.000):  spam spam spam>"
+
+
+def test_getitem_representation():
+    """
+    Make sure current representation survives __getitem__ even if different
+    from data representation.
+    """
+    from ..builtin_frames import ICRS
+    c = ICRS([1, 1] * u.deg, [2, 2] * u.deg)
+    c.representation = 'cartesian'
+    assert c[0].representation is r.CartesianRepresentation
+
+
+def test_component_error_useful():
+    """
+    Check that a data-less frame gives useful error messages about not having
+    data when the attributes asked for are possible coordinate components
+    """
+    from ..builtin_frames import ICRS
+
+    i = ICRS()
+
+    with pytest.raises(ValueError) as excinfo:
+        i.ra
+    assert 'does not have associated data' in str(excinfo.value)
+
+    with pytest.raises(AttributeError) as excinfo1:
+        i.foobar
+    with pytest.raises(AttributeError) as excinfo2:
+        i.lon  # lon is *not* the component name despite being the underlying representation's name
+    assert "object has no attribute 'foobar'" in str(excinfo1.value)
+    assert "object has no attribute 'lon'" in str(excinfo2.value)
+
+
+def test_cache_clear():
+    from ..builtin_frames import ICRS
+
+    i = ICRS(1*u.deg, 2*u.deg)
+
+    # Add an in frame units version of the rep to the cache.
+    repr(i)
+
+    assert len(i.cache['representation']) == 2
+
+    i.cache.clear()
+
+    assert len(i.cache['representation']) == 0
+
+
+def test_inplace_array():
+    from ..builtin_frames import ICRS
+
+    i = ICRS([[1, 2], [3, 4]]*u.deg, [[10, 20], [30, 40]]*u.deg)
+
+    # Add an in frame units version of the rep to the cache.
+    repr(i)
+
+    # Check that repr() has added a rep to the cache
+    assert len(i.cache['representation']) == 2
+
+    # Modify the data
+    i.data.lon[:, 0] = [100, 200]*u.deg
+
+    # Clear the cache
+    i.cache.clear()
+
+    # These are now recomputed rather than coming from the (cleared) cache
+    assert_allclose(i.ra, [[100, 2], [200, 4]]*u.deg)
+    assert_allclose(i.dec, [[10, 20], [30, 40]]*u.deg)
+
+
+def test_inplace_change():
+    from ..builtin_frames import ICRS
+
+    i = ICRS(1*u.deg, 2*u.deg)
+
+    # Add an in frame units version of the rep to the cache.
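+    # (Calling repr() formats the data, which populates the cache with both
+    # the raw and the in-frame-units forms, hence the length of 2 below.)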
+    repr(i)
+
+    # Check that repr() has added a rep to the cache
+    assert len(i.cache['representation']) == 2
+
+    # Modify the data
+    i.data.lon[()] = 10*u.deg
+
+    # Clear the cache
+    i.cache.clear()
+
+    # These are now recomputed rather than coming from the (cleared) cache
+    assert i.ra == 10 * u.deg
+    assert i.dec == 2 * u.deg
+
+
+def test_representation_with_multiple_differentials():
+    from ..builtin_frames import ICRS
+
+    dif1 = r.CartesianDifferential([1, 2, 3]*u.km/u.s)
+    dif2 = r.CartesianDifferential([1, 2, 3]*u.km/u.s**2)
+    rep = r.CartesianRepresentation([1, 2, 3]*u.pc,
+                                    differentials={'s': dif1, 's2': dif2})
+
+    # check that an error is raised: frames only support a single (velocity)
+    # differential, keyed on 's'
+    with pytest.raises(ValueError):
+        ICRS(rep)
diff --git a/astropy/coordinates/tests/test_frames_with_velocity.py b/astropy/coordinates/tests/test_frames_with_velocity.py
new file mode 100644
index 0000000..b6e1186
--- /dev/null
+++ b/astropy/coordinates/tests/test_frames_with_velocity.py
@@ -0,0 +1,223 @@
+# -*- coding: utf-8 -*-
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
+
+import pytest
+
+from ... import units as u
+from ..builtin_frames import ICRS, Galactic, Galactocentric
+from .. import builtin_frames as bf
+from ...tests.helper import quantity_allclose
+from ..errors import ConvertError
+from .. import representation as r
+
+def test_api():
+    # transform observed Barycentric velocities to full-space Galactocentric
+    gc_frame = Galactocentric()
+    icrs = ICRS(ra=151.*u.deg, dec=-16*u.deg, distance=101*u.pc,
+                pm_ra_cosdec=21*u.mas/u.yr, pm_dec=-71*u.mas/u.yr,
+                radial_velocity=71*u.km/u.s)
+    icrs.transform_to(gc_frame)
+
+    # transform a set of ICRS proper motions to Galactic
+    icrs = ICRS(ra=151.*u.deg, dec=-16*u.deg,
+                pm_ra_cosdec=21*u.mas/u.yr, pm_dec=-71*u.mas/u.yr)
+    icrs.transform_to(Galactic)
+
+    # transform a Barycentric RV to a GSR RV
+    icrs = ICRS(ra=151.*u.deg, dec=-16*u.deg, distance=1.*u.pc,
+                pm_ra_cosdec=0*u.mas/u.yr, pm_dec=0*u.mas/u.yr,
+                radial_velocity=71*u.km/u.s)
+    icrs.transform_to(Galactocentric)
+
+all_kwargs = [
+    dict(ra=37.4*u.deg, dec=-55.8*u.deg),
+    dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc),
+    dict(ra=37.4*u.deg, dec=-55.8*u.deg,
+         pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr),
+    dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc,
+         pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr),
+    dict(ra=37.4*u.deg, dec=-55.8*u.deg,
+         radial_velocity=105.7*u.km/u.s),
+    dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc,
+         radial_velocity=105.7*u.km/u.s),
+    dict(ra=37.4*u.deg, dec=-55.8*u.deg,
+         radial_velocity=105.7*u.km/u.s,
+         pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr),
+    dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc,
+         pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr,
+         radial_velocity=105.7*u.km/u.s)
+]
+
+@pytest.mark.parametrize('kwargs', all_kwargs)
+def test_all_arg_options(kwargs):
+    # Above is a list of all possible valid combinations of arguments.
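+    # As a sketch, the fullest entry above is equivalent to:
+    #   icrs = ICRS(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc,
+    #               pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr,
+    #               radial_velocity=105.7*u.km/u.s)
+    #   icrs.transform_to(Galactic).pm_l_cosb  # proper motion in the new frame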
+    # Here we do a simple thing and just verify that, having passed them in,
+    # we have access to the relevant attributes from the resulting object
+    icrs = ICRS(**kwargs)
+    gal = icrs.transform_to(Galactic)
+    repr_gal = repr(gal)
+
+    for k in kwargs:
+        getattr(icrs, k)
+
+    if 'pm_ra_cosdec' in kwargs:  # should have both proper motion components
+        assert 'pm_l_cosb' in repr_gal
+        assert 'pm_b' in repr_gal
+        assert 'mas / yr' in repr_gal
+
+        if 'radial_velocity' not in kwargs:
+            assert 'radial_velocity' not in repr_gal
+
+    if 'radial_velocity' in kwargs:
+        assert 'radial_velocity' in repr_gal
+        assert 'km / s' in repr_gal
+
+        if 'pm_ra_cosdec' not in kwargs:
+            assert 'pm_l_cosb' not in repr_gal
+            assert 'pm_b' not in repr_gal
+
+@pytest.mark.parametrize('cls,lon,lat', [
+    [bf.ICRS, 'ra', 'dec'], [bf.FK4, 'ra', 'dec'], [bf.FK4NoETerms, 'ra', 'dec'],
+    [bf.FK5, 'ra', 'dec'], [bf.GCRS, 'ra', 'dec'], [bf.HCRS, 'ra', 'dec'],
+    [bf.LSR, 'ra', 'dec'], [bf.CIRS, 'ra', 'dec'], [bf.Galactic, 'l', 'b'],
+    [bf.AltAz, 'az', 'alt'], [bf.Supergalactic, 'sgl', 'sgb'],
+    [bf.GalacticLSR, 'l', 'b'], [bf.HeliocentricTrueEcliptic, 'lon', 'lat'],
+    [bf.GeocentricTrueEcliptic, 'lon', 'lat'],
+    [bf.BarycentricTrueEcliptic, 'lon', 'lat'],
+    [bf.PrecessedGeocentric, 'ra', 'dec']
+])
+def test_expected_arg_names(cls, lon, lat):
+    kwargs = {lon: 37.4*u.deg, lat: -55.8*u.deg, 'distance': 150*u.pc,
+              'pm_{0}_cos{1}'.format(lon, lat): -21.2*u.mas/u.yr,
+              'pm_{0}'.format(lat): 17.1*u.mas/u.yr,
+              'radial_velocity': 105.7*u.km/u.s}
+    frame = cls(**kwargs)
+
+
+# these data are extracted from the vizier copy of XHIP:
+# http://vizier.u-strasbg.fr/viz-bin/VizieR-3?-source=+V/137A/XHIP
+_xhip_head = """
+------ ------------ ------------ -------- -------- ------------ ------------ ------- -------- -------- ------- ------ ------ ------
+       R            D            pmRA     pmDE                               Di      pmGLon   pmGLat   RV      U      V      W
+HIP    AJ2000 (deg) EJ2000 (deg) (mas/yr) (mas/yr) GLon (deg)   GLat (deg)   st (pc) (mas/yr) (mas/yr) (km/s)  (km/s) (km/s) (km/s)
+------ ------------ ------------ -------- -------- ------------ ------------ ------- -------- -------- ------- ------ ------ ------
+"""[1:-1]
+_xhip_data = """
+    19 000.05331690 +38.30408633    -3.17   -15.37 112.00026470 -23.47789171  247.12    -6.40   -14.33    6.30    7.3    2.0  -17.9
+    20 000.06295067 +23.52928427    36.11   -22.48 108.02779304 -37.85659811   95.90    29.35   -30.78   37.80  -19.3   16.1  -34.2
+    21 000.06623581 +08.00723430    61.48    -0.23 101.69697120 -52.74179515  183.68    58.06   -20.23  -11.72  -45.2  -30.9   -1.3
+ 24917 080.09698238 -33.39874984    -4.30    13.40 236.92324669 -32.58047131  107.38   -14.03    -1.15   36.10  -22.4  -21.3  -19.9
+ 59207 182.13915108 +65.34963517    18.17     5.49 130.04157185  51.18258601   56.00   -18.98    -0.49    5.70    1.5    6.1    4.4
+ 87992 269.60730667 +36.87462906   -89.58    72.46  62.98053142  25.90148234  129.60    45.64   105.79   -4.00  -39.5  -15.8   56.7
+115110 349.72322473 -28.74087144    48.86    -9.25  23.00447250 -69.52799804  116.87    -8.37   -49.02   15.00  -16.8  -12.2  -23.6
+"""[1:-1]
+
+# in principle we could parse the above as a table, but doing it "manually"
+# makes this test less tied to Table working correctly
+
+
+@pytest.mark.parametrize('hip,ra,dec,pmra,pmdec,glon,glat,dist,pmglon,pmglat,rv,U,V,W',
+                         [[float(val) for val in row.split()] for row in _xhip_data.split('\n')])
+def test_xhip_galactic(hip, ra, dec, pmra, pmdec, glon, glat, dist, pmglon, pmglat, rv, U, V, W):
+    i = ICRS(ra*u.deg, dec*u.deg, dist*u.pc,
+             pm_ra_cosdec=pmra*u.marcsec/u.yr, pm_dec=pmdec*u.marcsec/u.yr,
+             radial_velocity=rv*u.km/u.s)
+    g = i.transform_to(Galactic)
+
+    # precision is limited by the 2-decimal digit string
representation of pms + assert quantity_allclose(g.pm_l_cosb, pmglon*u.marcsec/u.yr, atol=.01*u.marcsec/u.yr) + assert quantity_allclose(g.pm_b, pmglat*u.marcsec/u.yr, atol=.01*u.marcsec/u.yr) + + # make sure UVW also makes sense + uvwg = g.cartesian.differentials['s'] + # precision is limited by 1-decimal digit string representation of vels + assert quantity_allclose(uvwg.d_x, U*u.km/u.s, atol=.1*u.km/u.s) + assert quantity_allclose(uvwg.d_y, V*u.km/u.s, atol=.1*u.km/u.s) + assert quantity_allclose(uvwg.d_z, W*u.km/u.s, atol=.1*u.km/u.s) + +@pytest.mark.parametrize('kwargs,expect_success', [ + [dict(ra=37.4*u.deg, dec=-55.8*u.deg), False], + [dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc), True], + [dict(ra=37.4*u.deg, dec=-55.8*u.deg, + pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr), False], + [dict(ra=37.4*u.deg, dec=-55.8*u.deg, radial_velocity=105.7*u.km/u.s), False], + [dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc, + radial_velocity=105.7*u.km/u.s), False], + [dict(ra=37.4*u.deg, dec=-55.8*u.deg, + radial_velocity=105.7*u.km/u.s, + pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr), False], + [dict(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc, + pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr, + radial_velocity=105.7*u.km/u.s), True] + +]) +def test_frame_affinetransform(kwargs, expect_success): + """There are already tests in test_transformations.py that check that + an AffineTransform fails without full-space data, but this just checks that + things work as expected at the frame level as well. + """ + + icrs = ICRS(**kwargs) + + if expect_success: + gc = icrs.transform_to(Galactocentric) + + else: + with pytest.raises(ConvertError): + icrs.transform_to(Galactocentric) + +def test_differential_cls_arg(): + """ + Test passing in an explicit differential class to the initializer or + changing the differential class via set_representation_cls + """ + from ..builtin_frames import ICRS + + icrs = ICRS(ra=1*u.deg, dec=60*u.deg, + pm_ra=10*u.mas/u.yr, pm_dec=-11*u.mas/u.yr, + differential_cls=r.UnitSphericalDifferential) + assert icrs.pm_ra == 10*u.mas/u.yr + + icrs = ICRS(ra=1*u.deg, dec=60*u.deg, + pm_ra=10*u.mas/u.yr, pm_dec=-11*u.mas/u.yr, + differential_cls={'s': r.UnitSphericalDifferential}) + assert icrs.pm_ra == 10*u.mas/u.yr + + icrs = ICRS(ra=1*u.deg, dec=60*u.deg, + pm_ra_cosdec=10*u.mas/u.yr, pm_dec=-11*u.mas/u.yr) + icrs.set_representation_cls(s=r.UnitSphericalDifferential) + assert quantity_allclose(icrs.pm_ra, 20*u.mas/u.yr) + + # incompatible representation and differential + with pytest.raises(TypeError): + ICRS(ra=1*u.deg, dec=60*u.deg, + v_x=1*u.km/u.s, v_y=-2*u.km/u.s, v_z=-2*u.km/u.s, + differential_cls=r.CartesianDifferential) + + # specify both + icrs = ICRS(x=1*u.pc, y=2*u.pc, z=3*u.pc, + v_x=1*u.km/u.s, v_y=2*u.km/u.s, v_z=3*u.km/u.s, + representation=r.CartesianRepresentation, + differential_cls=r.CartesianDifferential) + assert icrs.x == 1*u.pc + assert icrs.y == 2*u.pc + assert icrs.z == 3*u.pc + assert icrs.v_x == 1*u.km/u.s + assert icrs.v_y == 2*u.km/u.s + assert icrs.v_z == 3*u.km/u.s + + +def test_slicing_preserves_differential(): + icrs = ICRS(ra=37.4*u.deg, dec=-55.8*u.deg, distance=150*u.pc, + pm_ra_cosdec=-21.2*u.mas/u.yr, pm_dec=17.1*u.mas/u.yr, + radial_velocity=105.7*u.km/u.s) + icrs2 = icrs.reshape(1,1)[:1,0] + + for name in icrs.representation_component_names.keys(): + assert getattr(icrs, name) == getattr(icrs2, name)[0] + + for name in icrs.get_representation_component_names('s').keys(): + assert 
getattr(icrs, name) == getattr(icrs2, name)[0] diff --git a/astropy/coordinates/tests/test_funcs.py b/astropy/coordinates/tests/test_funcs.py new file mode 100644 index 0000000..78a1d0d --- /dev/null +++ b/astropy/coordinates/tests/test_funcs.py @@ -0,0 +1,81 @@ +# -*- coding: utf-8 -*- +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +""" +Tests for miscellaneous functionality in the `funcs` module +""" + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import pytest +import numpy as np +from numpy import testing as npt + +from ...extern import six + +from ... import units as u +from ...time import Time + + +def test_sun(): + """ + Test that `get_sun` works and it behaves roughly as it should (in GCRS) + """ + from ..funcs import get_sun + + northern_summer_solstice = Time('2010-6-21') + northern_winter_solstice = Time('2010-12-21') + equinox_1 = Time('2010-3-21') + equinox_2 = Time('2010-9-21') + + gcrs1 = get_sun(equinox_1) + assert np.abs(gcrs1.dec.deg) < 1 + + gcrs2 = get_sun(Time([northern_summer_solstice, equinox_2, northern_winter_solstice])) + assert np.all(np.abs(gcrs2.dec - [23.5, 0, -23.5]*u.deg) < 1*u.deg) + + +def test_concatenate(): + from .. import FK5, SkyCoord + from ..funcs import concatenate + + fk5 = FK5(1*u.deg, 2*u.deg) + sc = SkyCoord(3*u.deg, 4*u.deg, frame='fk5') + + res = concatenate([fk5, sc]) + np.testing.assert_allclose(res.ra, [1, 3]*u.deg) + np.testing.assert_allclose(res.dec, [2, 4]*u.deg) + + with pytest.raises(TypeError): + concatenate(fk5) + + with pytest.raises(TypeError): + concatenate(1*u.deg) + + +def test_constellations(): + from .. import ICRS, FK5, SkyCoord + from ..funcs import get_constellation + + inuma = ICRS(9*u.hour, 65*u.deg) + res = get_constellation(inuma) + res_short = get_constellation(inuma, short_name=True) + assert res == 'Ursa Major' + assert res_short == 'UMa' + assert isinstance(res, six.string_types) or getattr(res, 'shape', None) == tuple() + + # these are taken from the ReadMe for Roman 1987 + ras = [9, 23.5, 5.12, 9.4555, 12.8888, 15.6687, 19, 6.2222] + decs = [65, -20, 9.12, -19.9, 22, -12.1234, -40, -81.1234] + shortnames = ['UMa', 'Aqr', 'Ori', 'Hya', 'Com', 'Lib', 'CrA', 'Men'] + + testcoos = FK5(ras*u.hour, decs*u.deg, equinox='B1950') + npt.assert_equal(get_constellation(testcoos, short_name=True), shortnames) + + # test on a SkyCoord, *and* test Boötes, which is special in that it has a + # non-ASCII character + bootest = SkyCoord(15*u.hour, 30*u.deg, frame='icrs') + boores = get_constellation(bootest) + assert boores == u'Boötes' + assert isinstance(boores, six.string_types) or getattr(boores, 'shape', None) == tuple() diff --git a/astropy/coordinates/tests/test_iau_fullstack.py b/astropy/coordinates/tests/test_iau_fullstack.py new file mode 100644 index 0000000..c0d8322 --- /dev/null +++ b/astropy/coordinates/tests/test_iau_fullstack.py @@ -0,0 +1,182 @@ +# -*- coding: utf-8 -*- +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import pytest +import numpy as np +from numpy import testing as npt + +from ... import units as u +from ...time import Time +from ..builtin_frames import ICRS, AltAz +from ..builtin_frames.utils import get_jd12 +from .. import EarthLocation +from .. import SkyCoord +from ...tests.helper import catch_warnings +from ... 
import _erfa as erfa
+from ...utils import iers
+from .utils import randomly_sample_sphere
+
+
+# These fixtures are used in test_iau_fullstack
+@pytest.fixture(scope="function")
+def fullstack_icrs():
+    ra, dec, _ = randomly_sample_sphere(1000)
+    return ICRS(ra=ra, dec=dec)
+
+
+@pytest.fixture(scope="function")
+def fullstack_fiducial_altaz(fullstack_icrs):
+    altazframe = AltAz(location=EarthLocation(lat=0*u.deg, lon=0*u.deg, height=0*u.m),
+                       obstime=Time('J2000'))
+    return fullstack_icrs.transform_to(altazframe)
+
+
+@pytest.fixture(scope="function", params=['J2000.1', 'J2010'])
+def fullstack_times(request):
+    return Time(request.param)
+
+
+@pytest.fixture(scope="function", params=[(0, 0, 0), (23, 0, 0), (-70, 0, 0), (0, 100, 0), (23, 0, 3000)])
+def fullstack_locations(request):
+    # params are (lat, lon, height) tuples; index each element so that the
+    # non-zero longitudes and heights above actually get used
+    lat, lon, height = request.param
+    return EarthLocation(lat=lat*u.deg, lon=lon*u.deg, height=height*u.m)
+
+
+@pytest.fixture(scope="function", params=[(0*u.bar, 0*u.deg_C, 0, 1*u.micron),
+                                          (1*u.bar, 0*u.deg_C, 0, 1*u.micron),
+                                          (1*u.bar, 10*u.deg_C, 0, 1*u.micron),
+                                          (1*u.bar, 0*u.deg_C, .5, 1*u.micron),
+                                          (1*u.bar, 0*u.deg_C, 0, 21*u.cm)])
+def fullstack_obsconditions(request):
+    return request.param
+
+
+def _erfa_check(ira, idec, astrom):
+    """
+    This function does the same thing the astropy layer is supposed to do, but
+    all in erfa
+    """
+    cra, cdec = erfa.atciq(ira, idec, 0, 0, 0, 0, astrom)
+    az, zen, ha, odec, ora = erfa.atioq(cra, cdec, astrom)
+    alt = np.pi/2-zen
+    cra2, cdec2 = erfa.atoiq('A', az, zen, astrom)
+    ira2, idec2 = erfa.aticq(cra2, cdec2, astrom)
+
+    dct = locals()
+    del dct['astrom']
+    return dct
+
+
+def test_iau_fullstack(fullstack_icrs, fullstack_fiducial_altaz,
+                       fullstack_times, fullstack_locations,
+                       fullstack_obsconditions):
+    """
+    Test the full transform from ICRS <-> AltAz
+    """
+
+    # create the altaz frame
+    altazframe = AltAz(obstime=fullstack_times, location=fullstack_locations,
+                       pressure=fullstack_obsconditions[0],
+                       temperature=fullstack_obsconditions[1],
+                       relative_humidity=fullstack_obsconditions[2],
+                       obswl=fullstack_obsconditions[3])
+
+    aacoo = fullstack_icrs.transform_to(altazframe)
+
+    # compare aacoo to the fiducial AltAz - should always be different
+    assert np.all(np.abs(aacoo.alt - fullstack_fiducial_altaz.alt) > 50*u.milliarcsecond)
+    assert np.all(np.abs(aacoo.az - fullstack_fiducial_altaz.az) > 50*u.milliarcsecond)
+
+    # if the refraction correction is included, we *only* do the comparisons
+    # where altitude > 5 degrees. The SOFA guides imply that below 5 is where
+    # accuracy gets more problematic, and testing reveals that alt < ~0
+    # gives garbage round-tripping, and < 10 can give ~1 arcsec uncertainty
+    if fullstack_obsconditions[0].value == 0:
+        # but if there is no refraction correction, check everything
+        msk = slice(None)
+        tol = 5*u.microarcsecond
+    else:
+        msk = aacoo.alt > 5*u.deg
+        # most of them aren't this bad, but some of those at low alt are offset
+        # this much.
+        # For alt > 10, this is always better than 100 milliarcsec
+        tol = 750*u.milliarcsecond
+
+    # now make sure the full stack round-tripping works
+    icrs2 = aacoo.transform_to(ICRS)
+
+    adras = np.abs(fullstack_icrs.ra - icrs2.ra)[msk]
+    addecs = np.abs(fullstack_icrs.dec - icrs2.dec)[msk]
+    assert np.all(adras < tol), 'largest RA change is {0} mas, > {1}'.format(np.max(adras.arcsec*1000), tol)
+    assert np.all(addecs < tol), 'largest Dec change is {0} mas, > {1}'.format(np.max(addecs.arcsec*1000), tol)
+
+    # check that we're consistent with the ERFA alt/az result
+    xp, yp = u.Quantity(iers.IERS_Auto.open().pm_xy(fullstack_times)).to_value(u.radian)
+    lon = fullstack_locations.geodetic[0].to_value(u.radian)
+    lat = fullstack_locations.geodetic[1].to_value(u.radian)
+    height = fullstack_locations.geodetic[2].to_value(u.m)
+    jd1, jd2 = get_jd12(fullstack_times, 'utc')
+    astrom, eo = erfa.apco13(jd1, jd2,
+                             fullstack_times.delta_ut1_utc,
+                             lon, lat, height,
+                             xp, yp,
+                             fullstack_obsconditions[0].to_value(u.hPa),
+                             fullstack_obsconditions[1].to_value(u.deg_C),
+                             fullstack_obsconditions[2],
+                             fullstack_obsconditions[3].to_value(u.micron))
+    erfadct = _erfa_check(fullstack_icrs.ra.rad, fullstack_icrs.dec.rad, astrom)
+    npt.assert_allclose(erfadct['alt'], aacoo.alt.radian, atol=1e-7)
+    npt.assert_allclose(erfadct['az'], aacoo.az.radian, atol=1e-7)
+
+
+def test_fiducial_roundtrip(fullstack_icrs, fullstack_fiducial_altaz):
+    """
+    Test that the ICRS <-> AltAz transform round-trips through the fiducial
+    AltAz frame.
+    """
+    aacoo = fullstack_icrs.transform_to(fullstack_fiducial_altaz)
+
+    # make sure the round-tripping works
+    icrs2 = aacoo.transform_to(ICRS)
+    npt.assert_allclose(fullstack_icrs.ra.deg, icrs2.ra.deg)
+    npt.assert_allclose(fullstack_icrs.dec.deg, icrs2.dec.deg)
+
+
+def test_future_altaz():
+    """
+    While this does test the full stack, it is mostly meant to check that a
+    warning is raised when attempting to get to AltAz in the future (beyond
+    IERS tables)
+    """
+    from ...utils.exceptions import AstropyWarning
+
+    # this is an ugly hack to get the warning to show up even if it has already
+    # appeared
+    from ..builtin_frames import utils
+    if hasattr(utils, '__warningregistry__'):
+        utils.__warningregistry__.clear()
+
+    with catch_warnings() as found_warnings:
+
+        location = EarthLocation(lat=0*u.deg, lon=0*u.deg)
+        t = Time('J2161')
+
+        SkyCoord(1*u.deg, 2*u.deg).transform_to(AltAz(location=location, obstime=t))
+
+    # check that these message(s) appear among any other warnings. If tests are run with
+    # --remote-data then the IERS table will be an instance of IERS_Auto which is
+    # assured of being "fresh". In this case getting times outside the range of the
+    # table does not raise an exception. Only if using IERS_B (which happens without
+    # --remote-data, i.e. for all CI testing) do we expect another warning.
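+    # The matching below is deliberately loose: each expected message only has
+    # to appear at least once among the AstropyWarning instances that were caught.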
+ messages_to_find = ["Tried to get polar motions for times after IERS data is valid."] + if isinstance(iers.IERS_Auto.iers_table, iers.IERS_B): + messages_to_find.append("(some) times are outside of range covered by IERS table.") + + messages_found = [False for _ in messages_to_find] + for w in found_warnings: + if issubclass(w.category, AstropyWarning): + for i, message_to_find in enumerate(messages_to_find): + if message_to_find in str(w.message): + messages_found[i] = True + assert all(messages_found) diff --git a/astropy/coordinates/tests/test_intermediate_transformations.py b/astropy/coordinates/tests/test_intermediate_transformations.py new file mode 100644 index 0000000..1785d29 --- /dev/null +++ b/astropy/coordinates/tests/test_intermediate_transformations.py @@ -0,0 +1,521 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +"""Accuracy tests for GCRS coordinate transformations, primarily to/from AltAz. + +""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import pytest +import numpy as np + +from ... import units as u +from ...tests.helper import (remote_data, quantity_allclose as allclose, + assert_quantity_allclose as assert_allclose) +from ...time import Time +from .. import (EarthLocation, get_sun, ICRS, GCRS, CIRS, ITRS, AltAz, + PrecessedGeocentric, CartesianRepresentation, SkyCoord, + SphericalRepresentation, UnitSphericalRepresentation, + HCRS, HeliocentricTrueEcliptic) + + +from ..._erfa import epv00 + +from .utils import randomly_sample_sphere +from ..builtin_frames.utils import get_jd12 +from .. import solar_system_ephemeris + +try: + import jplephem # pylint: disable=W0611 +except ImportError: + HAS_JPLEPHEM = False +else: + HAS_JPLEPHEM = True + + +def test_icrs_cirs(): + """ + Check a few cases of ICRS<->CIRS for consistency. 
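+    (CIRS, the Celestial Intermediate Reference System, is a geocentric frame
+    tied to a specific obstime, so these transforms are time-dependent.)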
+ + Also includes the CIRS<->CIRS transforms at different times, as those go + through ICRS + """ + ra, dec, dist = randomly_sample_sphere(200) + inod = ICRS(ra=ra, dec=dec) + iwd = ICRS(ra=ra, dec=dec, distance=dist*u.pc) + + cframe1 = CIRS() + cirsnod = inod.transform_to(cframe1) # uses the default time + # first do a round-tripping test + inod2 = cirsnod.transform_to(ICRS) + assert_allclose(inod.ra, inod2.ra) + assert_allclose(inod.dec, inod2.dec) + + # now check that a different time yields different answers + cframe2 = CIRS(obstime=Time('J2005', scale='utc')) + cirsnod2 = inod.transform_to(cframe2) + assert not allclose(cirsnod.ra, cirsnod2.ra, rtol=1e-8) + assert not allclose(cirsnod.dec, cirsnod2.dec, rtol=1e-8) + + # parallax effects should be included, so with and w/o distance should be different + cirswd = iwd.transform_to(cframe1) + assert not allclose(cirswd.ra, cirsnod.ra, rtol=1e-8) + assert not allclose(cirswd.dec, cirsnod.dec, rtol=1e-8) + # and the distance should transform at least somehow + assert not allclose(cirswd.distance, iwd.distance, rtol=1e-8) + + # now check that the cirs self-transform works as expected + cirsnod3 = cirsnod.transform_to(cframe1) # should be a no-op + assert_allclose(cirsnod.ra, cirsnod3.ra) + assert_allclose(cirsnod.dec, cirsnod3.dec) + + cirsnod4 = cirsnod.transform_to(cframe2) # should be different + assert not allclose(cirsnod4.ra, cirsnod.ra, rtol=1e-8) + assert not allclose(cirsnod4.dec, cirsnod.dec, rtol=1e-8) + + cirsnod5 = cirsnod4.transform_to(cframe1) # should be back to the same + assert_allclose(cirsnod.ra, cirsnod5.ra) + assert_allclose(cirsnod.dec, cirsnod5.dec) + + +ra, dec, dist = randomly_sample_sphere(200) +icrs_coords = [ICRS(ra=ra, dec=dec), ICRS(ra=ra, dec=dec, distance=dist*u.pc)] +gcrs_frames = [GCRS(), GCRS(obstime=Time('J2005', scale='utc'))] + + +@pytest.mark.parametrize('icoo', icrs_coords) +def test_icrs_gcrs(icoo): + """ + Check ICRS<->GCRS for consistency + """ + gcrscoo = icoo.transform_to(gcrs_frames[0]) # uses the default time + # first do a round-tripping test + icoo2 = gcrscoo.transform_to(ICRS) + assert_allclose(icoo.distance, icoo2.distance) + assert_allclose(icoo.ra, icoo2.ra) + assert_allclose(icoo.dec, icoo2.dec) + assert isinstance(icoo2.data, icoo.data.__class__) + + # now check that a different time yields different answers + gcrscoo2 = icoo.transform_to(gcrs_frames[1]) + assert not allclose(gcrscoo.ra, gcrscoo2.ra, rtol=1e-8, atol=1e-10*u.deg) + assert not allclose(gcrscoo.dec, gcrscoo2.dec, rtol=1e-8, atol=1e-10*u.deg) + + # now check that the cirs self-transform works as expected + gcrscoo3 = gcrscoo.transform_to(gcrs_frames[0]) # should be a no-op + assert_allclose(gcrscoo.ra, gcrscoo3.ra) + assert_allclose(gcrscoo.dec, gcrscoo3.dec) + + gcrscoo4 = gcrscoo.transform_to(gcrs_frames[1]) # should be different + assert not allclose(gcrscoo4.ra, gcrscoo.ra, rtol=1e-8, atol=1e-10*u.deg) + assert not allclose(gcrscoo4.dec, gcrscoo.dec, rtol=1e-8, atol=1e-10*u.deg) + + gcrscoo5 = gcrscoo4.transform_to(gcrs_frames[0]) # should be back to the same + assert_allclose(gcrscoo.ra, gcrscoo5.ra, rtol=1e-8, atol=1e-10*u.deg) + assert_allclose(gcrscoo.dec, gcrscoo5.dec, rtol=1e-8, atol=1e-10*u.deg) + + # also make sure that a GCRS with a different geoloc/geovel gets a different answer + # roughly a moon-like frame + gframe3 = GCRS(obsgeoloc=[385000., 0, 0]*u.km, obsgeovel=[1, 0, 0]*u.km/u.s) + gcrscoo6 = icoo.transform_to(gframe3) # should be different + assert not allclose(gcrscoo.ra, gcrscoo6.ra, rtol=1e-8, 
atol=1e-10*u.deg) + assert not allclose(gcrscoo.dec, gcrscoo6.dec, rtol=1e-8, atol=1e-10*u.deg) + icooviag3 = gcrscoo6.transform_to(ICRS) # and now back to the original + assert_allclose(icoo.ra, icooviag3.ra) + assert_allclose(icoo.dec, icooviag3.dec) + + +@pytest.mark.parametrize('gframe', gcrs_frames) +def test_icrs_gcrs_dist_diff(gframe): + """ + Check that with and without distance give different ICRS<->GCRS answers + """ + gcrsnod = icrs_coords[0].transform_to(gframe) + gcrswd = icrs_coords[1].transform_to(gframe) + + # parallax effects should be included, so with and w/o distance should be different + assert not allclose(gcrswd.ra, gcrsnod.ra, rtol=1e-8, atol=1e-10*u.deg) + assert not allclose(gcrswd.dec, gcrsnod.dec, rtol=1e-8, atol=1e-10*u.deg) + # and the distance should transform at least somehow + assert not allclose(gcrswd.distance, icrs_coords[1].distance, rtol=1e-8, + atol=1e-10*u.pc) + + +def test_cirs_to_altaz(): + """ + Check the basic CIRS<->AltAz transforms. More thorough checks implicitly + happen in `test_iau_fullstack` + """ + from .. import EarthLocation + + ra, dec, dist = randomly_sample_sphere(200) + cirs = CIRS(ra=ra, dec=dec, obstime='J2000') + crepr = SphericalRepresentation(lon=ra, lat=dec, distance=dist) + cirscart = CIRS(crepr, obstime=cirs.obstime, representation=CartesianRepresentation) + + loc = EarthLocation(lat=0*u.deg, lon=0*u.deg, height=0*u.m) + altazframe = AltAz(location=loc, obstime=Time('J2005')) + + cirs2 = cirs.transform_to(altazframe).transform_to(cirs) + cirs3 = cirscart.transform_to(altazframe).transform_to(cirs) + + # check round-tripping + assert_allclose(cirs.ra, cirs2.ra) + assert_allclose(cirs.dec, cirs2.dec) + assert_allclose(cirs.ra, cirs3.ra) + assert_allclose(cirs.dec, cirs3.dec) + + +def test_gcrs_itrs(): + """ + Check basic GCRS<->ITRS transforms for round-tripping. + """ + ra, dec, _ = randomly_sample_sphere(200) + gcrs = GCRS(ra=ra, dec=dec, obstime='J2000') + gcrs6 = GCRS(ra=ra, dec=dec, obstime='J2006') + + gcrs2 = gcrs.transform_to(ITRS).transform_to(gcrs) + gcrs6_2 = gcrs6.transform_to(ITRS).transform_to(gcrs) + + assert_allclose(gcrs.ra, gcrs2.ra) + assert_allclose(gcrs.dec, gcrs2.dec) + assert not allclose(gcrs.ra, gcrs6_2.ra) + assert not allclose(gcrs.dec, gcrs6_2.dec) + + # also try with the cartesian representation + gcrsc = gcrs.realize_frame(gcrs.data) + gcrsc.representation = CartesianRepresentation + gcrsc2 = gcrsc.transform_to(ITRS).transform_to(gcrsc) + assert_allclose(gcrsc.spherical.lon.deg, gcrsc2.ra.deg) + assert_allclose(gcrsc.spherical.lat, gcrsc2.dec) + + +def test_cirs_itrs(): + """ + Check basic CIRS<->ITRS transforms for round-tripping. + """ + ra, dec, _ = randomly_sample_sphere(200) + cirs = CIRS(ra=ra, dec=dec, obstime='J2000') + cirs6 = CIRS(ra=ra, dec=dec, obstime='J2006') + + cirs2 = cirs.transform_to(ITRS).transform_to(cirs) + cirs6_2 = cirs6.transform_to(ITRS).transform_to(cirs) # different obstime + + # just check round-tripping + assert_allclose(cirs.ra, cirs2.ra) + assert_allclose(cirs.dec, cirs2.dec) + assert not allclose(cirs.ra, cirs6_2.ra) + assert not allclose(cirs.dec, cirs6_2.dec) + + +def test_gcrs_cirs(): + """ + Check GCRS<->CIRS transforms for round-tripping. 
More complicated than the + above two because it's multi-hop + """ + ra, dec, _ = randomly_sample_sphere(200) + gcrs = GCRS(ra=ra, dec=dec, obstime='J2000') + gcrs6 = GCRS(ra=ra, dec=dec, obstime='J2006') + + gcrs2 = gcrs.transform_to(CIRS).transform_to(gcrs) + gcrs6_2 = gcrs6.transform_to(CIRS).transform_to(gcrs) + + assert_allclose(gcrs.ra, gcrs2.ra) + assert_allclose(gcrs.dec, gcrs2.dec) + assert not allclose(gcrs.ra, gcrs6_2.ra) + assert not allclose(gcrs.dec, gcrs6_2.dec) + + # now try explicit intermediate pathways and ensure they're all consistent + gcrs3 = gcrs.transform_to(ITRS).transform_to(CIRS).transform_to(ITRS).transform_to(gcrs) + assert_allclose(gcrs.ra, gcrs3.ra) + assert_allclose(gcrs.dec, gcrs3.dec) + + gcrs4 = gcrs.transform_to(ICRS).transform_to(CIRS).transform_to(ICRS).transform_to(gcrs) + assert_allclose(gcrs.ra, gcrs4.ra) + assert_allclose(gcrs.dec, gcrs4.dec) + + +def test_gcrs_altaz(): + """ + Check GCRS<->AltAz transforms for round-tripping. Has multiple paths + """ + from .. import EarthLocation + + ra, dec, _ = randomly_sample_sphere(1) + gcrs = GCRS(ra=ra[0], dec=dec[0], obstime='J2000') + + # check array times sure N-d arrays work + times = Time(np.linspace(2456293.25, 2456657.25, 51) * u.day, + format='jd', scale='utc') + + loc = EarthLocation(lon=10 * u.deg, lat=80. * u.deg) + aaframe = AltAz(obstime=times, location=loc) + + aa1 = gcrs.transform_to(aaframe) + aa2 = gcrs.transform_to(ICRS).transform_to(CIRS).transform_to(aaframe) + aa3 = gcrs.transform_to(ITRS).transform_to(CIRS).transform_to(aaframe) + + # make sure they're all consistent + assert_allclose(aa1.alt, aa2.alt) + assert_allclose(aa1.az, aa2.az) + assert_allclose(aa1.alt, aa3.alt) + assert_allclose(aa1.az, aa3.az) + + +def test_precessed_geocentric(): + assert PrecessedGeocentric().equinox.jd == Time('J2000', scale='utc').jd + + gcrs_coo = GCRS(180*u.deg, 2*u.deg, distance=10000*u.km) + pgeo_coo = gcrs_coo.transform_to(PrecessedGeocentric) + assert np.abs(gcrs_coo.ra - pgeo_coo.ra) > 10*u.marcsec + assert np.abs(gcrs_coo.dec - pgeo_coo.dec) > 10*u.marcsec + assert_allclose(gcrs_coo.distance, pgeo_coo.distance) + + gcrs_roundtrip = pgeo_coo.transform_to(GCRS) + assert_allclose(gcrs_coo.ra, gcrs_roundtrip.ra) + assert_allclose(gcrs_coo.dec, gcrs_roundtrip.dec) + assert_allclose(gcrs_coo.distance, gcrs_roundtrip.distance) + + pgeo_coo2 = gcrs_coo.transform_to(PrecessedGeocentric(equinox='B1850')) + assert np.abs(gcrs_coo.ra - pgeo_coo2.ra) > 1.5*u.deg + assert np.abs(gcrs_coo.dec - pgeo_coo2.dec) > 0.5*u.deg + assert_allclose(gcrs_coo.distance, pgeo_coo2.distance) + + gcrs2_roundtrip = pgeo_coo2.transform_to(GCRS) + assert_allclose(gcrs_coo.ra, gcrs2_roundtrip.ra) + assert_allclose(gcrs_coo.dec, gcrs2_roundtrip.dec) + assert_allclose(gcrs_coo.distance, gcrs2_roundtrip.distance) + + +# shared by parametrized tests below. 
Some use the whole AltAz, others use just obstime +totest_frames = [AltAz(location=EarthLocation(-90*u.deg, 65*u.deg), + obstime=Time('J2000')), # J2000 is often a default so this might work when others don't + AltAz(location=EarthLocation(120*u.deg, -35*u.deg), + obstime=Time('J2000')), + AltAz(location=EarthLocation(-90*u.deg, 65*u.deg), + obstime=Time('2014-01-01 00:00:00')), + AltAz(location=EarthLocation(-90*u.deg, 65*u.deg), + obstime=Time('2014-08-01 08:00:00')), + AltAz(location=EarthLocation(120*u.deg, -35*u.deg), + obstime=Time('2014-01-01 00:00:00')) + ] +MOONDIST = 385000*u.km # approximate moon semi-major orbit axis of moon +MOONDIST_CART = CartesianRepresentation(3**-0.5*MOONDIST, 3**-0.5*MOONDIST, 3**-0.5*MOONDIST) +EARTHECC = 0.017 + 0.005 # roughly earth orbital eccentricity, but with an added tolerance + + +@pytest.mark.parametrize('testframe', totest_frames) +def test_gcrs_altaz_sunish(testframe): + """ + Sanity-check that the sun is at a reasonable distance from any altaz + """ + sun = get_sun(testframe.obstime) + + assert sun.frame.name == 'gcrs' + + # the .to(u.au) is not necessary, it just makes the asserts on failure more readable + assert (EARTHECC - 1)*u.au < sun.distance.to(u.au) < (EARTHECC + 1)*u.au + + sunaa = sun.transform_to(testframe) + assert (EARTHECC - 1)*u.au < sunaa.distance.to(u.au) < (EARTHECC + 1)*u.au + + +@pytest.mark.parametrize('testframe', totest_frames) +def test_gcrs_altaz_moonish(testframe): + """ + Sanity-check that an object resembling the moon goes to the right place with + a GCRS->AltAz transformation + """ + moon = GCRS(MOONDIST_CART, obstime=testframe.obstime) + + moonaa = moon.transform_to(testframe) + + # now check that the distance change is similar to earth radius + assert 1000*u.km < np.abs(moonaa.distance - moon.distance).to(u.au) < 7000*u.km + + # now check that it round-trips + moon2 = moonaa.transform_to(moon) + assert_allclose(moon.cartesian.xyz, moon2.cartesian.xyz) + + # also should add checks that the alt/az are different for different earth locations + + +@pytest.mark.parametrize('testframe', totest_frames) +def test_gcrs_altaz_bothroutes(testframe): + """ + Repeat of both the moonish and sunish tests above to make sure the two + routes through the coordinate graph are consistent with each other + """ + sun = get_sun(testframe.obstime) + sunaa_viaicrs = sun.transform_to(ICRS).transform_to(testframe) + sunaa_viaitrs = sun.transform_to(ITRS(obstime=testframe.obstime)).transform_to(testframe) + + moon = GCRS(MOONDIST_CART, obstime=testframe.obstime) + moonaa_viaicrs = moon.transform_to(ICRS).transform_to(testframe) + moonaa_viaitrs = moon.transform_to(ITRS(obstime=testframe.obstime)).transform_to(testframe) + + assert_allclose(sunaa_viaicrs.cartesian.xyz, sunaa_viaitrs.cartesian.xyz) + assert_allclose(moonaa_viaicrs.cartesian.xyz, moonaa_viaitrs.cartesian.xyz) + + +@pytest.mark.parametrize('testframe', totest_frames) +def test_cirs_altaz_moonish(testframe): + """ + Sanity-check that an object resembling the moon goes to the right place with + a CIRS<->AltAz transformation + """ + moon = CIRS(MOONDIST_CART, obstime=testframe.obstime) + + moonaa = moon.transform_to(testframe) + assert 1000*u.km < np.abs(moonaa.distance - moon.distance).to(u.km) < 7000*u.km + + # now check that it round-trips + moon2 = moonaa.transform_to(moon) + assert_allclose(moon.cartesian.xyz, moon2.cartesian.xyz) + + +@pytest.mark.parametrize('testframe', totest_frames) +def test_cirs_altaz_nodist(testframe): + """ + Check that a 
UnitSphericalRepresentation coordinate round-trips for the + CIRS<->AltAz transformation. + """ + coo0 = CIRS(UnitSphericalRepresentation(10*u.deg, 20*u.deg), obstime=testframe.obstime) + + # check that it round-trips + coo1 = coo0.transform_to(testframe).transform_to(coo0) + assert_allclose(coo0.cartesian.xyz, coo1.cartesian.xyz) + + +@pytest.mark.parametrize('testframe', totest_frames) +def test_cirs_icrs_moonish(testframe): + """ + check that something like the moon goes to about the right distance from the + ICRS origin when starting from CIRS + """ + moonish = CIRS(MOONDIST_CART, obstime=testframe.obstime) + moonicrs = moonish.transform_to(ICRS) + + assert 0.97*u.au < moonicrs.distance < 1.03*u.au + + +@pytest.mark.parametrize('testframe', totest_frames) +def test_gcrs_icrs_moonish(testframe): + """ + check that something like the moon goes to about the right distance from the + ICRS origin when starting from GCRS + """ + moonish = GCRS(MOONDIST_CART, obstime=testframe.obstime) + moonicrs = moonish.transform_to(ICRS) + + assert 0.97*u.au < moonicrs.distance < 1.03*u.au + + +@pytest.mark.parametrize('testframe', totest_frames) +def test_icrs_gcrscirs_sunish(testframe): + """ + check that the ICRS barycenter goes to about the right distance from various + ~geocentric frames (other than testframe) + """ + # slight offset to avoid divide-by-zero errors + icrs = ICRS(0*u.deg, 0*u.deg, distance=10*u.km) + + gcrs = icrs.transform_to(GCRS(obstime=testframe.obstime)) + assert (EARTHECC - 1)*u.au < gcrs.distance.to(u.au) < (EARTHECC + 1)*u.au + + cirs = icrs.transform_to(CIRS(obstime=testframe.obstime)) + assert (EARTHECC - 1)*u.au < cirs.distance.to(u.au) < (EARTHECC + 1)*u.au + + itrs = icrs.transform_to(ITRS(obstime=testframe.obstime)) + assert (EARTHECC - 1)*u.au < itrs.spherical.distance.to(u.au) < (EARTHECC + 1)*u.au + + +@pytest.mark.parametrize('testframe', totest_frames) +def test_icrs_altaz_moonish(testframe): + """ + Check that something expressed in *ICRS* as being moon-like goes to the + right AltAz distance + """ + # we use epv00 instead of get_sun because get_sun includes aberration + earth_pv_helio, earth_pv_bary = epv00(*get_jd12(testframe.obstime, 'tdb')) + earth_icrs_xyz = earth_pv_bary[0]*u.au + moonoffset = [0, 0, MOONDIST.value]*MOONDIST.unit + moonish_icrs = ICRS(CartesianRepresentation(earth_icrs_xyz + moonoffset)) + moonaa = moonish_icrs.transform_to(testframe) + + # now check that the distance change is similar to earth radius + assert 1000*u.km < np.abs(moonaa.distance - MOONDIST).to(u.au) < 7000*u.km + + +def test_gcrs_self_transform_closeby(): + """ + Tests GCRS self transform for objects which are nearby and thus + have reasonable parallax. + + Moon positions were originally created using JPL DE432s ephemeris. + + The two lunar positions (one geocentric, one at a defined location) + are created via a transformation from ICRS to two different GCRS frames. + + We test that the GCRS-GCRS self transform can correctly map one GCRS + frame onto the other. 
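+    (At lunar distance, the ~6400 km geocenter-to-surface baseline corresponds
+    to a parallax of order a degree, which is what makes this a sensitive test
+    of the obsgeoloc/obsgeovel handling.)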
+ """ + t = Time("2014-12-25T07:00") + moon_geocentric = SkyCoord(GCRS(318.10579159*u.deg, + -11.65281165*u.deg, + 365042.64880308*u.km, obstime=t)) + + # this is the location of the Moon as seen from La Palma + obsgeoloc = [-5592982.59658935, -63054.1948592, 3059763.90102216]*u.m + obsgeovel = [4.59798494, -407.84677071, 0.]*u.m/u.s + moon_lapalma = SkyCoord(GCRS(318.7048445*u.deg, + -11.98761996*u.deg, + 369722.8231031*u.km, + obstime=t, + obsgeoloc=obsgeoloc, + obsgeovel=obsgeovel)) + + transformed = moon_geocentric.transform_to(moon_lapalma.frame) + delta = transformed.separation_3d(moon_lapalma) + assert_allclose(delta, 0.0*u.m, atol=1*u.m) + + +@remote_data +@pytest.mark.skipif('not HAS_JPLEPHEM') +def test_ephemerides(): + """ + We test that using different ephemerides gives very similar results + for transformations + """ + t = Time("2014-12-25T07:00") + moon = SkyCoord(GCRS(318.10579159*u.deg, + -11.65281165*u.deg, + 365042.64880308*u.km, obstime=t)) + + icrs_frame = ICRS() + hcrs_frame = HCRS(obstime=t) + ecl_frame = HeliocentricTrueEcliptic(equinox=t) + cirs_frame = CIRS(obstime=t) + + moon_icrs_builtin = moon.transform_to(icrs_frame) + moon_hcrs_builtin = moon.transform_to(hcrs_frame) + moon_helioecl_builtin = moon.transform_to(ecl_frame) + moon_cirs_builtin = moon.transform_to(cirs_frame) + + with solar_system_ephemeris.set('jpl'): + moon_icrs_jpl = moon.transform_to(icrs_frame) + moon_hcrs_jpl = moon.transform_to(hcrs_frame) + moon_helioecl_jpl = moon.transform_to(ecl_frame) + moon_cirs_jpl = moon.transform_to(cirs_frame) + + # most transformations should differ by an amount which is + # non-zero but of order milliarcsecs + sep_icrs = moon_icrs_builtin.separation(moon_icrs_jpl) + sep_hcrs = moon_hcrs_builtin.separation(moon_hcrs_jpl) + sep_helioecl = moon_helioecl_builtin.separation(moon_helioecl_jpl) + sep_cirs = moon_cirs_builtin.separation(moon_cirs_jpl) + + assert_allclose([sep_icrs, sep_hcrs, sep_helioecl], 0.0*u.deg, atol=10*u.mas) + assert all(sep > 10*u.microarcsecond for sep in (sep_icrs, sep_hcrs, sep_helioecl)) + + # CIRS should be the same + assert_allclose(sep_cirs, 0.0*u.deg, atol=1*u.microarcsecond) diff --git a/astropy/coordinates/tests/test_matching.py b/astropy/coordinates/tests/test_matching.py new file mode 100644 index 0000000..472ba86 --- /dev/null +++ b/astropy/coordinates/tests/test_matching.py @@ -0,0 +1,290 @@ +# -*- coding: utf-8 -*- +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import pytest +import numpy as np +from numpy import testing as npt + +from ...tests.helper import assert_quantity_allclose as assert_allclose +from ...extern.six.moves import zip + +from ... import units as u +from ...utils import minversion + +from .. import matching + +""" +These are the tests for coordinate matching. + +Note that this requires scipy. +""" + +try: + import scipy + HAS_SCIPY = True +except ImportError: + HAS_SCIPY = False + +if HAS_SCIPY and minversion(scipy, '0.12.0', inclusive=False): + OLDER_SCIPY = False +else: + OLDER_SCIPY = True + + +@pytest.mark.skipif(str('not HAS_SCIPY')) +def test_matching_function(): + from .. 
import ICRS + from ..matching import match_coordinates_3d + # this only uses match_coordinates_3d because that's the actual implementation + + cmatch = ICRS([4, 2.1]*u.degree, [0, 0]*u.degree) + ccatalog = ICRS([1, 2, 3, 4]*u.degree, [0, 0, 0, 0]*u.degree) + + idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog) + npt.assert_array_equal(idx, [3, 1]) + npt.assert_array_almost_equal(d2d.degree, [0, 0.1]) + assert d3d.value[0] == 0 + + idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog, nthneighbor=2) + assert np.all(idx == 2) + npt.assert_array_almost_equal(d2d.degree, [1, 0.9]) + npt.assert_array_less(d3d.value, 0.02) + + +@pytest.mark.skipif(str('not HAS_SCIPY')) +def test_matching_function_3d_and_sky(): + from .. import ICRS + from ..matching import match_coordinates_3d, match_coordinates_sky + + cmatch = ICRS([4, 2.1]*u.degree, [0, 0]*u.degree, distance=[1, 5] * u.kpc) + ccatalog = ICRS([1, 2, 3, 4]*u.degree, [0, 0, 0, 0]*u.degree, distance=[1, 1, 1, 5] * u.kpc) + + idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog) + npt.assert_array_equal(idx, [2, 3]) + + assert_allclose(d2d, [1, 1.9] * u.deg) + assert np.abs(d3d[0].to_value(u.kpc) - np.radians(1)) < 1e-6 + assert np.abs(d3d[1].to_value(u.kpc) - 5*np.radians(1.9)) < 1e-5 + + idx, d2d, d3d = match_coordinates_sky(cmatch, ccatalog) + npt.assert_array_equal(idx, [3, 1]) + + assert_allclose(d2d, [0, 0.1] * u.deg) + assert_allclose(d3d, [4, 4.0000019] * u.kpc) + + +@pytest.mark.parametrize('functocheck, args, defaultkdtname, bothsaved', + [(matching.match_coordinates_3d, [], 'kdtree_3d', False), + (matching.match_coordinates_sky, [], 'kdtree_sky', False), + (matching.search_around_3d, [1*u.kpc], 'kdtree_3d', True), + (matching.search_around_sky, [1*u.deg], 'kdtree_sky', False) + ]) +@pytest.mark.skipif(str('not HAS_SCIPY')) +def test_kdtree_storage(functocheck, args, defaultkdtname, bothsaved): + from .. import ICRS + + def make_scs(): + cmatch = ICRS([4, 2.1]*u.degree, [0, 0]*u.degree, distance=[1, 2]*u.kpc) + ccatalog = ICRS([1, 2, 3, 4]*u.degree, [0, 0, 0, 0]*u.degree, distance=[1, 2, 3, 4]*u.kpc) + return cmatch, ccatalog + + cmatch, ccatalog = make_scs() + functocheck(cmatch, ccatalog, *args, storekdtree=False) + assert 'kdtree' not in ccatalog.cache + assert defaultkdtname not in ccatalog.cache + + cmatch, ccatalog = make_scs() + functocheck(cmatch, ccatalog, *args) + assert defaultkdtname in ccatalog.cache + assert 'kdtree' not in ccatalog.cache + + cmatch, ccatalog = make_scs() + functocheck(cmatch, ccatalog, *args, storekdtree=True) + assert 'kdtree' in ccatalog.cache + assert defaultkdtname not in ccatalog.cache + + cmatch, ccatalog = make_scs() + assert 'tislit_cheese' not in ccatalog.cache + functocheck(cmatch, ccatalog, *args, storekdtree='tislit_cheese') + assert 'tislit_cheese' in ccatalog.cache + assert defaultkdtname not in ccatalog.cache + assert 'kdtree' not in ccatalog.cache + if bothsaved: + assert 'tislit_cheese' in cmatch.cache + assert defaultkdtname not in cmatch.cache + assert 'kdtree' not in cmatch.cache + else: + assert 'tislit_cheese' not in cmatch.cache + + # now a bit of a hacky trick to make sure it at least tries to *use* it + ccatalog.cache['tislit_cheese'] = 1 + cmatch.cache['tislit_cheese'] = 1 + with pytest.raises(TypeError) as e: + functocheck(cmatch, ccatalog, *args, storekdtree='tislit_cheese') + assert 'KD' in e.value.args[0] + + +@pytest.mark.skipif(str('not HAS_SCIPY')) +def test_matching_method(): + from .. 
import ICRS, SkyCoord
+    from ...utils import NumpyRNGContext
+    from ..matching import match_coordinates_3d, match_coordinates_sky
+
+    with NumpyRNGContext(987654321):
+        cmatch = ICRS(np.random.rand(20) * 360.*u.degree,
+                      (np.random.rand(20) * 180. - 90.)*u.degree)
+        ccatalog = ICRS(np.random.rand(100) * 360. * u.degree,
+                        (np.random.rand(100) * 180. - 90.)*u.degree)
+
+    idx1, d2d1, d3d1 = SkyCoord(cmatch).match_to_catalog_3d(ccatalog)
+    idx2, d2d2, d3d2 = match_coordinates_3d(cmatch, ccatalog)
+
+    npt.assert_array_equal(idx1, idx2)
+    assert_allclose(d2d1, d2d2)
+    assert_allclose(d3d1, d3d2)
+
+    # should give the same results as above because there is no distance
+    # information, but just make sure this method works
+    idx1, d2d1, d3d1 = SkyCoord(cmatch).match_to_catalog_sky(ccatalog)
+    idx2, d2d2, d3d2 = match_coordinates_sky(cmatch, ccatalog)
+
+    npt.assert_array_equal(idx1, idx2)
+    assert_allclose(d2d1, d2d2)
+    assert_allclose(d3d1, d3d2)
+
+    assert len(idx1) == len(d2d1) == len(d3d1) == 20
+
+
+@pytest.mark.skipif(str('not HAS_SCIPY'))
+@pytest.mark.skipif(str('OLDER_SCIPY'))
+def test_search_around():
+    from .. import ICRS, SkyCoord
+    from ..matching import search_around_sky, search_around_3d
+
+    coo1 = ICRS([4, 2.1]*u.degree, [0, 0]*u.degree, distance=[1, 5] * u.kpc)
+    coo2 = ICRS([1, 2, 3, 4]*u.degree, [0, 0, 0, 0]*u.degree, distance=[1, 1, 1, 5] * u.kpc)
+
+    idx1_1deg, idx2_1deg, d2d_1deg, d3d_1deg = search_around_sky(coo1, coo2, 1.01*u.deg)
+    idx1_0p05deg, idx2_0p05deg, d2d_0p05deg, d3d_0p05deg = search_around_sky(coo1, coo2, 0.05*u.deg)
+
+    assert list(zip(idx1_1deg, idx2_1deg)) == [(0, 2), (0, 3), (1, 1), (1, 2)]
+    assert d2d_1deg[0] == 1.0*u.deg
+    assert_allclose(d2d_1deg, [1, 0, .1, .9]*u.deg)
+
+    assert list(zip(idx1_0p05deg, idx2_0p05deg)) == [(0, 3)]
+
+    idx1_1kpc, idx2_1kpc, d2d_1kpc, d3d_1kpc = search_around_3d(coo1, coo2, 1*u.kpc)
+    idx1_sm, idx2_sm, d2d_sm, d3d_sm = search_around_3d(coo1, coo2, 0.05*u.kpc)
+
+    assert list(zip(idx1_1kpc, idx2_1kpc)) == [(0, 0), (0, 1), (0, 2), (1, 3)]
+    assert list(zip(idx1_sm, idx2_sm)) == [(0, 1), (0, 2)]
+    assert_allclose(d2d_sm, [2, 1]*u.deg)
+
+    # Test for the non-matches, #4877
+    coo1 = ICRS([4.1, 2.1]*u.degree, [0, 0]*u.degree, distance=[1, 5] * u.kpc)
+    idx1, idx2, d2d, d3d = search_around_sky(coo1, coo2, 1*u.arcsec)
+    assert idx1.size == idx2.size == d2d.size == d3d.size == 0
+    assert idx1.dtype == idx2.dtype == int
+    assert d2d.unit == u.deg
+    assert d3d.unit == u.kpc
+    idx1, idx2, d2d, d3d = search_around_3d(coo1, coo2, 1*u.m)
+    assert idx1.size == idx2.size == d2d.size == d3d.size == 0
+    assert idx1.dtype == idx2.dtype == int
+    assert d2d.unit == u.deg
+    assert d3d.unit == u.kpc
+
+    # Test when one or both of the coordinate arrays is empty, #4875
+    empty = ICRS(ra=[] * u.degree, dec=[] * u.degree, distance=[] * u.kpc)
+    idx1, idx2, d2d, d3d = search_around_sky(empty, coo2, 1*u.arcsec)
+    assert idx1.size == idx2.size == d2d.size == d3d.size == 0
+    assert idx1.dtype == idx2.dtype == int
+    assert d2d.unit == u.deg
+    assert d3d.unit == u.kpc
+    idx1, idx2, d2d, d3d = search_around_sky(coo1, empty, 1*u.arcsec)
+    assert idx1.size == idx2.size == d2d.size == d3d.size == 0
+    assert idx1.dtype == idx2.dtype == int
+    assert d2d.unit == u.deg
+    assert d3d.unit == u.kpc
+    empty = ICRS(ra=[] * u.degree, dec=[] * u.degree, distance=[] * u.kpc)
+    idx1, idx2, d2d, d3d = search_around_sky(empty, empty[:], 1*u.arcsec)
+    assert idx1.size == idx2.size == d2d.size == d3d.size == 0
+    assert idx1.dtype == idx2.dtype == int
+    assert d2d.unit == u.deg
+    assert d3d.unit == u.kpc
+    idx1, idx2, d2d, d3d = search_around_3d(empty, coo2, 1*u.m)
+    assert idx1.size == idx2.size == d2d.size == d3d.size == 0
+    assert idx1.dtype == idx2.dtype == int
+    assert d2d.unit == u.deg
+    assert d3d.unit == u.kpc
+    idx1, idx2, d2d, d3d = search_around_3d(coo1, empty, 1*u.m)
+    assert idx1.size == idx2.size == d2d.size == d3d.size == 0
+    assert idx1.dtype == idx2.dtype == int
+    assert d2d.unit == u.deg
+    assert d3d.unit == u.kpc
+    idx1, idx2, d2d, d3d = search_around_3d(empty, empty[:], 1*u.m)
+    assert idx1.size == idx2.size == d2d.size == d3d.size == 0
+    assert idx1.dtype == idx2.dtype == int
+    assert d2d.unit == u.deg
+    assert d3d.unit == u.kpc
+
+    # Test that input without distance units results in a
+    # 'dimensionless_unscaled' unit
+    cempty = SkyCoord(ra=[], dec=[], unit=u.deg)
+    idx1, idx2, d2d, d3d = search_around_3d(cempty, cempty[:], 1*u.m)
+    assert d2d.unit == u.deg
+    assert d3d.unit == u.dimensionless_unscaled
+    idx1, idx2, d2d, d3d = search_around_sky(cempty, cempty[:], 1*u.m)
+    assert d2d.unit == u.deg
+    assert d3d.unit == u.dimensionless_unscaled
+
+
+@pytest.mark.skipif(str('not HAS_SCIPY'))
+@pytest.mark.skipif(str('OLDER_SCIPY'))
+def test_search_around_scalar():
+    from astropy.coordinates import SkyCoord, Angle
+
+    cat = SkyCoord([1, 2, 3], [-30, 45, 8], unit="deg")
+    target = SkyCoord('1.1 -30.1', unit="deg")
+
+    with pytest.raises(ValueError) as excinfo:
+        cat.search_around_sky(target, Angle('2d'))
+
+    # make sure the error message is *specific* to search_around_sky rather than
+    # generic as reported in #3359
+    assert 'search_around_sky' in str(excinfo.value)
+
+    with pytest.raises(ValueError) as excinfo:
+        cat.search_around_3d(target, Angle('2d'))
+    assert 'search_around_3d' in str(excinfo.value)
+
+
+@pytest.mark.skipif(str('not HAS_SCIPY'))
+@pytest.mark.skipif(str('OLDER_SCIPY'))
+def test_match_catalog_empty():
+    from astropy.coordinates import SkyCoord
+
+    sc1 = SkyCoord(1, 2, unit="deg")
+    cat0 = SkyCoord([], [], unit="deg")
+    cat1 = SkyCoord([1.1], [2.1], unit="deg")
+    cat2 = SkyCoord([1.1, 3], [2.1, 5], unit="deg")
+
+    sc1.match_to_catalog_sky(cat2)
+    sc1.match_to_catalog_3d(cat2)
+
+    sc1.match_to_catalog_sky(cat1)
+    sc1.match_to_catalog_3d(cat1)
+
+    with pytest.raises(ValueError) as excinfo:
+        sc1.match_to_catalog_sky(cat1[0])
+    assert 'catalog' in str(excinfo.value)
+    with pytest.raises(ValueError) as excinfo:
+        sc1.match_to_catalog_3d(cat1[0])
+    assert 'catalog' in str(excinfo.value)
+
+    with pytest.raises(ValueError) as excinfo:
+        sc1.match_to_catalog_sky(cat0)
+    assert 'catalog' in str(excinfo.value)
+    with pytest.raises(ValueError) as excinfo:
+        sc1.match_to_catalog_3d(cat0)
+    assert 'catalog' in str(excinfo.value)
diff --git a/astropy/coordinates/tests/test_matrix_utilities.py b/astropy/coordinates/tests/test_matrix_utilities.py
new file mode 100644
index 0000000..22012d4
--- /dev/null
+++ b/astropy/coordinates/tests/test_matrix_utilities.py
@@ -0,0 +1,47 @@
+# -*- coding: utf-8 -*-
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+import numpy as np
+from numpy.testing import assert_allclose, assert_array_equal
+
+from ... import units as u
+from ..matrix_utilities import rotation_matrix, angle_axis
+
+
+def test_rotation_matrix():
+    assert_array_equal(rotation_matrix(0*u.deg, 'x'), np.eye(3))
+
+    assert_allclose(rotation_matrix(90*u.deg, 'y'), [[0, 0, -1],
+                                                     [0, 1, 0],
+                                                     [1, 0, 0]], atol=1e-12)
+
+    assert_allclose(rotation_matrix(-90*u.deg, 'z'), [[0, -1, 0],
+                                                      [1, 0, 0],
+                                                      [0, 0, 1]], atol=1e-12)
+
+    assert_allclose(rotation_matrix(45*u.deg, 'x'),
+                    rotation_matrix(45*u.deg, [1, 0, 0]))
+    assert_allclose(rotation_matrix(125*u.deg, 'y'),
+                    rotation_matrix(125*u.deg, [0, 1, 0]))
+    assert_allclose(rotation_matrix(-30*u.deg, 'z'),
+                    rotation_matrix(-30*u.deg, [0, 0, 1]))
+
+    assert_allclose(np.dot(rotation_matrix(180*u.deg, [1, 1, 0]), [1, 0, 0]),
+                    [0, 1, 0], atol=1e-12)
+
+    # make sure it also works for very small angles
+    assert_allclose(rotation_matrix(0.000001*u.deg, 'x'),
+                    rotation_matrix(0.000001*u.deg, [1, 0, 0]))
+
+
+def test_angle_axis():
+    m1 = rotation_matrix(35*u.deg, 'x')
+    an1, ax1 = angle_axis(m1)
+
+    assert np.abs(an1 - 35*u.deg) < 1e-10*u.deg
+    assert_allclose(ax1, [1, 0, 0])
+
+    m2 = rotation_matrix(-89*u.deg, [1, 1, 0])
+    an2, ax2 = angle_axis(m2)
+
+    assert np.abs(an2 - 89*u.deg) < 1e-10*u.deg
+    assert_allclose(ax2, [-2**-0.5, -2**-0.5, 0])
diff --git a/astropy/coordinates/tests/test_name_resolve.py b/astropy/coordinates/tests/test_name_resolve.py
new file mode 100644
index 0000000..89b8fbd
--- /dev/null
+++ b/astropy/coordinates/tests/test_name_resolve.py
@@ -0,0 +1,161 @@
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+
+# TEST_UNICODE_LITERALS
+
+"""
+This module contains tests for the name resolve convenience module.
+"""
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
+
+
+import time
+
+import pytest
+import numpy as np
+
+from ..name_resolve import (get_icrs_coordinates, NameResolveError,
+                            sesame_database, _parse_response, sesame_url)
+from ..sky_coordinate import SkyCoord
+from ...extern.six.moves import urllib
+from ...tests.helper import remote_data
+from ... import units as u
+
+_cached_ngc3642 = dict()
+_cached_ngc3642["simbad"] = """# NGC 3642 #Q22523669
+#=S=Simbad (via url): 1
+%@ 503952
+%I.0 NGC 3642
+%C.0 LIN
+%C.N0 15.15.01.00
+%J 170.5750583 +59.0742417 = 11:22:18.01 +59:04:27.2
+%V z 1593 0.005327 [0.000060] D 2002LEDA.........0P
+%D 1.673 1.657 75 (32767) (I) C 2006AJ....131.1163S
+%T 5 =32800000 D 2011A&A...532A..74B
+%#B 140
+
+
+#====Done (2013-Feb-12,16:37:11z)===="""
+
+_cached_ngc3642["vizier"] = """# NGC 3642 #Q22523677
+#=V=VizieR (local): 1
+%J 170.56 +59.08 = 11:22.2 +59:05
+%I.0 {NGC} 3642
+
+
+
+#====Done (2013-Feb-12,16:37:42z)===="""
+
+_cached_ngc3642["all"] = """# ngc3642 #Q22523722
+#=S=Simbad (via url): 1
+%@ 503952
+%I.0 NGC 3642
+%C.0 LIN
+%C.N0 15.15.01.00
+%J 170.5750583 +59.0742417 = 11:22:18.01 +59:04:27.2
+%V z 1593 0.005327 [0.000060] D 2002LEDA.........0P
+%D 1.673 1.657 75 (32767) (I) C 2006AJ....131.1163S
+%T 5 =32800000 D 2011A&A...532A..74B
+%#B 140
+
+
+#=V=VizieR (local): 1
+%J 170.56 +59.08 = 11:22.2 +59:05
+%I.0 {NGC} 3642
+
+
+#!N=NED : *** Could not access the server ***
+
+#====Done (2013-Feb-12,16:39:48z)===="""
+
+_cached_castor = dict()
+_cached_castor["all"] = """# castor #Q22524249
+#=S=Simbad (via url): 1
+%@ 983633
+%I.0 NAME CASTOR
+%C.0 **
+%C.N0 12.13.00.00
+%J 113.649471640 +31.888282216 = 07:34:35.87 +31:53:17.8
+%J.E [34.72 25.95 0] A 2007A&A...474..653V
+%P -191.45 -145.19 [3.95 2.95 0] A 2007A&A...474..653V
+%X 64.12 [3.75] A 2007A&A...474..653V
+%S A1V+A2Vm =0.0000D200.0030.0110000000100000 C 2001AJ....122.3466M
+%#B 179
+
+#!V=VizieR (local): No table found for: castor
+
+#!N=NED: ****object name not recognized by NED name interpreter
+#!N=NED: ***Not recognized by NED: castor
+
+
+
+#====Done (2013-Feb-12,16:52:02z)===="""
+
+_cached_castor["simbad"] = """# castor #Q22524495
+#=S=Simbad (via url): 1
+%@ 983633
+%I.0 NAME CASTOR
+%C.0 **
+%C.N0 12.13.00.00
+%J 113.649471640 +31.888282216 = 07:34:35.87 +31:53:17.8
+%J.E [34.72 25.95 0] A 2007A&A...474..653V
+%P -191.45 -145.19 [3.95 2.95 0] A 2007A&A...474..653V
+%X 64.12 [3.75] A 2007A&A...474..653V
+%S A1V+A2Vm =0.0000D200.0030.0110000000100000 C 2001AJ....122.3466M
+%#B 179
+
+
+#====Done (2013-Feb-12,17:00:39z)===="""
+
+
+@remote_data
+def test_names():
+
+    # First check that sesame is up
+    if urllib.request.urlopen("http://cdsweb.u-strasbg.fr/cgi-bin/nph-sesame").getcode() != 200:
+        pytest.skip("SESAME appears to be down, skipping test_name_resolve.py:test_names()...")
+
+    with pytest.raises(NameResolveError):
+        get_icrs_coordinates("m87h34hhh")
+
+    try:
+        icrs = get_icrs_coordinates("NGC 3642")
+    except NameResolveError:
+        ra, dec = _parse_response(_cached_ngc3642["all"])
+        icrs = SkyCoord(ra=float(ra)*u.degree, dec=float(dec)*u.degree)
+
+    icrs_true = SkyCoord(ra="11h 22m 18.014s", dec="59d 04m 27.27s")
+
+    # use precision of only 1 decimal here and below because the result can
+    # change due to Sesame server-side changes.
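+    # (for reference, a rough sketch of the expected numbers: the cached
+    # SIMBAD response above gives %J 170.5750583 +59.0742417 for NGC 3642,
+    # i.e. ra ~ 170.6 deg, dec ~ 59.1 deg, which is what icrs_true encodes)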
+    np.testing.assert_almost_equal(icrs.ra.degree, icrs_true.ra.degree, 1)
+    np.testing.assert_almost_equal(icrs.dec.degree, icrs_true.dec.degree, 1)
+
+    try:
+        icrs = get_icrs_coordinates("castor")
+    except NameResolveError:
+        ra, dec = _parse_response(_cached_castor["all"])
+        icrs = SkyCoord(ra=float(ra)*u.degree, dec=float(dec)*u.degree)
+
+    icrs_true = SkyCoord(ra="07h 34m 35.87s", dec="+31d 53m 17.8s")
+    np.testing.assert_almost_equal(icrs.ra.degree, icrs_true.ra.degree, 1)
+    np.testing.assert_almost_equal(icrs.dec.degree, icrs_true.dec.degree, 1)
+
+
+@remote_data
+@pytest.mark.parametrize(("name", "db_dict"), [('NGC 3642', _cached_ngc3642),
+                                               ('castor', _cached_castor)])
+def test_database_specify(name, db_dict):
+    # First check that at least some sesame mirror is up
+    for url in sesame_url.get():
+        if urllib.request.urlopen(url).getcode() == 200:
+            break
+    else:
+        pytest.skip("All SESAME mirrors appear to be down, skipping "
+                    "test_name_resolve.py:test_database_specify()...")
+
+    for db in db_dict.keys():
+        with sesame_database.set(db):
+            icrs = SkyCoord.from_name(name)
+
+        time.sleep(1)
diff --git a/astropy/coordinates/tests/test_pickle.py b/astropy/coordinates/tests/test_pickle.py
new file mode 100644
index 0000000..3658a61
--- /dev/null
+++ b/astropy/coordinates/tests/test_pickle.py
@@ -0,0 +1,75 @@
+import pytest
+import numpy as np
+
+from ...extern.six.moves import zip, cPickle as pickle
+from ...coordinates import Longitude
+from ... import coordinates as coord
+from ...tests.helper import pickle_protocol, check_pickling_recovery  # noqa
+
+# Can't test distances without scipy due to cosmology deps
+try:
+    import scipy  # pylint: disable=W0611
+    HAS_SCIPY = True
+except ImportError:
+    HAS_SCIPY = False
+
+
+def test_basic():
+    lon1 = Longitude(1.23, "radian", wrap_angle='180d')
+    s = pickle.dumps(lon1)
+    lon2 = pickle.loads(s)
+
+
+def test_pickle_longitude_wrap_angle():
+    a = Longitude(1.23, "radian", wrap_angle='180d')
+    s = pickle.dumps(a)
+    b = pickle.loads(s)
+
+    assert a.rad == b.rad
+    assert a.wrap_angle == b.wrap_angle
+
+
+_names = [coord.Angle,
+          coord.Distance,
+          coord.DynamicMatrixTransform,
+          coord.ICRS,
+          coord.Latitude,
+          coord.Longitude,
+          coord.StaticMatrixTransform,
+          ]
+
+_xfail = [False,
+          not HAS_SCIPY,
+          True,
+          True,
+          False,
+          True,
+          False]
+
+_args = [[0.0],
+         [],
+         [lambda *args: np.identity(3), coord.ICRS, coord.ICRS],
+         [0, 0],
+         [0],
+         [0],
+         [np.identity(3), coord.ICRS, coord.ICRS],
+         ]
+
+_kwargs = [{'unit': 'radian'},
+           {'z': 0.23},
+           {},
+           {'unit': ['radian', 'radian']},
+           {'unit': 'radian'},
+           {'unit': 'radian'},
+           {},
+           ]
+
+
+@pytest.mark.parametrize(("name", "args", "kwargs", "xfail"),
+                         zip(_names, _args, _kwargs, _xfail))
+def test_simple_object(pickle_protocol, name, args, kwargs, xfail):
+    # Tests easily instantiated objects
+    if xfail:
+        pytest.xfail()
+    original = name(*args, **kwargs)
+    check_pickling_recovery(original, pickle_protocol)
diff --git a/astropy/coordinates/tests/test_regression.py b/astropy/coordinates/tests/test_regression.py
new file mode 100644
index 0000000..b996a09
--- /dev/null
+++ b/astropy/coordinates/tests/test_regression.py
@@ -0,0 +1,596 @@
+# -*- coding: utf-8 -*-
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+
+"""
+Regression tests for coordinates-related bugs that don't have an obvious other
+place to live
+"""
+
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
+
+import pytest
+import numpy as np
+
+from ...extern import six
+
+from ... import units as u
+from .. import (AltAz, EarthLocation, SkyCoord, get_sun, ICRS, CIRS, ITRS,
+                GeocentricTrueEcliptic, Longitude, Latitude, GCRS, HCRS,
+                get_moon, FK4, FK4NoETerms, BaseCoordinateFrame,
+                QuantityAttribute, SphericalRepresentation,
+                UnitSphericalRepresentation, CartesianRepresentation)
+from ..sites import get_builtin_sites
+from ...time import Time
+from ...utils import iers
+from ...table import Table
+
+from ...tests.helper import assert_quantity_allclose, catch_warnings, quantity_allclose
+from .test_matching import HAS_SCIPY, OLDER_SCIPY
+
+try:
+    import yaml  # pylint: disable=W0611
+    HAS_YAML = True
+except ImportError:
+    HAS_YAML = False
+
+
+def test_regression_5085():
+    """
+    PR #5085 was put in place to fix the following issue.
+
+    Issue: https://github.com/astropy/astropy/issues/5069
+    At root was the transformation of Ecliptic coordinates with
+    non-scalar times.
+    """
+    times = Time(["2015-08-28 03:30", "2015-09-05 10:30", "2015-09-15 18:35"])
+    latitudes = Latitude([3.9807075, -5.00733806, 1.69539491]*u.deg)
+    longitudes = Longitude([311.79678613, 72.86626741, 199.58698226]*u.deg)
+    distances = u.Quantity([0.00243266, 0.0025424, 0.00271296]*u.au)
+    coo = GeocentricTrueEcliptic(lat=latitudes,
+                                 lon=longitudes,
+                                 distance=distances, equinox=times)
+    # expected result
+    ras = Longitude([310.50095400, 314.67109920, 319.56507428]*u.deg)
+    decs = Latitude([-18.25190443, -17.1556676, -15.71616522]*u.deg)
+    distances = u.Quantity([1.78309901, 1.710874, 1.61326649]*u.au)
+    expected_result = GCRS(ra=ras, dec=decs,
+                           distance=distances, obstime="J2000").cartesian.xyz
+    actual_result = coo.transform_to(GCRS(obstime="J2000")).cartesian.xyz
+    assert_quantity_allclose(expected_result, actual_result)
+
+
+def test_regression_3920():
+    """
+    Issue: https://github.com/astropy/astropy/issues/3920
+    """
+    loc = EarthLocation.from_geodetic(0*u.deg, 0*u.deg, 0)
+    time = Time('2010-1-1')
+
+    aa = AltAz(location=loc, obstime=time)
+    sc = SkyCoord(10*u.deg, 3*u.deg)
+    assert sc.transform_to(aa).shape == tuple()
+    # That part makes sense: the input is a scalar so the output is too
+
+    sc2 = SkyCoord(10*u.deg, 3*u.deg, 1*u.AU)
+    assert sc2.transform_to(aa).shape == tuple()
+    # in 3920 that assert fails, because the shape is (1,)
+
+    # check that the same behavior occurs even if transform is from low-level classes
+    icoo = ICRS(sc.data)
+    icoo2 = ICRS(sc2.data)
+    assert icoo.transform_to(aa).shape == tuple()
+    assert icoo2.transform_to(aa).shape == tuple()
+
+
+def test_regression_3938():
+    """
+    Issue: https://github.com/astropy/astropy/issues/3938
+    """
+    # Set up list of targets - we don't use `from_name` here to avoid
+    # remote_data requirements, but it does the same thing
+    # vega = SkyCoord.from_name('Vega')
+    vega = SkyCoord(279.23473479*u.deg, 38.78368896*u.deg)
+    # capella = SkyCoord.from_name('Capella')
+    capella = SkyCoord(79.17232794*u.deg, 45.99799147*u.deg)
+    # sirius = SkyCoord.from_name('Sirius')
+    sirius = SkyCoord(101.28715533*u.deg, -16.71611586*u.deg)
+    targets = [vega, capella, sirius]
+
+    # Feed list of targets into SkyCoord
+    combined_coords = SkyCoord(targets)
+
+    # Set up AltAz frame
+    time = Time('2012-01-01 00:00:00')
+    location = EarthLocation('10d', '45d', 0)
+    aa = AltAz(location=location, obstime=time)
+
+    combined_coords.transform_to(aa)
+    # in 3938 the above yields ``UnitConversionError: '' (dimensionless) and 'pc' (length) are not convertible``
+
+
+def test_regression_3998():
+    """
+    Issue: https://github.com/astropy/astropy/issues/3998
+    """
+    time = Time('2012-01-01 00:00:00')
+    assert time.isscalar
+
+    sun = get_sun(time)
+    assert sun.isscalar
+    # in 3998, the above yields False - `sun` is a length-1 vector
+
+    assert sun.obstime is time
+
+
+def test_regression_4033():
+    """
+    Issue: https://github.com/astropy/astropy/issues/4033
+    """
+    # alb = SkyCoord.from_name('Albireo')
+    alb = SkyCoord(292.68033548*u.deg, 27.95968007*u.deg)
+    alb_wdist = SkyCoord(alb, distance=133*u.pc)
+
+    # de = SkyCoord.from_name('Deneb')
+    de = SkyCoord(310.35797975*u.deg, 45.28033881*u.deg)
+    de_wdist = SkyCoord(de, distance=802*u.pc)
+
+    aa = AltAz(location=EarthLocation(lat=45*u.deg, lon=0*u.deg), obstime='2010-1-1')
+    deaa = de.transform_to(aa)
+    albaa = alb.transform_to(aa)
+    alb_wdistaa = alb_wdist.transform_to(aa)
+    de_wdistaa = de_wdist.transform_to(aa)
+
+    # these work fine
+    sepnod = deaa.separation(albaa)
+    sepwd = deaa.separation(alb_wdistaa)
+    assert_quantity_allclose(sepnod, 22.2862*u.deg, rtol=1e-6)
+    assert_quantity_allclose(sepwd, 22.2862*u.deg, rtol=1e-6)
+    # parallax should be present when distance added
+    assert np.abs(sepnod - sepwd) > 1*u.marcsec
+
+    # in 4033, the following fail with a recursion error
+    assert_quantity_allclose(de_wdistaa.separation(alb_wdistaa), 22.2862*u.deg, rtol=1e-3)
+    assert_quantity_allclose(alb_wdistaa.separation(deaa), 22.2862*u.deg, rtol=1e-3)
+
+
+@pytest.mark.skipif(not HAS_SCIPY, reason='No Scipy')
+@pytest.mark.skipif(OLDER_SCIPY, reason='Scipy too old')
+def test_regression_4082():
+    """
+    Issue: https://github.com/astropy/astropy/issues/4082
+    """
+    from .. import search_around_sky, search_around_3d
+    cat = SkyCoord([10.076, 10.00455], [18.54746, 18.54896], unit='deg')
+    search_around_sky(cat[0:1], cat, seplimit=u.arcsec * 60, storekdtree=False)
+    # in the issue, this raises a TypeError
+
+    # also check 3d for good measure, although it's not really affected by this bug directly
+    cat3d = SkyCoord([10.076, 10.00455]*u.deg, [18.54746, 18.54896]*u.deg, distance=[0.1, 1.5]*u.kpc)
+    search_around_3d(cat3d[0:1], cat3d, 1*u.kpc, storekdtree=False)
+
+
+def test_regression_4210():
+    """
+    Issue: https://github.com/astropy/astropy/issues/4210
+    Related PR with actual change: https://github.com/astropy/astropy/pull/4211
+    """
+    crd = SkyCoord(0*u.deg, 0*u.deg, distance=1*u.AU)
+    ecl = crd.geocentrictrueecliptic
+    # bug was that "lambda", which at the time was the name of the geocentric
+    # ecliptic longitude, is a reserved keyword. So this just makes sure the
+    # new names are all valid
+    ecl.lon
+
+    # and for good measure, check that the other ecliptic systems all use the
+    # same names for their attributes
+    from ..builtin_frames import ecliptic
+    for frame_name in ecliptic.__all__:
+        eclcls = getattr(ecliptic, frame_name)
+        eclobj = eclcls(1*u.deg, 2*u.deg, 3*u.AU)
+
+        eclobj.lat
+        eclobj.lon
+        eclobj.distance
+
+
+def test_regression_futuretimes_4302():
+    """
+    Checks that an error is not raised for future times not covered by IERS
+    tables (at least in a simple transform like CIRS->ITRS that simply requires
+    the UTC<->UT1 conversion).
+
+    Relevant comment: https://github.com/astropy/astropy/pull/4302#discussion_r44836531
+    """
+    from ...utils.exceptions import AstropyWarning
+
+    # this is an ugly hack to get the warning to show up even if it has already
+    # appeared
+    from ..builtin_frames import utils
+    if hasattr(utils, '__warningregistry__'):
+        utils.__warningregistry__.clear()
+
+    with catch_warnings() as found_warnings:
+        future_time = Time('2511-5-1')
+        c = CIRS(1*u.deg, 2*u.deg, obstime=future_time)
+        c.transform_to(ITRS(obstime=future_time))
+
+    if not isinstance(iers.IERS_Auto.iers_table, iers.IERS_Auto):
+        saw_iers_warnings = False
+        for w in found_warnings:
+            if issubclass(w.category, AstropyWarning):
+                if '(some) times are outside of range covered by IERS table' in str(w.message):
+                    saw_iers_warnings = True
+                    break
+        assert saw_iers_warnings, 'Never saw IERS warning'
+
+
+def test_regression_4996():
+    # this part is the actual regression test
+    deltat = np.linspace(-12, 12, 1000)*u.hour
+    times = Time('2012-7-13 00:00:00') + deltat
+    suncoo = get_sun(times)
+    assert suncoo.shape == (len(times),)
+
+    # and this is an additional test to make sure more complex arrays work
+    times2 = Time('2012-7-13 00:00:00') + deltat.reshape(10, 20, 5)
+    suncoo2 = get_sun(times2)
+    assert suncoo2.shape == times2.shape
+
+    # this is intentionally not allclose - they should be *exactly* the same
+    assert np.all(suncoo.ra.ravel() == suncoo2.ra.ravel())
+
+
+def test_regression_4293():
+    """Really just an extra test on FK4 no e, after finding that the units
+    were not always taken correctly. This test is against explicitly doing
+    the transformations on pp170 of Explanatory Supplement to the Astronomical
+    Almanac (Seidelmann, 2005).
+
+    See https://github.com/astropy/astropy/pull/4293#issuecomment-234973086
+    """
+    # Check all over sky, but avoiding poles (note that FK4 did not ignore
+    # e terms within 10° of the poles... see p170 of explan.supp.).
+    ra, dec = np.meshgrid(np.arange(0, 359, 45), np.arange(-80, 81, 40))
+    fk4 = FK4(ra.ravel() * u.deg, dec.ravel() * u.deg)
+
+    Dc = -0.065838*u.arcsec
+    Dd = +0.335299*u.arcsec
+    # Dc * tan(obliquity), as given on p.170
+    Dctano = -0.028553*u.arcsec
+
+    fk4noe_dec = (fk4.dec - (Dd*np.cos(fk4.ra) -
+                             Dc*np.sin(fk4.ra))*np.sin(fk4.dec) -
+                  Dctano*np.cos(fk4.dec))
+    fk4noe_ra = fk4.ra - (Dc*np.cos(fk4.ra) +
+                          Dd*np.sin(fk4.ra)) / np.cos(fk4.dec)
+
+    fk4noe = fk4.transform_to(FK4NoETerms)
+    # Tolerance here just set to how well the coordinates match, which is much
+    # better than the claimed accuracy of <1 mas for this first-order in
+    # v_earth/c approximation.
+    # Interestingly, if one divides by np.cos(fk4noe_dec) in the ra correction,
+    # the match becomes good to 2 μas.
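+    # For scale, a rough sketch of the arithmetic: the e-term coefficients
+    # above are a few tenths of an arcsec (~3e5 uas), so the atol values in
+    # the asserts below demand agreement at the ~1e-5 level of the correction
+    # itself.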
+    assert_quantity_allclose(fk4noe.ra, fk4noe_ra, atol=11.*u.uas, rtol=0)
+    assert_quantity_allclose(fk4noe.dec, fk4noe_dec, atol=3.*u.uas, rtol=0)
+
+
+def test_regression_4926():
+    times = Time('2010-01-1') + np.arange(20)*u.day
+    green = get_builtin_sites()['greenwich']
+    # this is the regression test
+    moon = get_moon(times, green)
+
+    # this is an additional test to make sure the GCRS->ICRS transform works for complex shapes
+    moon.transform_to(ICRS())
+
+    # and some others to increase coverage of transforms
+    moon.transform_to(HCRS(obstime="J2000"))
+    moon.transform_to(HCRS(obstime=times))
+
+
+def test_regression_5209():
+    "check that distances are not lost on SkyCoord init"
+    time = Time('2015-01-01')
+    moon = get_moon(time)
+    new_coord = SkyCoord([moon])
+    assert_quantity_allclose(new_coord[0].distance, moon.distance)
+
+
+def test_regression_5133():
+    N = 1000
+    np.random.seed(12345)
+    lon = np.random.uniform(-10, 10, N) * u.deg
+    lat = np.random.uniform(50, 52, N) * u.deg
+    alt = np.random.uniform(0, 10., N) * u.km
+
+    time = Time('2010-1-1')
+
+    objects = EarthLocation.from_geodetic(lon, lat, height=alt)
+    itrs_coo = objects.get_itrs(time)
+
+    homes = [EarthLocation.from_geodetic(lon=-1 * u.deg, lat=52 * u.deg, height=h)
+             for h in (0, 1000, 10000)*u.km]
+
+    altaz_frames = [AltAz(obstime=time, location=h) for h in homes]
+    altaz_coos = [itrs_coo.transform_to(f) for f in altaz_frames]
+
+    # they should all be different
+    for coo in altaz_coos[1:]:
+        assert not quantity_allclose(coo.az, coo.az[0])
+        assert not quantity_allclose(coo.alt, coo.alt[0])
+
+
+def test_itrs_vals_5133():
+    time = Time('2010-1-1')
+    el = EarthLocation.from_geodetic(lon=20*u.deg, lat=45*u.deg, height=0*u.km)
+
+    lons = [20, 30, 20]*u.deg
+    lats = [44, 45, 45]*u.deg
+    alts = [0, 0, 10]*u.km
+    coos = [EarthLocation.from_geodetic(lon, lat, height=alt).get_itrs(time)
+            for lon, lat, alt in zip(lons, lats, alts)]
+
+    aaf = AltAz(obstime=time, location=el)
+    aacs = [coo.transform_to(aaf) for coo in coos]
+
+    assert all([coo.isscalar for coo in aacs])
+
+    # the ~1 arcsec tolerance is b/c aberration makes it not exact
+    assert_quantity_allclose(aacs[0].az, 180*u.deg, atol=1*u.arcsec)
+    assert aacs[0].alt < 0*u.deg
+    assert aacs[0].distance > 50*u.km
+
+    # it should *not* actually be 90 degrees, b/c constant latitude is not
+    # straight east anywhere except the equator... but should be close-ish
+    assert_quantity_allclose(aacs[1].az, 90*u.deg, atol=5*u.deg)
+    assert aacs[1].alt < 0*u.deg
+    assert aacs[1].distance > 50*u.km
+
+    assert_quantity_allclose(aacs[2].alt, 90*u.deg, atol=1*u.arcsec)
+    assert_quantity_allclose(aacs[2].distance, 10*u.km)
+
+
+def test_regression_simple_5133():
+    t = Time('J2010')
+    obj = EarthLocation(-1*u.deg, 52*u.deg, height=[100., 0.]*u.km)
+    home = EarthLocation(-1*u.deg, 52*u.deg, height=10.*u.km)
+    aa = obj.get_itrs(t).transform_to(AltAz(obstime=t, location=home))
+
+    # az is more-or-less undefined for straight up or down
+    assert_quantity_allclose(aa.alt, [90, -90]*u.deg, rtol=1e-5)
+    assert_quantity_allclose(aa.distance, [90, 10]*u.km)
+
+
+def test_regression_5743():
+    sc = SkyCoord([5, 10], [20, 30], unit=u.deg,
+                  obstime=['2017-01-01T00:00', '2017-01-01T00:10'])
+    assert sc[0].obstime.shape == tuple()
+
+
+def test_regression_5889_5890():
+    # ensure we can represent all Representations and transform to ND frames
+    greenwich = EarthLocation(
+        *u.Quantity([3980608.90246817, -102.47522911, 4966861.27310067],
+                    unit=u.m))
+    times = Time("2017-03-20T12:00:00") + np.linspace(-2, 2, 3)*u.hour
+    moon = get_moon(times, location=greenwich)
+    targets = SkyCoord([350.7*u.deg, 260.7*u.deg], [18.4*u.deg, 22.4*u.deg])
+    targs2d = targets[:, np.newaxis]
+    targs2d.transform_to(moon)
+
+
+def test_regression_6236():
+    # sunpy changes its representation upon initialisation of a frame,
+    # including via `realize_frame`. Ensure this works.
+    class MyFrame(BaseCoordinateFrame):
+        default_representation = CartesianRepresentation
+        my_attr = QuantityAttribute(default=0, unit=u.m)
+
+    class MySpecialFrame(MyFrame):
+        def __init__(self, *args, **kwargs):
+            _rep_kwarg = kwargs.get('representation', None)
+            super(MyFrame, self).__init__(*args, **kwargs)
+            if not _rep_kwarg:
+                self.representation = self.default_representation
+                self._data = self.data.represent_as(self.representation)
+
+    rep1 = UnitSphericalRepresentation([0., 1]*u.deg, [2., 3.]*u.deg)
+    rep2 = SphericalRepresentation([10., 11]*u.deg, [12., 13.]*u.deg,
+                                   [14., 15.]*u.kpc)
+    mf1 = MyFrame(rep1, my_attr=1.*u.km)
+    mf2 = mf1.realize_frame(rep2)
+    # Normally, data is stored as is, but the representation gets set to a
+    # default, even if a different representation instance was passed in.
+    # realize_frame should do the same. Just in case, check attrs are passed.
+    assert mf1.data is rep1
+    assert mf2.data is rep2
+    assert mf1.representation is CartesianRepresentation
+    assert mf2.representation is CartesianRepresentation
+    assert mf2.my_attr == mf1.my_attr
+    # It should be independent of whether I set the representation explicitly
+    mf3 = MyFrame(rep1, my_attr=1.*u.km, representation='unitspherical')
+    mf4 = mf3.realize_frame(rep2)
+    assert mf3.data is rep1
+    assert mf4.data is rep2
+    assert mf3.representation is UnitSphericalRepresentation
+    assert mf4.representation is CartesianRepresentation
+    assert mf4.my_attr == mf3.my_attr
+    # This should be enough to help sunpy, but just to be sure, a test
+    # even closer to what is done there, i.e., transform the representation.
+    msf1 = MySpecialFrame(rep1, my_attr=1.*u.km)
+    msf2 = msf1.realize_frame(rep2)
+    assert msf1.data is not rep1  # Gets transformed to Cartesian.
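+    # (MySpecialFrame re-represents its data on init, so unlike mf1/mf2 above
+    # the stored data here is a fresh Cartesian instance, not rep1/rep2.)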
+    assert msf2.data is not rep2
+    assert type(msf1.data) is CartesianRepresentation
+    assert type(msf2.data) is CartesianRepresentation
+    assert msf1.representation is CartesianRepresentation
+    assert msf2.representation is CartesianRepresentation
+    assert msf2.my_attr == msf1.my_attr
+    # And finally a test where the input is not transformed.
+    msf3 = MySpecialFrame(rep1, my_attr=1.*u.km,
+                          representation='unitspherical')
+    msf4 = msf3.realize_frame(rep2)
+    assert msf3.data is rep1
+    assert msf4.data is not rep2
+    assert msf3.representation is UnitSphericalRepresentation
+    assert msf4.representation is CartesianRepresentation
+    assert msf4.my_attr == msf3.my_attr
+
+
+@pytest.mark.skipif(not HAS_SCIPY, reason='No Scipy')
+@pytest.mark.skipif(OLDER_SCIPY, reason='Scipy too old')
+def test_regression_6347():
+    sc1 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg)
+    sc2 = SkyCoord([1.1, 2.1]*u.deg, [3.1, 4.1]*u.deg)
+    sc0 = sc1[:0]
+
+    idx1_10, idx2_10, d2d_10, d3d_10 = sc1.search_around_sky(sc2, 10*u.arcmin)
+    idx1_1, idx2_1, d2d_1, d3d_1 = sc1.search_around_sky(sc2, 1*u.arcmin)
+    idx1_0, idx2_0, d2d_0, d3d_0 = sc0.search_around_sky(sc2, 10*u.arcmin)
+
+    assert len(d2d_10) == 2
+
+    assert len(d2d_0) == 0
+    assert type(d2d_0) is type(d2d_10)
+
+    assert len(d2d_1) == 0
+    assert type(d2d_1) is type(d2d_10)
+
+
+@pytest.mark.skipif(not HAS_SCIPY, reason='No Scipy')
+@pytest.mark.skipif(OLDER_SCIPY, reason='Scipy too old')
+def test_regression_6347_3d():
+    sc1 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg, [5, 6]*u.kpc)
+    sc2 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg, [5.1, 6.1]*u.kpc)
+    sc0 = sc1[:0]
+
+    idx1_10, idx2_10, d2d_10, d3d_10 = sc1.search_around_3d(sc2, 500*u.pc)
+    idx1_1, idx2_1, d2d_1, d3d_1 = sc1.search_around_3d(sc2, 50*u.pc)
+    idx1_0, idx2_0, d2d_0, d3d_0 = sc0.search_around_3d(sc2, 500*u.pc)
+
+    assert len(d2d_10) > 0
+
+    assert len(d2d_0) == 0
+    assert type(d2d_0) is type(d2d_10)
+
+    assert len(d2d_1) == 0
+    assert type(d2d_1) is type(d2d_10)
+
+
+def test_regression_6300():
+    """Check that importing old frame attribute names from astropy.coordinates
+    still works. See comments at end of #6300
+    """
+    from ...utils.exceptions import AstropyDeprecationWarning
+    from .. import CartesianRepresentation
+    from .. import (TimeFrameAttribute, QuantityFrameAttribute,
+                    CartesianRepresentationFrameAttribute)
+
+    with catch_warnings() as found_warnings:
+        attr = TimeFrameAttribute(default=Time("J2000"))
+
+        for w in found_warnings:
+            if issubclass(w.category, AstropyDeprecationWarning):
+                break
+        else:
+            assert False, "Deprecation warning not raised"
+
+    with catch_warnings() as found_warnings:
+        attr = QuantityFrameAttribute(default=5*u.km)
+
+        for w in found_warnings:
+            if issubclass(w.category, AstropyDeprecationWarning):
+                break
+        else:
+            assert False, "Deprecation warning not raised"
+
+    with catch_warnings() as found_warnings:
+        attr = CartesianRepresentationFrameAttribute(
+            default=CartesianRepresentation([5, 6, 7]*u.kpc))
+
+        for w in found_warnings:
+            if issubclass(w.category, AstropyDeprecationWarning):
+                break
+        else:
+            assert False, "Deprecation warning not raised"
+
+
+def test_gcrs_itrs_cartesian_repr():
+    # issue 6436: transformation failed if coordinate representation was
+    # Cartesian
+    gcrs = GCRS(CartesianRepresentation((859.07256, -4137.20368, 5295.56871),
+                                        unit='km'), representation='cartesian')
+    gcrs.transform_to(ITRS)
+
+
+@pytest.mark.skipif('not HAS_YAML')
+def test_regression_6446():
+    # this succeeds even before 6446:
+    sc1 = SkyCoord([1, 2], [3, 4], unit='deg')
+    t1 = Table([sc1])
+    sio1 = six.StringIO()
+    t1.write(sio1, format='ascii.ecsv')
+
+    # but this fails due to the 6446 bug
+    c1 = SkyCoord(1, 3, unit='deg')
+    c2 = SkyCoord(2, 4, unit='deg')
+    sc2 = SkyCoord([c1, c2])
+    t2 = Table([sc2])
+    sio2 = six.StringIO()
+    t2.write(sio2, format='ascii.ecsv')
+
+    assert sio1.getvalue() == sio2.getvalue()
+
+
+def test_regression_6448():
+    """
+    This tests the more narrow problem reported in 6446 that 6448 is meant to
+    fix. `test_regression_6446` also covers this, but this test is provided
+    so that this is still tested even if YAML isn't installed.
+    """
+    sc1 = SkyCoord([1, 2], [3, 4], unit='deg')
+    # this should always succeed even prior to 6448
+    assert sc1.galcen_v_sun is None
+
+    c1 = SkyCoord(1, 3, unit='deg')
+    c2 = SkyCoord(2, 4, unit='deg')
+    sc2 = SkyCoord([c1, c2])
+    # without 6448 this fails
+    assert sc2.galcen_v_sun is None
+
+
+def test_regression_6597():
+    frame_name = 'galactic'
+    c1 = SkyCoord(1, 3, unit='deg', frame=frame_name)
+    c2 = SkyCoord(2, 4, unit='deg', frame=frame_name)
+    sc1 = SkyCoord([c1, c2])
+
+    assert sc1.frame.name == frame_name
+
+
+def test_regression_6597_2():
+    """
+    This tests the more subtle flaw that #6597 indirectly uncovered: that even
+    in the case that the frames are ra/dec, they still might be the wrong *kind*
+    """
+    frame = FK4(equinox='J1949')
+    c1 = SkyCoord(1, 3, unit='deg', frame=frame)
+    c2 = SkyCoord(2, 4, unit='deg', frame=frame)
+    sc1 = SkyCoord([c1, c2])
+
+    assert sc1.frame.name == frame.name
+
+
+def test_regression_6697():
+    """
+    Test for regression of a bug in get_gcrs_posvel that introduced errors at the 1 m/s level.
+ + Comparison data is derived from calculation in PINT + https://github.com/nanograv/PINT/blob/master/pint/erfautils.py + """ + pint_vels = CartesianRepresentation(*(348.63632871, -212.31704928, -0.60154936), unit=u.m/u.s) + location = EarthLocation(*(5327448.9957829, -1718665.73869569, 3051566.90295403), unit=u.m) + t = Time(2458036.161966612, format='jd', scale='utc') + obsgeopos, obsgeovel = location.get_gcrs_posvel(t) + delta = (obsgeovel-pint_vels).norm() + assert delta < 1*u.cm/u.s diff --git a/astropy/coordinates/tests/test_representation.py b/astropy/coordinates/tests/test_representation.py new file mode 100644 index 0000000..f818945 --- /dev/null +++ b/astropy/coordinates/tests/test_representation.py @@ -0,0 +1,1365 @@ +# -*- coding: utf-8 -*- +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +from copy import deepcopy +from collections import OrderedDict + +import pytest +import numpy as np +from numpy.testing import assert_allclose + +from ... import units as u +from ...tests.helper import (assert_quantity_allclose as + assert_allclose_quantity) +from ...utils import isiterable +from ...utils.compat import NUMPY_LT_1_14 +from ..angles import Longitude, Latitude, Angle +from ..distances import Distance +from ..representation import (REPRESENTATION_CLASSES, + DIFFERENTIAL_CLASSES, + BaseRepresentation, + SphericalRepresentation, + UnitSphericalRepresentation, + SphericalCosLatDifferential, + CartesianRepresentation, + CylindricalRepresentation, + PhysicsSphericalRepresentation, + CartesianDifferential, + SphericalDifferential, + _combine_xyz) + + +# Preserve the original REPRESENTATION_CLASSES dict so that importing +# the test file doesn't add a persistent test subclass (LogDRepresentation) +def setup_function(func): + func.REPRESENTATION_CLASSES_ORIG = deepcopy(REPRESENTATION_CLASSES) + + +def teardown_function(func): + REPRESENTATION_CLASSES.clear() + REPRESENTATION_CLASSES.update(func.REPRESENTATION_CLASSES_ORIG) + + +class TestSphericalRepresentation(object): + + def test_name(self): + assert SphericalRepresentation.get_name() == 'spherical' + assert SphericalRepresentation.get_name() in REPRESENTATION_CLASSES + + def test_empty_init(self): + with pytest.raises(TypeError) as exc: + s = SphericalRepresentation() + + def test_init_quantity(self): + + s3 = SphericalRepresentation(lon=8 * u.hourangle, lat=5 * u.deg, distance=10 * u.kpc) + assert s3.lon == 8. * u.hourangle + assert s3.lat == 5. * u.deg + assert s3.distance == 10 * u.kpc + + assert isinstance(s3.lon, Longitude) + assert isinstance(s3.lat, Latitude) + assert isinstance(s3.distance, Distance) + + def test_init_lonlat(self): + + s2 = SphericalRepresentation(Longitude(8, u.hour), + Latitude(5, u.deg), + Distance(10, u.kpc)) + + assert s2.lon == 8. * u.hourangle + assert s2.lat == 5. * u.deg + assert s2.distance == 10. * u.kpc + + assert isinstance(s2.lon, Longitude) + assert isinstance(s2.lat, Latitude) + assert isinstance(s2.distance, Distance) + + # also test that wrap_angle is preserved + s3 = SphericalRepresentation(Longitude(-90, u.degree, + wrap_angle=180*u.degree), + Latitude(-45, u.degree), + Distance(1., u.Rsun)) + assert s3.lon == -90. 
* u.degree + assert s3.lon.wrap_angle == 180 * u.degree + + def test_init_array(self): + + s1 = SphericalRepresentation(lon=[8, 9] * u.hourangle, + lat=[5, 6] * u.deg, + distance=[1, 2] * u.kpc) + + assert_allclose(s1.lon.degree, [120, 135]) + assert_allclose(s1.lat.degree, [5, 6]) + assert_allclose(s1.distance.kpc, [1, 2]) + + assert isinstance(s1.lon, Longitude) + assert isinstance(s1.lat, Latitude) + assert isinstance(s1.distance, Distance) + + def test_init_array_nocopy(self): + + lon = Longitude([8, 9] * u.hourangle) + lat = Latitude([5, 6] * u.deg) + distance = Distance([1, 2] * u.kpc) + + s1 = SphericalRepresentation(lon=lon, lat=lat, distance=distance, copy=False) + + lon[:] = [1, 2] * u.rad + lat[:] = [3, 4] * u.arcmin + distance[:] = [8, 9] * u.Mpc + + assert_allclose_quantity(lon, s1.lon) + assert_allclose_quantity(lat, s1.lat) + assert_allclose_quantity(distance, s1.distance) + + def test_init_float32_array(self): + """Regression test against #2983""" + lon = Longitude(np.float32([1., 2.]), u.degree) + lat = Latitude(np.float32([3., 4.]), u.degree) + s1 = UnitSphericalRepresentation(lon=lon, lat=lat, copy=False) + assert s1.lon.dtype == np.float32 + assert s1.lat.dtype == np.float32 + assert s1._values['lon'].dtype == np.float32 + assert s1._values['lat'].dtype == np.float32 + + def test_reprobj(self): + + s1 = SphericalRepresentation(lon=8 * u.hourangle, lat=5 * u.deg, distance=10 * u.kpc) + + s2 = SphericalRepresentation.from_representation(s1) + + assert_allclose_quantity(s2.lon, 8. * u.hourangle) + assert_allclose_quantity(s2.lat, 5. * u.deg) + assert_allclose_quantity(s2.distance, 10 * u.kpc) + + def test_broadcasting(self): + + s1 = SphericalRepresentation(lon=[8, 9] * u.hourangle, + lat=[5, 6] * u.deg, + distance=10 * u.kpc) + + assert_allclose_quantity(s1.lon, [120, 135] * u.degree) + assert_allclose_quantity(s1.lat, [5, 6] * u.degree) + assert_allclose_quantity(s1.distance, [10, 10] * u.kpc) + + def test_broadcasting_mismatch(self): + + with pytest.raises(ValueError) as exc: + s1 = SphericalRepresentation(lon=[8, 9, 10] * u.hourangle, + lat=[5, 6] * u.deg, + distance=[1, 2] * u.kpc) + assert exc.value.args[0] == "Input parameters lon, lat, and distance cannot be broadcast" + + def test_readonly(self): + + s1 = SphericalRepresentation(lon=8 * u.hourangle, + lat=5 * u.deg, + distance=1. * u.kpc) + + with pytest.raises(AttributeError): + s1.lon = 1. * u.deg + + with pytest.raises(AttributeError): + s1.lat = 1. * u.deg + + with pytest.raises(AttributeError): + s1.distance = 1. 
* u.kpc + + def test_getitem_len_iterable(self): + + s = SphericalRepresentation(lon=np.arange(10) * u.deg, + lat=-np.arange(10) * u.deg, + distance=1 * u.kpc) + + s_slc = s[2:8:2] + + assert_allclose_quantity(s_slc.lon, [2, 4, 6] * u.deg) + assert_allclose_quantity(s_slc.lat, [-2, -4, -6] * u.deg) + assert_allclose_quantity(s_slc.distance, [1, 1, 1] * u.kpc) + + assert len(s) == 10 + assert isiterable(s) + + def test_getitem_len_iterable_scalar(self): + + s = SphericalRepresentation(lon=1 * u.deg, + lat=-2 * u.deg, + distance=3 * u.kpc) + + with pytest.raises(TypeError): + s_slc = s[0] + with pytest.raises(TypeError): + len(s) + assert not isiterable(s) + + +class TestUnitSphericalRepresentation(object): + + def test_name(self): + assert UnitSphericalRepresentation.get_name() == 'unitspherical' + assert UnitSphericalRepresentation.get_name() in REPRESENTATION_CLASSES + + def test_empty_init(self): + with pytest.raises(TypeError) as exc: + s = UnitSphericalRepresentation() + + def test_init_quantity(self): + + s3 = UnitSphericalRepresentation(lon=8 * u.hourangle, lat=5 * u.deg) + assert s3.lon == 8. * u.hourangle + assert s3.lat == 5. * u.deg + + assert isinstance(s3.lon, Longitude) + assert isinstance(s3.lat, Latitude) + + def test_init_lonlat(self): + + s2 = UnitSphericalRepresentation(Longitude(8, u.hour), + Latitude(5, u.deg)) + + assert s2.lon == 8. * u.hourangle + assert s2.lat == 5. * u.deg + + assert isinstance(s2.lon, Longitude) + assert isinstance(s2.lat, Latitude) + + def test_init_array(self): + + s1 = UnitSphericalRepresentation(lon=[8, 9] * u.hourangle, + lat=[5, 6] * u.deg) + + assert_allclose(s1.lon.degree, [120, 135]) + assert_allclose(s1.lat.degree, [5, 6]) + + assert isinstance(s1.lon, Longitude) + assert isinstance(s1.lat, Latitude) + + def test_init_array_nocopy(self): + + lon = Longitude([8, 9] * u.hourangle) + lat = Latitude([5, 6] * u.deg) + + s1 = UnitSphericalRepresentation(lon=lon, lat=lat, copy=False) + + lon[:] = [1, 2] * u.rad + lat[:] = [3, 4] * u.arcmin + + assert_allclose_quantity(lon, s1.lon) + assert_allclose_quantity(lat, s1.lat) + + def test_reprobj(self): + + s1 = UnitSphericalRepresentation(lon=8 * u.hourangle, lat=5 * u.deg) + + s2 = UnitSphericalRepresentation.from_representation(s1) + + assert_allclose_quantity(s2.lon, 8. * u.hourangle) + assert_allclose_quantity(s2.lat, 5. * u.deg) + + def test_broadcasting(self): + + s1 = UnitSphericalRepresentation(lon=[8, 9] * u.hourangle, + lat=[5, 6] * u.deg) + + assert_allclose_quantity(s1.lon, [120, 135] * u.degree) + assert_allclose_quantity(s1.lat, [5, 6] * u.degree) + + def test_broadcasting_mismatch(self): + + with pytest.raises(ValueError) as exc: + s1 = UnitSphericalRepresentation(lon=[8, 9, 10] * u.hourangle, + lat=[5, 6] * u.deg) + assert exc.value.args[0] == "Input parameters lon and lat cannot be broadcast" + + def test_readonly(self): + + s1 = UnitSphericalRepresentation(lon=8 * u.hourangle, + lat=5 * u.deg) + + with pytest.raises(AttributeError): + s1.lon = 1. * u.deg + + with pytest.raises(AttributeError): + s1.lat = 1. 
* u.deg + + def test_getitem(self): + + s = UnitSphericalRepresentation(lon=np.arange(10) * u.deg, + lat=-np.arange(10) * u.deg) + + s_slc = s[2:8:2] + + assert_allclose_quantity(s_slc.lon, [2, 4, 6] * u.deg) + assert_allclose_quantity(s_slc.lat, [-2, -4, -6] * u.deg) + + def test_getitem_scalar(self): + + s = UnitSphericalRepresentation(lon=1 * u.deg, + lat=-2 * u.deg) + + with pytest.raises(TypeError): + s_slc = s[0] + + +class TestPhysicsSphericalRepresentation(object): + + def test_name(self): + assert PhysicsSphericalRepresentation.get_name() == 'physicsspherical' + assert PhysicsSphericalRepresentation.get_name() in REPRESENTATION_CLASSES + + def test_empty_init(self): + with pytest.raises(TypeError) as exc: + s = PhysicsSphericalRepresentation() + + def test_init_quantity(self): + + s3 = PhysicsSphericalRepresentation(phi=8 * u.hourangle, theta=5 * u.deg, r=10 * u.kpc) + assert s3.phi == 8. * u.hourangle + assert s3.theta == 5. * u.deg + assert s3.r == 10 * u.kpc + + assert isinstance(s3.phi, Angle) + assert isinstance(s3.theta, Angle) + assert isinstance(s3.r, Distance) + + def test_init_phitheta(self): + + s2 = PhysicsSphericalRepresentation(Angle(8, u.hour), + Angle(5, u.deg), + Distance(10, u.kpc)) + + assert s2.phi == 8. * u.hourangle + assert s2.theta == 5. * u.deg + assert s2.r == 10. * u.kpc + + assert isinstance(s2.phi, Angle) + assert isinstance(s2.theta, Angle) + assert isinstance(s2.r, Distance) + + def test_init_array(self): + + s1 = PhysicsSphericalRepresentation(phi=[8, 9] * u.hourangle, + theta=[5, 6] * u.deg, + r=[1, 2] * u.kpc) + + assert_allclose(s1.phi.degree, [120, 135]) + assert_allclose(s1.theta.degree, [5, 6]) + assert_allclose(s1.r.kpc, [1, 2]) + + assert isinstance(s1.phi, Angle) + assert isinstance(s1.theta, Angle) + assert isinstance(s1.r, Distance) + + def test_init_array_nocopy(self): + + phi = Angle([8, 9] * u.hourangle) + theta = Angle([5, 6] * u.deg) + r = Distance([1, 2] * u.kpc) + + s1 = PhysicsSphericalRepresentation(phi=phi, theta=theta, r=r, copy=False) + + phi[:] = [1, 2] * u.rad + theta[:] = [3, 4] * u.arcmin + r[:] = [8, 9] * u.Mpc + + assert_allclose_quantity(phi, s1.phi) + assert_allclose_quantity(theta, s1.theta) + assert_allclose_quantity(r, s1.r) + + def test_reprobj(self): + + s1 = PhysicsSphericalRepresentation(phi=8 * u.hourangle, theta=5 * u.deg, r=10 * u.kpc) + + s2 = PhysicsSphericalRepresentation.from_representation(s1) + + assert_allclose_quantity(s2.phi, 8. * u.hourangle) + assert_allclose_quantity(s2.theta, 5. * u.deg) + assert_allclose_quantity(s2.r, 10 * u.kpc) + + def test_broadcasting(self): + + s1 = PhysicsSphericalRepresentation(phi=[8, 9] * u.hourangle, + theta=[5, 6] * u.deg, + r=10 * u.kpc) + + assert_allclose_quantity(s1.phi, [120, 135] * u.degree) + assert_allclose_quantity(s1.theta, [5, 6] * u.degree) + assert_allclose_quantity(s1.r, [10, 10] * u.kpc) + + def test_broadcasting_mismatch(self): + + with pytest.raises(ValueError) as exc: + s1 = PhysicsSphericalRepresentation(phi=[8, 9, 10] * u.hourangle, + theta=[5, 6] * u.deg, + r=[1, 2] * u.kpc) + assert exc.value.args[0] == "Input parameters phi, theta, and r cannot be broadcast" + + def test_readonly(self): + + s1 = PhysicsSphericalRepresentation(phi=[8, 9] * u.hourangle, + theta=[5, 6] * u.deg, + r=[10, 20] * u.kpc) + + with pytest.raises(AttributeError): + s1.phi = 1. * u.deg + + with pytest.raises(AttributeError): + s1.theta = 1. * u.deg + + with pytest.raises(AttributeError): + s1.r = 1. 
* u.kpc + + def test_getitem(self): + + s = PhysicsSphericalRepresentation(phi=np.arange(10) * u.deg, + theta=np.arange(5, 15) * u.deg, + r=1 * u.kpc) + + s_slc = s[2:8:2] + + assert_allclose_quantity(s_slc.phi, [2, 4, 6] * u.deg) + assert_allclose_quantity(s_slc.theta, [7, 9, 11] * u.deg) + assert_allclose_quantity(s_slc.r, [1, 1, 1] * u.kpc) + + def test_getitem_scalar(self): + + s = PhysicsSphericalRepresentation(phi=1 * u.deg, + theta=2 * u.deg, + r=3 * u.kpc) + + with pytest.raises(TypeError): + s_slc = s[0] + + +class TestCartesianRepresentation(object): + + def test_name(self): + assert CartesianRepresentation.get_name() == 'cartesian' + assert CartesianRepresentation.get_name() in REPRESENTATION_CLASSES + + def test_empty_init(self): + with pytest.raises(TypeError) as exc: + s = CartesianRepresentation() + + def test_init_quantity(self): + + s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc) + + assert s1.x.unit is u.kpc + assert s1.y.unit is u.kpc + assert s1.z.unit is u.kpc + + assert_allclose(s1.x.value, 1) + assert_allclose(s1.y.value, 2) + assert_allclose(s1.z.value, 3) + + def test_init_singleunit(self): + + s1 = CartesianRepresentation(x=1, y=2, z=3, unit=u.kpc) + + assert s1.x.unit is u.kpc + assert s1.y.unit is u.kpc + assert s1.z.unit is u.kpc + + assert_allclose(s1.x.value, 1) + assert_allclose(s1.y.value, 2) + assert_allclose(s1.z.value, 3) + + def test_init_array(self): + + s1 = CartesianRepresentation(x=[1, 2, 3] * u.pc, + y=[2, 3, 4] * u.Mpc, + z=[3, 4, 5] * u.kpc) + + assert s1.x.unit is u.pc + assert s1.y.unit is u.Mpc + assert s1.z.unit is u.kpc + + assert_allclose(s1.x.value, [1, 2, 3]) + assert_allclose(s1.y.value, [2, 3, 4]) + assert_allclose(s1.z.value, [3, 4, 5]) + + def test_init_one_array(self): + + s1 = CartesianRepresentation(x=[1, 2, 3] * u.pc) + + assert s1.x.unit is u.pc + assert s1.y.unit is u.pc + assert s1.z.unit is u.pc + + assert_allclose(s1.x.value, 1) + assert_allclose(s1.y.value, 2) + assert_allclose(s1.z.value, 3) + + r = np.arange(27.).reshape(3, 3, 3) * u.kpc + s2 = CartesianRepresentation(r, xyz_axis=0) + assert s2.shape == (3, 3) + assert s2.x.unit == u.kpc + assert np.all(s2.x == r[0]) + assert np.all(s2.xyz == r) + assert np.all(s2.get_xyz(xyz_axis=0) == r) + s3 = CartesianRepresentation(r, xyz_axis=1) + assert s3.shape == (3, 3) + assert np.all(s3.x == r[:, 0]) + assert np.all(s3.y == r[:, 1]) + assert np.all(s3.z == r[:, 2]) + assert np.all(s3.get_xyz(xyz_axis=1) == r) + s4 = CartesianRepresentation(r, xyz_axis=2) + assert s4.shape == (3, 3) + assert np.all(s4.x == r[:, :, 0]) + assert np.all(s4.get_xyz(xyz_axis=2) == r) + s5 = CartesianRepresentation(r, unit=u.pc) + assert s5.x.unit == u.pc + assert np.all(s5.xyz == r) + s6 = CartesianRepresentation(r.value, unit=u.pc, xyz_axis=2) + assert s6.x.unit == u.pc + assert np.all(s6.get_xyz(xyz_axis=2).value == r.value) + + def test_init_one_array_size_fail(self): + with pytest.raises(ValueError) as exc: + CartesianRepresentation(x=[1, 2, 3, 4] * u.pc) + assert exc.value.args[0].startswith("too many values to unpack") + + def test_init_xyz_but_more_than_one_array_fail(self): + with pytest.raises(ValueError) as exc: + CartesianRepresentation(x=[1, 2, 3] * u.pc, y=[2, 3, 4] * u.pc, + z=[3, 4, 5] * u.pc, xyz_axis=0) + assert 'xyz_axis should only be set' in str(exc) + + def test_init_one_array_yz_fail(self): + with pytest.raises(ValueError) as exc: + CartesianRepresentation(x=[1, 2, 3, 4] * u.pc, y=[1, 2] * u.pc) + assert exc.value.args[0] == ("x, y, and z are required to 
instantiate " + "CartesianRepresentation") + + def test_init_array_nocopy(self): + + x = [8, 9, 10] * u.pc + y = [5, 6, 7] * u.Mpc + z = [2, 3, 4] * u.kpc + + s1 = CartesianRepresentation(x=x, y=y, z=z, copy=False) + + x[:] = [1, 2, 3] * u.kpc + y[:] = [9, 9, 8] * u.kpc + z[:] = [1, 2, 1] * u.kpc + + assert_allclose_quantity(x, s1.x) + assert_allclose_quantity(y, s1.y) + assert_allclose_quantity(z, s1.z) + + def test_reprobj(self): + + s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc) + + s2 = CartesianRepresentation.from_representation(s1) + + assert s2.x == 1 * u.kpc + assert s2.y == 2 * u.kpc + assert s2.z == 3 * u.kpc + + def test_broadcasting(self): + + s1 = CartesianRepresentation(x=[1, 2] * u.kpc, y=[3, 4] * u.kpc, z=5 * u.kpc) + + assert s1.x.unit == u.kpc + assert s1.y.unit == u.kpc + assert s1.z.unit == u.kpc + + assert_allclose(s1.x.value, [1, 2]) + assert_allclose(s1.y.value, [3, 4]) + assert_allclose(s1.z.value, [5, 5]) + + def test_broadcasting_mismatch(self): + + with pytest.raises(ValueError) as exc: + s1 = CartesianRepresentation(x=[1, 2] * u.kpc, y=[3, 4] * u.kpc, z=[5, 6, 7] * u.kpc) + assert exc.value.args[0] == "Input parameters x, y, and z cannot be broadcast" + + def test_readonly(self): + + s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc) + + with pytest.raises(AttributeError): + s1.x = 1. * u.kpc + + with pytest.raises(AttributeError): + s1.y = 1. * u.kpc + + with pytest.raises(AttributeError): + s1.z = 1. * u.kpc + + def test_xyz(self): + + s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc) + + assert isinstance(s1.xyz, u.Quantity) + assert s1.xyz.unit is u.kpc + + assert_allclose(s1.xyz.value, [1, 2, 3]) + + def test_unit_mismatch(self): + + q_len = u.Quantity([1], u.km) + q_nonlen = u.Quantity([1], u.kg) + + with pytest.raises(u.UnitsError) as exc: + s1 = CartesianRepresentation(x=q_nonlen, y=q_len, z=q_len) + assert exc.value.args[0] == "x, y, and z should have matching physical types" + + with pytest.raises(u.UnitsError) as exc: + s1 = CartesianRepresentation(x=q_len, y=q_nonlen, z=q_len) + assert exc.value.args[0] == "x, y, and z should have matching physical types" + + with pytest.raises(u.UnitsError) as exc: + s1 = CartesianRepresentation(x=q_len, y=q_len, z=q_nonlen) + assert exc.value.args[0] == "x, y, and z should have matching physical types" + + def test_unit_non_length(self): + + s1 = CartesianRepresentation(x=1 * u.kg, y=2 * u.kg, z=3 * u.kg) + + s2 = CartesianRepresentation(x=1 * u.km / u.s, y=2 * u.km / u.s, z=3 * u.km / u.s) + + banana = u.def_unit('banana') + s3 = CartesianRepresentation(x=1 * banana, y=2 * banana, z=3 * banana) + + def test_getitem(self): + + s = CartesianRepresentation(x=np.arange(10) * u.m, + y=-np.arange(10) * u.m, + z=3 * u.km) + + s_slc = s[2:8:2] + + assert_allclose_quantity(s_slc.x, [2, 4, 6] * u.m) + assert_allclose_quantity(s_slc.y, [-2, -4, -6] * u.m) + assert_allclose_quantity(s_slc.z, [3, 3, 3] * u.km) + + def test_getitem_scalar(self): + + s = CartesianRepresentation(x=1 * u.m, + y=-2 * u.m, + z=3 * u.km) + + with pytest.raises(TypeError): + s_slc = s[0] + + def test_transform(self): + + s1 = CartesianRepresentation(x=[1, 2] * u.kpc, y=[3, 4] * u.kpc, z=[5, 6] * u.kpc) + + matrix = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + + s2 = s1.transform(matrix) + + assert_allclose(s2.x.value, [1 * 1 + 2 * 3 + 3 * 5, 1 * 2 + 2 * 4 + 3 * 6]) + assert_allclose(s2.y.value, [4 * 1 + 5 * 3 + 6 * 5, 4 * 2 + 5 * 4 + 6 * 6]) + assert_allclose(s2.z.value, [7 * 1 + 8 * 3 + 9 * 5, 
7 * 2 + 8 * 4 + 9 * 6]) + + assert s2.x.unit is u.kpc + assert s2.y.unit is u.kpc + assert s2.z.unit is u.kpc + + +class TestCylindricalRepresentation(object): + + def test_name(self): + assert CylindricalRepresentation.get_name() == 'cylindrical' + assert CylindricalRepresentation.get_name() in REPRESENTATION_CLASSES + + def test_empty_init(self): + with pytest.raises(TypeError) as exc: + s = CylindricalRepresentation() + + def test_init_quantity(self): + + s1 = CylindricalRepresentation(rho=1 * u.kpc, phi=2 * u.deg, z=3 * u.kpc) + + assert s1.rho.unit is u.kpc + assert s1.phi.unit is u.deg + assert s1.z.unit is u.kpc + + assert_allclose(s1.rho.value, 1) + assert_allclose(s1.phi.value, 2) + assert_allclose(s1.z.value, 3) + + def test_init_array(self): + + s1 = CylindricalRepresentation(rho=[1, 2, 3] * u.pc, + phi=[2, 3, 4] * u.deg, + z=[3, 4, 5] * u.kpc) + + assert s1.rho.unit is u.pc + assert s1.phi.unit is u.deg + assert s1.z.unit is u.kpc + + assert_allclose(s1.rho.value, [1, 2, 3]) + assert_allclose(s1.phi.value, [2, 3, 4]) + assert_allclose(s1.z.value, [3, 4, 5]) + + def test_init_array_nocopy(self): + + rho = [8, 9, 10] * u.pc + phi = [5, 6, 7] * u.deg + z = [2, 3, 4] * u.kpc + + s1 = CylindricalRepresentation(rho=rho, phi=phi, z=z, copy=False) + + rho[:] = [9, 2, 3] * u.kpc + phi[:] = [1, 2, 3] * u.arcmin + z[:] = [-2, 3, 8] * u.kpc + + assert_allclose_quantity(rho, s1.rho) + assert_allclose_quantity(phi, s1.phi) + assert_allclose_quantity(z, s1.z) + + def test_reprobj(self): + + s1 = CylindricalRepresentation(rho=1 * u.kpc, phi=2 * u.deg, z=3 * u.kpc) + + s2 = CylindricalRepresentation.from_representation(s1) + + assert s2.rho == 1 * u.kpc + assert s2.phi == 2 * u.deg + assert s2.z == 3 * u.kpc + + def test_broadcasting(self): + + s1 = CylindricalRepresentation(rho=[1, 2] * u.kpc, phi=[3, 4] * u.deg, z=5 * u.kpc) + + assert s1.rho.unit == u.kpc + assert s1.phi.unit == u.deg + assert s1.z.unit == u.kpc + + assert_allclose(s1.rho.value, [1, 2]) + assert_allclose(s1.phi.value, [3, 4]) + assert_allclose(s1.z.value, [5, 5]) + + def test_broadcasting_mismatch(self): + + with pytest.raises(ValueError) as exc: + s1 = CylindricalRepresentation(rho=[1, 2] * u.kpc, phi=[3, 4] * u.deg, z=[5, 6, 7] * u.kpc) + assert exc.value.args[0] == "Input parameters rho, phi, and z cannot be broadcast" + + def test_readonly(self): + + s1 = CylindricalRepresentation(rho=1 * u.kpc, + phi=20 * u.deg, + z=3 * u.kpc) + + with pytest.raises(AttributeError): + s1.rho = 1. * u.kpc + + with pytest.raises(AttributeError): + s1.phi = 20 * u.deg + + with pytest.raises(AttributeError): + s1.z = 1. 
* u.kpc + + def unit_mismatch(self): + + q_len = u.Quantity([1], u.kpc) + q_nonlen = u.Quantity([1], u.kg) + + with pytest.raises(u.UnitsError) as exc: + s1 = CylindricalRepresentation(rho=q_nonlen, phi=10 * u.deg, z=q_len) + assert exc.value.args[0] == "rho and z should have matching physical types" + + with pytest.raises(u.UnitsError) as exc: + s1 = CylindricalRepresentation(rho=q_len, phi=10 * u.deg, z=q_nonlen) + assert exc.value.args[0] == "rho and z should have matching physical types" + + def test_getitem(self): + + s = CylindricalRepresentation(rho=np.arange(10) * u.pc, + phi=-np.arange(10) * u.deg, + z=1 * u.kpc) + + s_slc = s[2:8:2] + + assert_allclose_quantity(s_slc.rho, [2, 4, 6] * u.pc) + assert_allclose_quantity(s_slc.phi, [-2, -4, -6] * u.deg) + assert_allclose_quantity(s_slc.z, [1, 1, 1] * u.kpc) + + def test_getitem_scalar(self): + + s = CylindricalRepresentation(rho=1 * u.pc, + phi=-2 * u.deg, + z=3 * u.kpc) + + with pytest.raises(TypeError): + s_slc = s[0] + + +def test_cartesian_spherical_roundtrip(): + + s1 = CartesianRepresentation(x=[1, 2000.] * u.kpc, + y=[3000., 4.] * u.pc, + z=[5., 6000.] * u.pc) + + s2 = SphericalRepresentation.from_representation(s1) + + s3 = CartesianRepresentation.from_representation(s2) + + s4 = SphericalRepresentation.from_representation(s3) + + assert_allclose_quantity(s1.x, s3.x) + assert_allclose_quantity(s1.y, s3.y) + assert_allclose_quantity(s1.z, s3.z) + + assert_allclose_quantity(s2.lon, s4.lon) + assert_allclose_quantity(s2.lat, s4.lat) + assert_allclose_quantity(s2.distance, s4.distance) + + +def test_cartesian_physics_spherical_roundtrip(): + + s1 = CartesianRepresentation(x=[1, 2000.] * u.kpc, + y=[3000., 4.] * u.pc, + z=[5., 6000.] * u.pc) + + s2 = PhysicsSphericalRepresentation.from_representation(s1) + + s3 = CartesianRepresentation.from_representation(s2) + + s4 = PhysicsSphericalRepresentation.from_representation(s3) + + assert_allclose_quantity(s1.x, s3.x) + assert_allclose_quantity(s1.y, s3.y) + assert_allclose_quantity(s1.z, s3.z) + + assert_allclose_quantity(s2.phi, s4.phi) + assert_allclose_quantity(s2.theta, s4.theta) + assert_allclose_quantity(s2.r, s4.r) + + +def test_spherical_physics_spherical_roundtrip(): + + s1 = SphericalRepresentation(lon=3 * u.deg, lat=4 * u.deg, distance=3 * u.kpc) + + s2 = PhysicsSphericalRepresentation.from_representation(s1) + + s3 = SphericalRepresentation.from_representation(s2) + + s4 = PhysicsSphericalRepresentation.from_representation(s3) + + assert_allclose_quantity(s1.lon, s3.lon) + assert_allclose_quantity(s1.lat, s3.lat) + assert_allclose_quantity(s1.distance, s3.distance) + + assert_allclose_quantity(s2.phi, s4.phi) + assert_allclose_quantity(s2.theta, s4.theta) + assert_allclose_quantity(s2.r, s4.r) + + assert_allclose_quantity(s1.lon, s4.phi) + assert_allclose_quantity(s1.lat, 90. 
* u.deg - s4.theta) + assert_allclose_quantity(s1.distance, s4.r) + + +def test_cartesian_cylindrical_roundtrip(): + + s1 = CartesianRepresentation(x=np.array([1., 2000.]) * u.kpc, + y=np.array([3000., 4.]) * u.pc, + z=np.array([5., 600.]) * u.cm) + + s2 = CylindricalRepresentation.from_representation(s1) + + s3 = CartesianRepresentation.from_representation(s2) + + s4 = CylindricalRepresentation.from_representation(s3) + + assert_allclose_quantity(s1.x, s3.x) + assert_allclose_quantity(s1.y, s3.y) + assert_allclose_quantity(s1.z, s3.z) + + assert_allclose_quantity(s2.rho, s4.rho) + assert_allclose_quantity(s2.phi, s4.phi) + assert_allclose_quantity(s2.z, s4.z) + + +def test_unit_spherical_roundtrip(): + + s1 = UnitSphericalRepresentation(lon=[10., 30.] * u.deg, + lat=[5., 6.] * u.arcmin) + + s2 = CartesianRepresentation.from_representation(s1) + + s3 = SphericalRepresentation.from_representation(s2) + + s4 = UnitSphericalRepresentation.from_representation(s3) + + assert_allclose_quantity(s1.lon, s4.lon) + assert_allclose_quantity(s1.lat, s4.lat) + + +def test_no_unnecessary_copies(): + + s1 = UnitSphericalRepresentation(lon=[10., 30.] * u.deg, + lat=[5., 6.] * u.arcmin) + s2 = s1.represent_as(UnitSphericalRepresentation) + assert s2 is s1 + assert np.may_share_memory(s1.lon, s2.lon) + assert np.may_share_memory(s1.lat, s2.lat) + s3 = s1.represent_as(SphericalRepresentation) + assert np.may_share_memory(s1.lon, s3.lon) + assert np.may_share_memory(s1.lat, s3.lat) + s4 = s1.represent_as(CartesianRepresentation) + s5 = s4.represent_as(CylindricalRepresentation) + assert np.may_share_memory(s5.z, s4.z) + + +def test_representation_repr(): + r1 = SphericalRepresentation(lon=1 * u.deg, lat=2.5 * u.deg, distance=1 * u.kpc) + assert repr(r1) == ('').format(' 1., 2.5, 1.' if NUMPY_LT_1_14 + else '1., 2.5, 1.') + + r2 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc) + assert repr(r2) == ('').format(' 1., 2., 3.' if NUMPY_LT_1_14 + else '1., 2., 3.') + + r3 = CartesianRepresentation(x=[1, 2, 3] * u.kpc, y=4 * u.kpc, z=[9, 10, 11] * u.kpc) + if NUMPY_LT_1_14: + assert repr(r3) == ('') + else: + assert repr(r3) == ('') + + +def test_representation_repr_multi_d(): + """Regression test for #5889.""" + cr = CartesianRepresentation(np.arange(27).reshape(3, 3, 3), unit='m') + if NUMPY_LT_1_14: + assert repr(cr) == ( + '') + else: + assert repr(cr) == ( + '') + # This was broken before. + if NUMPY_LT_1_14: + assert repr(cr.T) == ( + '') + else: + assert repr(cr.T) == ( + '') + + +def test_representation_str(): + r1 = SphericalRepresentation(lon=1 * u.deg, lat=2.5 * u.deg, distance=1 * u.kpc) + assert str(r1) == ('( 1., 2.5, 1.) (deg, deg, kpc)' if NUMPY_LT_1_14 else + '(1., 2.5, 1.) (deg, deg, kpc)') + r2 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc) + assert str(r2) == ('( 1., 2., 3.) kpc' if NUMPY_LT_1_14 else + '(1., 2., 3.) 
kpc') + r3 = CartesianRepresentation(x=[1, 2, 3] * u.kpc, y=4 * u.kpc, z=[9, 10, 11] * u.kpc) + assert str(r3) == ('[( 1., 4., 9.), ( 2., 4., 10.), ( 3., 4., 11.)] kpc' + if NUMPY_LT_1_14 else + '[(1., 4., 9.), (2., 4., 10.), (3., 4., 11.)] kpc') + + +def test_representation_str_multi_d(): + """Regression test for #5889.""" + cr = CartesianRepresentation(np.arange(27).reshape(3, 3, 3), unit='m') + if NUMPY_LT_1_14: + assert str(cr) == ( + '[[( 0., 9., 18.), ( 1., 10., 19.), ( 2., 11., 20.)],\n' + ' [( 3., 12., 21.), ( 4., 13., 22.), ( 5., 14., 23.)],\n' + ' [( 6., 15., 24.), ( 7., 16., 25.), ( 8., 17., 26.)]] m') + else: + assert str(cr) == ( + '[[(0., 9., 18.), (1., 10., 19.), (2., 11., 20.)],\n' + ' [(3., 12., 21.), (4., 13., 22.), (5., 14., 23.)],\n' + ' [(6., 15., 24.), (7., 16., 25.), (8., 17., 26.)]] m') + # This was broken before. + if NUMPY_LT_1_14: + assert str(cr.T) == ( + '[[( 0., 9., 18.), ( 3., 12., 21.), ( 6., 15., 24.)],\n' + ' [( 1., 10., 19.), ( 4., 13., 22.), ( 7., 16., 25.)],\n' + ' [( 2., 11., 20.), ( 5., 14., 23.), ( 8., 17., 26.)]] m') + else: + assert str(cr.T) == ( + '[[(0., 9., 18.), (3., 12., 21.), (6., 15., 24.)],\n' + ' [(1., 10., 19.), (4., 13., 22.), (7., 16., 25.)],\n' + ' [(2., 11., 20.), (5., 14., 23.), (8., 17., 26.)]] m') + + +def test_subclass_representation(): + from ..builtin_frames import ICRS + + class Longitude180(Longitude): + def __new__(cls, angle, unit=None, wrap_angle=180 * u.deg, **kwargs): + self = super(Longitude180, cls).__new__(cls, angle, unit=unit, + wrap_angle=wrap_angle, **kwargs) + return self + + class SphericalWrap180Representation(SphericalRepresentation): + attr_classes = OrderedDict([('lon', Longitude180), + ('lat', Latitude), + ('distance', u.Quantity)]) + recommended_units = {'lon': u.deg, 'lat': u.deg} + + class ICRSWrap180(ICRS): + frame_specific_representation_info = ICRS._frame_specific_representation_info.copy() + frame_specific_representation_info[SphericalWrap180Representation] = \ + frame_specific_representation_info[SphericalRepresentation] + default_representation = SphericalWrap180Representation + + c = ICRSWrap180(ra=-1 * u.deg, dec=-2 * u.deg, distance=1 * u.m) + assert c.ra.value == -1 + assert c.ra.unit is u.deg + assert c.dec.value == -2 + assert c.dec.unit is u.deg + + +def test_minimal_subclass(): + # Basically to check what we document works; + # see doc/coordinates/representations.rst + class LogDRepresentation(BaseRepresentation): + attr_classes = OrderedDict([('lon', Longitude), + ('lat', Latitude), + ('logd', u.Dex)]) + + def to_cartesian(self): + d = self.logd.physical + x = d * np.cos(self.lat) * np.cos(self.lon) + y = d * np.cos(self.lat) * np.sin(self.lon) + z = d * np.sin(self.lat) + return CartesianRepresentation(x=x, y=y, z=z, copy=False) + + @classmethod + def from_cartesian(cls, cart): + s = np.hypot(cart.x, cart.y) + r = np.hypot(s, cart.z) + lon = np.arctan2(cart.y, cart.x) + lat = np.arctan2(cart.z, s) + return cls(lon=lon, lat=lat, logd=u.Dex(r), copy=False) + + ld1 = LogDRepresentation(90.*u.deg, 0.*u.deg, 1.*u.dex(u.kpc)) + ld2 = LogDRepresentation(lon=90.*u.deg, lat=0.*u.deg, logd=1.*u.dex(u.kpc)) + assert np.all(ld1.lon == ld2.lon) + assert np.all(ld1.lat == ld2.lat) + assert np.all(ld1.logd == ld2.logd) + c = ld1.to_cartesian() + assert_allclose_quantity(c.xyz, [0., 10., 0.] 
* u.kpc, atol=1.*u.npc) + ld3 = LogDRepresentation.from_cartesian(c) + assert np.all(ld3.lon == ld2.lon) + assert np.all(ld3.lat == ld2.lat) + assert np.all(ld3.logd == ld2.logd) + s = ld1.represent_as(SphericalRepresentation) + assert_allclose_quantity(s.lon, ld1.lon) + assert_allclose_quantity(s.distance, 10.*u.kpc) + assert_allclose_quantity(s.lat, ld1.lat) + + with pytest.raises(TypeError): + LogDRepresentation(0.*u.deg, 1.*u.deg) + with pytest.raises(TypeError): + LogDRepresentation(0.*u.deg, 1.*u.deg, 1.*u.dex(u.kpc), lon=1.*u.deg) + with pytest.raises(TypeError): + LogDRepresentation(0.*u.deg, 1.*u.deg, 1.*u.dex(u.kpc), True, False) + with pytest.raises(TypeError): + LogDRepresentation(0.*u.deg, 1.*u.deg, 1.*u.dex(u.kpc), foo='bar') + + with pytest.raises(ValueError): + # check we cannot redefine an existing class. + class LogDRepresentation(BaseRepresentation): + attr_classes = OrderedDict([('lon', Longitude), + ('lat', Latitude), + ('logr', u.Dex)]) + + +def test_combine_xyz(): + + x, y, z = np.arange(27).reshape(3, 9) * u.kpc + xyz = _combine_xyz(x, y, z, xyz_axis=0) + assert xyz.shape == (3, 9) + assert np.all(xyz[0] == x) + assert np.all(xyz[1] == y) + assert np.all(xyz[2] == z) + + x, y, z = np.arange(27).reshape(3, 3, 3) * u.kpc + xyz = _combine_xyz(x, y, z, xyz_axis=0) + assert xyz.ndim == 3 + assert np.all(xyz[0] == x) + assert np.all(xyz[1] == y) + assert np.all(xyz[2] == z) + + xyz = _combine_xyz(x, y, z, xyz_axis=1) + assert xyz.ndim == 3 + assert np.all(xyz[:, 0] == x) + assert np.all(xyz[:, 1] == y) + assert np.all(xyz[:, 2] == z) + + xyz = _combine_xyz(x, y, z, xyz_axis=-1) + assert xyz.ndim == 3 + assert np.all(xyz[..., 0] == x) + assert np.all(xyz[..., 1] == y) + assert np.all(xyz[..., 2] == z) + + +class TestCartesianRepresentationWithDifferential(object): + + def test_init_differential(self): + + diff = CartesianDifferential(d_x=1 * u.km/u.s, + d_y=2 * u.km/u.s, + d_z=3 * u.km/u.s) + + # Check that a single differential gets turned into a 1-item dict. 
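+        # (Aside: the dict key is the string form of the unit the
+        # representation is differentiated against -- u.s for a d/dt
+        # velocity differential -- which is why the single differential
+        # below should land under 's'.)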
+ s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc, + differentials=diff) + + assert s1.x.unit is u.kpc + assert s1.y.unit is u.kpc + assert s1.z.unit is u.kpc + assert len(s1.differentials) == 1 + assert s1.differentials['s'] is diff + + # can also pass in an explicit dictionary + s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc, + differentials={'s': diff}) + assert len(s1.differentials) == 1 + assert s1.differentials['s'] is diff + + # using the wrong key will cause it to fail + with pytest.raises(ValueError): + s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc, + differentials={'1 / s2': diff}) + + # make sure other kwargs are handled properly + s1 = CartesianRepresentation(x=1, y=2, z=3, + differentials=diff, copy=False, unit=u.kpc) + assert len(s1.differentials) == 1 + assert s1.differentials['s'] is diff + + with pytest.raises(TypeError): # invalid type passed to differentials + CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc, + differentials='garmonbozia') + + # make sure differentials can't accept differentials + with pytest.raises(TypeError): + CartesianDifferential(d_x=1 * u.km/u.s, d_y=2 * u.km/u.s, + d_z=3 * u.km/u.s, differentials=diff) + + def test_init_differential_compatible(self): + # TODO: more extensive checking of this + + # should fail - representation and differential not compatible + diff = SphericalDifferential(d_lon=1 * u.mas/u.yr, + d_lat=2 * u.mas/u.yr, + d_distance=3 * u.km/u.s) + with pytest.raises(TypeError): + CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc, + differentials=diff) + + # should succeed - representation and differential are compatible + diff = SphericalCosLatDifferential(d_lon_coslat=1 * u.mas/u.yr, + d_lat=2 * u.mas/u.yr, + d_distance=3 * u.km/u.s) + + r1 = SphericalRepresentation(lon=15*u.deg, lat=21*u.deg, + distance=1*u.pc, + differentials=diff) + + def test_init_differential_multiple_equivalent_keys(self): + d1 = CartesianDifferential(*[1, 2, 3] * u.km/u.s) + d2 = CartesianDifferential(*[4, 5, 6] * u.km/u.s) + + # verify that the check against expected_unit validates against passing + # in two different but equivalent keys + with pytest.raises(ValueError): + r1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc, + differentials={'s': d1, 'yr': d2}) + + def test_init_array_broadcasting(self): + + arr1 = np.arange(8).reshape(4, 2) * u.km/u.s + diff = CartesianDifferential(d_x=arr1, d_y=arr1, d_z=arr1) + + # shapes aren't compatible + arr2 = np.arange(27).reshape(3, 9) * u.kpc + with pytest.raises(ValueError): + rep = CartesianRepresentation(x=arr2, y=arr2, z=arr2, + differentials=diff) + + arr2 = np.arange(8).reshape(4, 2) * u.kpc + rep = CartesianRepresentation(x=arr2, y=arr2, z=arr2, + differentials=diff) + + assert rep.x.unit is u.kpc + assert rep.y.unit is u.kpc + assert rep.z.unit is u.kpc + assert len(rep.differentials) == 1 + assert rep.differentials['s'] is diff + + assert rep.xyz.shape == rep.differentials['s'].d_xyz.shape + + def test_reprobj(self): + + # should succeed - representation and differential are compatible + diff = SphericalCosLatDifferential(d_lon_coslat=1 * u.mas/u.yr, + d_lat=2 * u.mas/u.yr, + d_distance=3 * u.km/u.s) + + r1 = SphericalRepresentation(lon=15*u.deg, lat=21*u.deg, + distance=1*u.pc, + differentials=diff) + + r2 = CartesianRepresentation.from_representation(r1) + assert r2.get_name() == 'cartesian' + assert not r2.differentials + + def test_readonly(self): + + s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * 
u.kpc, z=3 * u.kpc)
+
+        with pytest.raises(AttributeError):  # attribute is not settable
+            s1.differentials = 'thing'
+
+    def test_represent_as(self):
+
+        diff = CartesianDifferential(d_x=1 * u.km/u.s,
+                                     d_y=2 * u.km/u.s,
+                                     d_z=3 * u.km/u.s)
+        rep1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc,
+                                       differentials=diff)
+
+        # Only change the representation, drop the differential
+        new_rep = rep1.represent_as(SphericalRepresentation)
+        assert new_rep.get_name() == 'spherical'
+        assert not new_rep.differentials  # dropped
+
+        # Pass in separate classes for representation, differential
+        new_rep = rep1.represent_as(SphericalRepresentation,
+                                    SphericalCosLatDifferential)
+        assert new_rep.get_name() == 'spherical'
+        assert new_rep.differentials['s'].get_name() == 'sphericalcoslat'
+
+        # Pass in a dictionary for the differential classes
+        new_rep = rep1.represent_as(SphericalRepresentation,
+                                    {'s': SphericalCosLatDifferential})
+        assert new_rep.get_name() == 'spherical'
+        assert new_rep.differentials['s'].get_name() == 'sphericalcoslat'
+
+        # make sure represent_as() passes through the differentials
+        for name in REPRESENTATION_CLASSES:
+            if name == 'radial':
+                # TODO: Converting a CartesianDifferential to a
+                # RadialDifferential fails, even on `master`
+                continue
+            new_rep = rep1.represent_as(REPRESENTATION_CLASSES[name],
+                                        DIFFERENTIAL_CLASSES[name])
+            assert new_rep.get_name() == name
+            assert len(new_rep.differentials) == 1
+            assert new_rep.differentials['s'].get_name() == name
+
+        with pytest.raises(ValueError) as excinfo:
+            rep1.represent_as('name')
+        assert 'use frame object' in str(excinfo.value)
+
+    def test_getitem(self):
+
+        d = CartesianDifferential(d_x=np.arange(10) * u.m/u.s,
+                                  d_y=-np.arange(10) * u.m/u.s,
+                                  d_z=1. * u.m/u.s)
+        s = CartesianRepresentation(x=np.arange(10) * u.m,
+                                    y=-np.arange(10) * u.m,
+                                    z=3 * u.km,
+                                    differentials=d)
+
+        s_slc = s[2:8:2]
+        s_dif = s_slc.differentials['s']
+
+        assert_allclose_quantity(s_slc.x, [2, 4, 6] * u.m)
+        assert_allclose_quantity(s_slc.y, [-2, -4, -6] * u.m)
+        assert_allclose_quantity(s_slc.z, [3, 3, 3] * u.km)
+
+        assert_allclose_quantity(s_dif.d_x, [2, 4, 6] * u.m/u.s)
+        assert_allclose_quantity(s_dif.d_y, [-2, -4, -6] * u.m/u.s)
+        assert_allclose_quantity(s_dif.d_z, [1, 1, 1] * u.m/u.s)
+
+    def test_transform(self):
+        d1 = CartesianDifferential(d_x=[1, 2] * u.km/u.s,
+                                   d_y=[3, 4] * u.km/u.s,
+                                   d_z=[5, 6] * u.km/u.s)
+        r1 = CartesianRepresentation(x=[1, 2] * u.kpc,
+                                     y=[3, 4] * u.kpc,
+                                     z=[5, 6] * u.kpc,
+                                     differentials=d1)
+
+        matrix = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
+
+        r2 = r1.transform(matrix)
+        d2 = r2.differentials['s']
+        # The differential gets rotated by the same matrix; e.g. for the
+        # first point, d_xyz = (1, 3, 5) km/s, so row (1, 2, 3) gives
+        # 1 + 6 + 15 = 22, row (4, 5, 6) gives 49, and row (7, 8, 9) gives 76.
+        assert_allclose_quantity(d2.d_x, [22., 28]*u.km/u.s)
+        assert_allclose_quantity(d2.d_y, [49, 64]*u.km/u.s)
+        assert_allclose_quantity(d2.d_z, [76, 100.]*u.km/u.s)
+
+    def test_with_differentials(self):
+        # make sure with_differentials correctly creates a new copy with
+        # the same differential
+        cr = CartesianRepresentation([1, 2, 3]*u.kpc)
+        diff = CartesianDifferential([.1, .2, .3]*u.km/u.s)
+        cr2 = cr.with_differentials(diff)
+        assert cr.differentials != cr2.differentials
+        assert cr2.differentials['s'] is diff
+
+        # make sure it works even if a differential is present already
+        diff2 = CartesianDifferential([.1, .2, .3]*u.m/u.s)
+        cr3 = CartesianRepresentation([1, 2, 3]*u.kpc, differentials=diff)
+        cr4 = cr3.with_differentials(diff2)
+        assert cr4.differentials['s'] != cr3.differentials['s']
+        assert cr4.differentials['s'] == diff2
+
+        # also ensure a *scalar* differential works
+
cr5 = cr.with_differentials(diff) + assert len(cr5.differentials) == 1 + assert cr5.differentials['s'] == diff + + # make sure we don't update the original representation's dict + d1 = CartesianDifferential(*np.random.random((3, 5)), unit=u.km/u.s) + d2 = CartesianDifferential(*np.random.random((3, 5)), unit=u.km/u.s**2) + r1 = CartesianRepresentation(*np.random.random((3, 5)), unit=u.pc, + differentials=d1) + + r2 = r1.with_differentials(d2) + assert r1.differentials['s'] is r2.differentials['s'] + assert 's2' not in r1.differentials + assert 's2' in r2.differentials + + +def test_repr_with_differentials(): + diff = CartesianDifferential([.1, .2, .3]*u.km/u.s) + cr = CartesianRepresentation([1, 2, 3]*u.kpc, differentials=diff) + assert "has differentials w.r.t.: 's'" in repr(cr) + + +def test_to_cartesian(): + """ + Test that to_cartesian drops the differential. + """ + sd = SphericalDifferential(d_lat=1*u.deg, d_lon=2*u.deg, d_distance=10*u.m) + sr = SphericalRepresentation(lat=1*u.deg, lon=2*u.deg, distance=10*u.m, + differentials=sd) + + cart = sr.to_cartesian() + assert cart.get_name() == 'cartesian' + assert not cart.differentials diff --git a/astropy/coordinates/tests/test_representation_arithmetic.py b/astropy/coordinates/tests/test_representation_arithmetic.py new file mode 100644 index 0000000..1ff0419 --- /dev/null +++ b/astropy/coordinates/tests/test_representation_arithmetic.py @@ -0,0 +1,1225 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +# TEST_UNICODE_LITERALS +import functools + +import pytest +import numpy as np + +from ... import units as u +from .. import (PhysicsSphericalRepresentation, CartesianRepresentation, + CylindricalRepresentation, SphericalRepresentation, + UnitSphericalRepresentation, SphericalDifferential, + CartesianDifferential, UnitSphericalDifferential, + SphericalCosLatDifferential, UnitSphericalCosLatDifferential, + PhysicsSphericalDifferential, CylindricalDifferential, + RadialRepresentation, RadialDifferential, Longitude, Latitude) +from ..representation import DIFFERENTIAL_CLASSES +from ..angle_utilities import angular_separation +from ...utils.compat.numpy import broadcast_arrays +from ...tests.helper import assert_quantity_allclose + + +def assert_representation_allclose(actual, desired, rtol=1.e-7, atol=None, + **kwargs): + actual_xyz = actual.to_cartesian().get_xyz(xyz_axis=-1) + desired_xyz = desired.to_cartesian().get_xyz(xyz_axis=-1) + actual_xyz, desired_xyz = broadcast_arrays(actual_xyz, desired_xyz, + subok=True) + assert_quantity_allclose(actual_xyz, desired_xyz, rtol, atol, **kwargs) + + +def assert_differential_allclose(actual, desired, rtol=1.e-7, **kwargs): + assert actual.components == desired.components + for component in actual.components: + actual_c = getattr(actual, component) + atol = 1.e-10 * actual_c.unit + assert_quantity_allclose(actual_c, getattr(desired, component), + rtol, atol, **kwargs) + + +def representation_equal(first, second): + return functools.reduce(np.logical_and, + (getattr(first, component) == + getattr(second, component) + for component in first.components)) + + +class TestArithmetic(): + + def setup(self): + # Choose some specific coordinates, for which ``sum`` and ``dot`` + # works out nicely. + self.lon = Longitude(np.arange(0, 12.1, 2), u.hourangle) + self.lat = Latitude(np.arange(-90, 91, 30), u.deg) + self.distance = [5., 12., 4., 2., 4., 12., 5.] 
* u.kpc + self.spherical = SphericalRepresentation(self.lon, self.lat, + self.distance) + self.unit_spherical = self.spherical.represent_as( + UnitSphericalRepresentation) + self.cartesian = self.spherical.to_cartesian() + + def test_norm_spherical(self): + norm_s = self.spherical.norm() + assert isinstance(norm_s, u.Quantity) + # Just to be sure, test against getting object arrays. + assert norm_s.dtype.kind == 'f' + assert np.all(norm_s == self.distance) + + @pytest.mark.parametrize('representation', + (PhysicsSphericalRepresentation, + CartesianRepresentation, + CylindricalRepresentation)) + def test_norm(self, representation): + in_rep = self.spherical.represent_as(representation) + norm_rep = in_rep.norm() + assert isinstance(norm_rep, u.Quantity) + assert_quantity_allclose(norm_rep, self.distance) + + def test_norm_unitspherical(self): + norm_rep = self.unit_spherical.norm() + assert norm_rep.unit == u.dimensionless_unscaled + assert np.all(norm_rep == 1. * u.dimensionless_unscaled) + + @pytest.mark.parametrize('representation', + (SphericalRepresentation, + PhysicsSphericalRepresentation, + CartesianRepresentation, + CylindricalRepresentation, + UnitSphericalRepresentation)) + def test_neg_pos(self, representation): + in_rep = self.cartesian.represent_as(representation) + pos_rep = +in_rep + assert type(pos_rep) is type(in_rep) + assert pos_rep is not in_rep + assert np.all(representation_equal(pos_rep, in_rep)) + neg_rep = -in_rep + assert type(neg_rep) is type(in_rep) + assert np.all(neg_rep.norm() == in_rep.norm()) + in_rep_xyz = in_rep.to_cartesian().xyz + assert_quantity_allclose(neg_rep.to_cartesian().xyz, + -in_rep_xyz, atol=1.e-10*in_rep_xyz.unit) + + def test_mul_div_spherical(self): + s0 = self.spherical / (1. * u.Myr) + assert isinstance(s0, SphericalRepresentation) + assert s0.distance.dtype.kind == 'f' + assert np.all(s0.lon == self.spherical.lon) + assert np.all(s0.lat == self.spherical.lat) + assert np.all(s0.distance == self.distance / (1. * u.Myr)) + s1 = (1./u.Myr) * self.spherical + assert isinstance(s1, SphericalRepresentation) + assert np.all(representation_equal(s1, s0)) + s2 = self.spherical * np.array([[1.], [2.]]) + assert isinstance(s2, SphericalRepresentation) + assert s2.shape == (2, self.spherical.shape[0]) + assert np.all(s2.lon == self.spherical.lon) + assert np.all(s2.lat == self.spherical.lat) + assert np.all(s2.distance == + self.spherical.distance * np.array([[1.], [2.]])) + s3 = np.array([[1.], [2.]]) * self.spherical + assert isinstance(s3, SphericalRepresentation) + assert np.all(representation_equal(s3, s2)) + s4 = -self.spherical + assert isinstance(s4, SphericalRepresentation) + assert np.all(s4.lon == self.spherical.lon) + assert np.all(s4.lat == self.spherical.lat) + assert np.all(s4.distance == -self.spherical.distance) + s5 = +self.spherical + assert s5 is not self.spherical + assert np.all(representation_equal(s5, self.spherical)) + + @pytest.mark.parametrize('representation', + (PhysicsSphericalRepresentation, + CartesianRepresentation, + CylindricalRepresentation)) + def test_mul_div(self, representation): + in_rep = self.spherical.represent_as(representation) + r1 = in_rep / (1. 
* u.Myr) + assert isinstance(r1, representation) + for component in in_rep.components: + in_rep_comp = getattr(in_rep, component) + r1_comp = getattr(r1, component) + if in_rep_comp.unit == self.distance.unit: + assert np.all(r1_comp == in_rep_comp / (1.*u.Myr)) + else: + assert np.all(r1_comp == in_rep_comp) + + r2 = np.array([[1.], [2.]]) * in_rep + assert isinstance(r2, representation) + assert r2.shape == (2, in_rep.shape[0]) + assert_quantity_allclose(r2.norm(), + self.distance * np.array([[1.], [2.]])) + r3 = -in_rep + assert np.all(representation_equal(r3, in_rep * -1.)) + with pytest.raises(TypeError): + in_rep * in_rep + with pytest.raises(TypeError): + dict() * in_rep + + def test_mul_div_unit_spherical(self): + s1 = self.unit_spherical * self.distance + assert isinstance(s1, SphericalRepresentation) + assert np.all(s1.lon == self.unit_spherical.lon) + assert np.all(s1.lat == self.unit_spherical.lat) + assert np.all(s1.distance == self.spherical.distance) + s2 = self.unit_spherical / u.s + assert isinstance(s2, SphericalRepresentation) + assert np.all(s2.lon == self.unit_spherical.lon) + assert np.all(s2.lat == self.unit_spherical.lat) + assert np.all(s2.distance == 1./u.s) + u3 = -self.unit_spherical + assert isinstance(u3, UnitSphericalRepresentation) + assert_quantity_allclose(u3.lon, self.unit_spherical.lon + 180.*u.deg) + assert np.all(u3.lat == -self.unit_spherical.lat) + assert_quantity_allclose(u3.to_cartesian().xyz, + -self.unit_spherical.to_cartesian().xyz, + atol=1.e-10*u.dimensionless_unscaled) + u4 = +self.unit_spherical + assert isinstance(u4, UnitSphericalRepresentation) + assert u4 is not self.unit_spherical + assert np.all(representation_equal(u4, self.unit_spherical)) + + def test_add_sub_cartesian(self): + c1 = self.cartesian + self.cartesian + assert isinstance(c1, CartesianRepresentation) + assert c1.x.dtype.kind == 'f' + assert np.all(representation_equal(c1, 2. * self.cartesian)) + with pytest.raises(TypeError): + self.cartesian + 10.*u.m + with pytest.raises(u.UnitsError): + self.cartesian + (self.cartesian / u.s) + c2 = self.cartesian - self.cartesian + assert isinstance(c2, CartesianRepresentation) + assert np.all(representation_equal( + c2, CartesianRepresentation(0.*u.m, 0.*u.m, 0.*u.m))) + c3 = self.cartesian - self.cartesian / 2. + assert isinstance(c3, CartesianRepresentation) + assert np.all(representation_equal(c3, self.cartesian / 2.)) + + @pytest.mark.parametrize('representation', + (PhysicsSphericalRepresentation, + SphericalRepresentation, + CylindricalRepresentation)) + def test_add_sub(self, representation): + in_rep = self.cartesian.represent_as(representation) + r1 = in_rep + in_rep + assert isinstance(r1, representation) + expected = 2. * in_rep + for component in in_rep.components: + assert_quantity_allclose(getattr(r1, component), + getattr(expected, component)) + with pytest.raises(TypeError): + 10.*u.m + in_rep + with pytest.raises(u.UnitsError): + in_rep + (in_rep / u.s) + r2 = in_rep - in_rep + assert isinstance(r2, representation) + assert np.all(representation_equal( + r2.to_cartesian(), CartesianRepresentation(0.*u.m, 0.*u.m, 0.*u.m))) + r3 = in_rep - in_rep / 2. + assert isinstance(r3, representation) + expected = in_rep / 2. + assert_representation_allclose(r3, expected) + + def test_add_sub_unit_spherical(self): + s1 = self.unit_spherical + self.unit_spherical + assert isinstance(s1, SphericalRepresentation) + expected = 2. 
* self.unit_spherical + for component in s1.components: + assert_quantity_allclose(getattr(s1, component), + getattr(expected, component)) + with pytest.raises(TypeError): + 10.*u.m - self.unit_spherical + with pytest.raises(u.UnitsError): + self.unit_spherical + (self.unit_spherical / u.s) + s2 = self.unit_spherical - self.unit_spherical / 2. + assert isinstance(s2, SphericalRepresentation) + expected = self.unit_spherical / 2. + for component in s2.components: + assert_quantity_allclose(getattr(s2, component), + getattr(expected, component)) + + @pytest.mark.parametrize('representation', + (CartesianRepresentation, + PhysicsSphericalRepresentation, + SphericalRepresentation, + CylindricalRepresentation)) + def test_sum_mean(self, representation): + in_rep = self.spherical.represent_as(representation) + r_sum = in_rep.sum() + assert isinstance(r_sum, representation) + expected = SphericalRepresentation( + 90. * u.deg, 0. * u.deg, 14. * u.kpc).represent_as(representation) + for component in expected.components: + exp_component = getattr(expected, component) + assert_quantity_allclose(getattr(r_sum, component), + exp_component, + atol=1e-10*exp_component.unit) + + r_mean = in_rep.mean() + assert isinstance(r_mean, representation) + expected = expected / len(in_rep) + for component in expected.components: + exp_component = getattr(expected, component) + assert_quantity_allclose(getattr(r_mean, component), + exp_component, + atol=1e-10*exp_component.unit) + + def test_sum_mean_unit_spherical(self): + s_sum = self.unit_spherical.sum() + assert isinstance(s_sum, SphericalRepresentation) + expected = SphericalRepresentation( + 90. * u.deg, 0. * u.deg, 3. * u.dimensionless_unscaled) + for component in expected.components: + exp_component = getattr(expected, component) + assert_quantity_allclose(getattr(s_sum, component), + exp_component, + atol=1e-10*exp_component.unit) + + s_mean = self.unit_spherical.mean() + assert isinstance(s_mean, SphericalRepresentation) + expected = expected / len(self.unit_spherical) + for component in expected.components: + exp_component = getattr(expected, component) + assert_quantity_allclose(getattr(s_mean, component), + exp_component, + atol=1e-10*exp_component.unit) + + @pytest.mark.parametrize('representation', + (CartesianRepresentation, + PhysicsSphericalRepresentation, + SphericalRepresentation, + CylindricalRepresentation)) + def test_dot(self, representation): + in_rep = self.cartesian.represent_as(representation) + r_dot_r = in_rep.dot(in_rep) + assert isinstance(r_dot_r, u.Quantity) + assert r_dot_r.shape == in_rep.shape + assert_quantity_allclose(np.sqrt(r_dot_r), self.distance) + r_dot_r_rev = in_rep.dot(in_rep[::-1]) + assert isinstance(r_dot_r_rev, u.Quantity) + assert r_dot_r_rev.shape == in_rep.shape + expected = [-25., -126., 2., 4., 2., -126., -25.] * u.kpc**2 + assert_quantity_allclose(r_dot_r_rev, expected) + for axis in 'xyz': + project = CartesianRepresentation(*( + (1. if axis == _axis else 0.) 
* u.dimensionless_unscaled + for _axis in 'xyz')) + assert_quantity_allclose(in_rep.dot(project), + getattr(self.cartesian, axis), + atol=1.*u.upc) + with pytest.raises(TypeError): + in_rep.dot(self.cartesian.xyz) + + def test_dot_unit_spherical(self): + u_dot_u = self.unit_spherical.dot(self.unit_spherical) + assert isinstance(u_dot_u, u.Quantity) + assert u_dot_u.shape == self.unit_spherical.shape + assert_quantity_allclose(u_dot_u, 1.*u.dimensionless_unscaled) + cartesian = self.unit_spherical.to_cartesian() + for axis in 'xyz': + project = CartesianRepresentation(*( + (1. if axis == _axis else 0.) * u.dimensionless_unscaled + for _axis in 'xyz')) + assert_quantity_allclose(self.unit_spherical.dot(project), + getattr(cartesian, axis), atol=1.e-10) + + @pytest.mark.parametrize('representation', + (CartesianRepresentation, + PhysicsSphericalRepresentation, + SphericalRepresentation, + CylindricalRepresentation)) + def test_cross(self, representation): + in_rep = self.cartesian.represent_as(representation) + r_cross_r = in_rep.cross(in_rep) + assert isinstance(r_cross_r, representation) + assert_quantity_allclose(r_cross_r.norm(), 0.*u.kpc**2, + atol=1.*u.mpc**2) + r_cross_r_rev = in_rep.cross(in_rep[::-1]) + sep = angular_separation(self.lon, self.lat, + self.lon[::-1], self.lat[::-1]) + expected = self.distance * self.distance[::-1] * np.sin(sep) + assert_quantity_allclose(r_cross_r_rev.norm(), expected, + atol=1.*u.mpc**2) + unit_vectors = CartesianRepresentation( + [1., 0., 0.]*u.one, + [0., 1., 0.]*u.one, + [0., 0., 1.]*u.one)[:, np.newaxis] + r_cross_uv = in_rep.cross(unit_vectors) + assert r_cross_uv.shape == (3, 7) + assert_quantity_allclose(r_cross_uv.dot(unit_vectors), 0.*u.kpc, + atol=1.*u.upc) + assert_quantity_allclose(r_cross_uv.dot(in_rep), 0.*u.kpc**2, + atol=1.*u.mpc**2) + zeros = np.zeros(len(in_rep)) * u.kpc + expected = CartesianRepresentation( + u.Quantity((zeros, -self.cartesian.z, self.cartesian.y)), + u.Quantity((self.cartesian.z, zeros, -self.cartesian.x)), + u.Quantity((-self.cartesian.y, self.cartesian.x, zeros))) + # Comparison with spherical is hard since some distances are zero, + # implying the angles are undefined. + r_cross_uv_cartesian = r_cross_uv.to_cartesian() + assert_representation_allclose(r_cross_uv_cartesian, + expected, atol=1.*u.upc) + # A final check, with the side benefit of ensuring __div__ and norm + # work on multi-D representations. + r_cross_uv_by_distance = r_cross_uv / self.distance + uv_sph = unit_vectors.represent_as(UnitSphericalRepresentation) + sep = angular_separation(self.lon, self.lat, uv_sph.lon, uv_sph.lat) + assert_quantity_allclose(r_cross_uv_by_distance.norm(), np.sin(sep), + atol=1e-9) + + with pytest.raises(TypeError): + in_rep.cross(self.cartesian.xyz) + + def test_cross_unit_spherical(self): + u_cross_u = self.unit_spherical.cross(self.unit_spherical) + assert isinstance(u_cross_u, SphericalRepresentation) + assert_quantity_allclose(u_cross_u.norm(), 0.*u.one, atol=1.e-10*u.one) + u_cross_u_rev = self.unit_spherical.cross(self.unit_spherical[::-1]) + assert isinstance(u_cross_u_rev, SphericalRepresentation) + sep = angular_separation(self.lon, self.lat, + self.lon[::-1], self.lat[::-1]) + expected = np.sin(sep) + assert_quantity_allclose(u_cross_u_rev.norm(), expected, + atol=1.e-10*u.one) + + +class TestUnitVectorsAndScales(): + + @staticmethod + def check_unit_vectors(e): + for v in e.values(): + assert type(v) is CartesianRepresentation + assert_quantity_allclose(v.norm(), 1. 
* u.one)
+        return e
+
+    @staticmethod
+    def check_scale_factors(sf, rep):
+        unit = rep.norm().unit
+        for c, f in sf.items():
+            assert type(f) is u.Quantity
+            assert (f.unit * getattr(rep, c).unit).is_equivalent(unit)
+
+    def test_spherical(self):
+        s = SphericalRepresentation(lon=[0., 6., 21.] * u.hourangle,
+                                    lat=[0., -30., 85.] * u.deg,
+                                    distance=[1, 2, 3] * u.kpc)
+        e = s.unit_vectors()
+        self.check_unit_vectors(e)
+        sf = s.scale_factors()
+        self.check_scale_factors(sf, s)
+
+        s_lon = s + s.distance * 1e-5 * np.cos(s.lat) * e['lon']
+        assert_quantity_allclose(s_lon.lon, s.lon + 1e-5*u.rad,
+                                 atol=1e-10*u.rad)
+        assert_quantity_allclose(s_lon.lat, s.lat, atol=1e-10*u.rad)
+        assert_quantity_allclose(s_lon.distance, s.distance)
+        s_lon2 = s + 1e-5 * u.radian * sf['lon'] * e['lon']
+        assert_representation_allclose(s_lon2, s_lon)
+
+        s_lat = s + s.distance * 1e-5 * e['lat']
+        assert_quantity_allclose(s_lat.lon, s.lon)
+        assert_quantity_allclose(s_lat.lat, s.lat + 1e-5*u.rad,
+                                 atol=1e-10*u.rad)
+        assert_quantity_allclose(s_lat.distance, s.distance)
+        s_lat2 = s + 1.e-5 * u.radian * sf['lat'] * e['lat']
+        assert_representation_allclose(s_lat2, s_lat)
+
+        s_distance = s + 1. * u.pc * e['distance']
+        assert_quantity_allclose(s_distance.lon, s.lon, atol=1e-10*u.rad)
+        assert_quantity_allclose(s_distance.lat, s.lat, atol=1e-10*u.rad)
+        assert_quantity_allclose(s_distance.distance, s.distance + 1.*u.pc)
+        s_distance2 = s + 1. * u.pc * sf['distance'] * e['distance']
+        assert_representation_allclose(s_distance2, s_distance)
+
+    def test_unit_spherical(self):
+        s = UnitSphericalRepresentation(lon=[0., 6., 21.] * u.hourangle,
+                                        lat=[0., -30., 85.] * u.deg)
+
+        e = s.unit_vectors()
+        self.check_unit_vectors(e)
+        sf = s.scale_factors()
+        self.check_scale_factors(sf, s)
+
+        s_lon = s + 1e-5 * np.cos(s.lat) * e['lon']
+        assert_quantity_allclose(s_lon.lon, s.lon + 1e-5*u.rad,
+                                 atol=1e-10*u.rad)
+        assert_quantity_allclose(s_lon.lat, s.lat, atol=1e-10*u.rad)
+        s_lon2 = s + 1e-5 * u.radian * sf['lon'] * e['lon']
+        assert_representation_allclose(s_lon2, s_lon)
+
+        s_lat = s + 1e-5 * e['lat']
+        assert_quantity_allclose(s_lat.lon, s.lon)
+        assert_quantity_allclose(s_lat.lat, s.lat + 1e-5*u.rad,
+                                 atol=1e-10*u.rad)
+        s_lat2 = s + 1.e-5 * u.radian * sf['lat'] * e['lat']
+        assert_representation_allclose(s_lat2, s_lat)
+
+    def test_radial(self):
+        r = RadialRepresentation(10.*u.kpc)
+        with pytest.raises(NotImplementedError):
+            r.unit_vectors()
+        sf = r.scale_factors()
+        assert np.all(sf['distance'] == 1.*u.one)
+        assert np.all(r.norm() == r.distance)
+        with pytest.raises(TypeError):
+            r + r
+
+    def test_physical_spherical(self):
+
+        s = PhysicsSphericalRepresentation(phi=[0., 6., 21.] * u.hourangle,
+                                           theta=[90., 120., 5.]
* u.deg, + r=[1, 2, 3] * u.kpc) + + e = s.unit_vectors() + self.check_unit_vectors(e) + sf = s.scale_factors() + self.check_scale_factors(sf, s) + + s_phi = s + s.r * 1e-5 * np.sin(s.theta) * e['phi'] + assert_quantity_allclose(s_phi.phi, s.phi + 1e-5*u.rad, + atol=1e-10*u.rad) + assert_quantity_allclose(s_phi.theta, s.theta, atol=1e-10*u.rad) + assert_quantity_allclose(s_phi.r, s.r) + s_phi2 = s + 1e-5 * u.radian * sf['phi'] * e['phi'] + assert_representation_allclose(s_phi2, s_phi) + + s_theta = s + s.r * 1e-5 * e['theta'] + assert_quantity_allclose(s_theta.phi, s.phi) + assert_quantity_allclose(s_theta.theta, s.theta + 1e-5*u.rad, + atol=1e-10*u.rad) + assert_quantity_allclose(s_theta.r, s.r) + s_theta2 = s + 1.e-5 * u.radian * sf['theta'] * e['theta'] + assert_representation_allclose(s_theta2, s_theta) + + s_r = s + 1. * u.pc * e['r'] + assert_quantity_allclose(s_r.phi, s.phi, atol=1e-10*u.rad) + assert_quantity_allclose(s_r.theta, s.theta, atol=1e-10*u.rad) + assert_quantity_allclose(s_r.r, s.r + 1.*u.pc) + s_r2 = s + 1. * u.pc * sf['r'] * e['r'] + assert_representation_allclose(s_r2, s_r) + + def test_cartesian(self): + + s = CartesianRepresentation(x=[1, 2, 3] * u.pc, + y=[2, 3, 4] * u.Mpc, + z=[3, 4, 5] * u.kpc) + + e = s.unit_vectors() + sf = s.scale_factors() + for v, expected in zip(e.values(), ([1., 0., 0.] * u.one, + [0., 1., 0.] * u.one, + [0., 0., 1.] * u.one)): + assert np.all(v.get_xyz(xyz_axis=-1) == expected) + for f in sf.values(): + assert np.all(f == 1.*u.one) + + def test_cylindrical(self): + + s = CylindricalRepresentation(rho=[1, 2, 3] * u.pc, + phi=[0., 90., -45.] * u.deg, + z=[3, 4, 5] * u.kpc) + e = s.unit_vectors() + self.check_unit_vectors(e) + sf = s.scale_factors() + self.check_scale_factors(sf, s) + + s_rho = s + 1. * u.pc * e['rho'] + assert_quantity_allclose(s_rho.rho, s.rho + 1.*u.pc) + assert_quantity_allclose(s_rho.phi, s.phi) + assert_quantity_allclose(s_rho.z, s.z) + s_rho2 = s + 1. * u.pc * sf['rho'] * e['rho'] + assert_representation_allclose(s_rho2, s_rho) + + s_phi = s + s.rho * 1e-5 * e['phi'] + assert_quantity_allclose(s_phi.rho, s.rho) + assert_quantity_allclose(s_phi.phi, s.phi + 1e-5*u.rad) + assert_quantity_allclose(s_phi.z, s.z) + s_phi2 = s + 1e-5 * u.radian * sf['phi'] * e['phi'] + assert_representation_allclose(s_phi2, s_phi) + + s_z = s + 1. * u.pc * e['z'] + assert_quantity_allclose(s_z.rho, s.rho) + assert_quantity_allclose(s_z.phi, s.phi, atol=1e-10*u.rad) + assert_quantity_allclose(s_z.z, s.z + 1.*u.pc) + s_z2 = s + 1. * u.pc * sf['z'] * e['z'] + assert_representation_allclose(s_z2, s_z) + + +@pytest.mark.parametrize('omit_coslat', [False, True], scope='class') +class TestSphericalDifferential(): + # these test cases are subclassed for SphericalCosLatDifferential, + # hence some tests depend on omit_coslat. + + def _setup(self, omit_coslat): + if omit_coslat: + self.SD_cls = SphericalCosLatDifferential + else: + self.SD_cls = SphericalDifferential + + s = SphericalRepresentation(lon=[0., 6., 21.] * u.hourangle, + lat=[0., -30., 85.] 
* u.deg, + distance=[1, 2, 3] * u.kpc) + self.s = s + self.e = s.unit_vectors() + self.sf = s.scale_factors(omit_coslat=omit_coslat) + + def test_name_coslat(self, omit_coslat): + self._setup(omit_coslat) + if omit_coslat: + assert self.SD_cls is SphericalCosLatDifferential + assert self.SD_cls.get_name() == 'sphericalcoslat' + else: + assert self.SD_cls is SphericalDifferential + assert self.SD_cls.get_name() == 'spherical' + assert self.SD_cls.get_name() in DIFFERENTIAL_CLASSES + + def test_simple_differentials(self, omit_coslat): + self._setup(omit_coslat) + s, e, sf = self.s, self.e, self.sf + + o_lon = self.SD_cls(1.*u.arcsec, 0.*u.arcsec, 0.*u.kpc) + o_lonc = o_lon.to_cartesian(base=s) + o_lon2 = self.SD_cls.from_cartesian(o_lonc, base=s) + assert_differential_allclose(o_lon, o_lon2) + # simple check by hand for first element. + # lat[0] is 0, so cos(lat) term doesn't matter. + assert_quantity_allclose(o_lonc[0].xyz, + [0., np.pi/180./3600., 0.]*u.kpc) + # check all using unit vectors and scale factors. + s_lon = s + 1.*u.arcsec * sf['lon'] * e['lon'] + assert_representation_allclose(o_lonc, s_lon - s, atol=1*u.npc) + s_lon2 = s + o_lon + assert_representation_allclose(s_lon2, s_lon, atol=1*u.npc) + + o_lat = self.SD_cls(0.*u.arcsec, 1.*u.arcsec, 0.*u.kpc) + o_latc = o_lat.to_cartesian(base=s) + assert_quantity_allclose(o_latc[0].xyz, + [0., 0., np.pi/180./3600.]*u.kpc, + atol=1.*u.npc) + s_lat = s + 1.*u.arcsec * sf['lat'] * e['lat'] + assert_representation_allclose(o_latc, s_lat - s, atol=1*u.npc) + s_lat2 = s + o_lat + assert_representation_allclose(s_lat2, s_lat, atol=1*u.npc) + + o_distance = self.SD_cls(0.*u.arcsec, 0.*u.arcsec, 1.*u.mpc) + o_distancec = o_distance.to_cartesian(base=s) + assert_quantity_allclose(o_distancec[0].xyz, + [1e-6, 0., 0.]*u.kpc, atol=1.*u.npc) + s_distance = s + 1.*u.mpc * sf['distance'] * e['distance'] + assert_representation_allclose(o_distancec, s_distance - s, + atol=1*u.npc) + s_distance2 = s + o_distance + assert_representation_allclose(s_distance2, s_distance) + + def test_differential_arithmetic(self, omit_coslat): + self._setup(omit_coslat) + s = self.s + + o_lon = self.SD_cls(1.*u.arcsec, 0.*u.arcsec, 0.*u.kpc) + o_lon_by_2 = o_lon / 2. + assert_representation_allclose(o_lon_by_2.to_cartesian(s) * 2., + o_lon.to_cartesian(s), atol=1e-10*u.kpc) + assert_representation_allclose(s + o_lon, s + 2 * o_lon_by_2, + atol=1e-10*u.kpc) + o_lon_rec = o_lon_by_2 + o_lon_by_2 + assert_representation_allclose(s + o_lon, s + o_lon_rec, + atol=1e-10*u.kpc) + o_lon_0 = o_lon - o_lon + for c in o_lon_0.components: + assert np.all(getattr(o_lon_0, c) == 0.) 
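+        # (The 4.74 km/s below is the textbook proper-motion conversion,
+        # v_t [km/s] ~ 4.74 * mu [arcsec/yr] * d [pc]; for 1 mas/yr at the
+        # first point's distance of 1 kpc: 4.74 * 1e-3 * 1000 ~ 4.74 km/s.)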
+ o_lon2 = self.SD_cls(1*u.mas/u.yr, 0*u.mas/u.yr, 0*u.km/u.s) + assert_quantity_allclose(o_lon2.norm(s)[0], 4.74*u.km/u.s, + atol=0.01*u.km/u.s) + assert_representation_allclose(o_lon2.to_cartesian(s) * 1000.*u.yr, + o_lon.to_cartesian(s), atol=1e-10*u.kpc) + s_off = s + o_lon + s_off2 = s + o_lon2 * 1000.*u.yr + assert_representation_allclose(s_off, s_off2, atol=1e-10*u.kpc) + + factor = 1e5 * u.radian/u.arcsec + if not omit_coslat: + factor = factor / np.cos(s.lat) + s_off_big = s + o_lon * factor + + assert_representation_allclose( + s_off_big, SphericalRepresentation(s.lon + 90.*u.deg, 0.*u.deg, + 1e5*s.distance), + atol=5.*u.kpc) + + o_lon3c = CartesianRepresentation(0., 4.74047, 0., unit=u.km/u.s) + o_lon3 = self.SD_cls.from_cartesian(o_lon3c, base=s) + expected0 = self.SD_cls(1.*u.mas/u.yr, 0.*u.mas/u.yr, 0.*u.km/u.s) + assert_differential_allclose(o_lon3[0], expected0) + s_off_big2 = s + o_lon3 * 1e5 * u.yr * u.radian/u.mas + assert_representation_allclose( + s_off_big2, SphericalRepresentation(90.*u.deg, 0.*u.deg, + 1e5*u.kpc), atol=5.*u.kpc) + + with pytest.raises(TypeError): + o_lon - s + with pytest.raises(TypeError): + s.to_cartesian() + o_lon + + def test_differential_init_errors(self, omit_coslat): + self._setup(omit_coslat) + s = self.s + with pytest.raises(u.UnitsError): + self.SD_cls(1.*u.arcsec, 0., 0.) + with pytest.raises(TypeError): + self.SD_cls(1.*u.arcsec, 0.*u.arcsec, 0.*u.kpc, + False, False) + with pytest.raises(TypeError): + self.SD_cls(1.*u.arcsec, 0.*u.arcsec, 0.*u.kpc, + copy=False, d_lat=0.*u.arcsec) + with pytest.raises(TypeError): + self.SD_cls(1.*u.arcsec, 0.*u.arcsec, 0.*u.kpc, + copy=False, flying='circus') + with pytest.raises(ValueError): + self.SD_cls(np.ones(2)*u.arcsec, + np.zeros(3)*u.arcsec, np.zeros(2)*u.kpc) + with pytest.raises(u.UnitsError): + self.SD_cls(1.*u.arcsec, 1.*u.s, 0.*u.kpc) + with pytest.raises(u.UnitsError): + self.SD_cls(1.*u.kpc, 1.*u.arcsec, 0.*u.kpc) + o = self.SD_cls(1.*u.arcsec, 1.*u.arcsec, 0.*u.km/u.s) + with pytest.raises(u.UnitsError): + o.to_cartesian(s) + with pytest.raises(AttributeError): + o.d_lat = 0.*u.arcsec + with pytest.raises(AttributeError): + del o.d_lat + + o = self.SD_cls(1.*u.arcsec, 1.*u.arcsec, 0.*u.km) + with pytest.raises(TypeError): + o.to_cartesian() + c = CartesianRepresentation(10., 0., 0., unit=u.km) + with pytest.raises(TypeError): + self.SD_cls.to_cartesian(c) + with pytest.raises(TypeError): + self.SD_cls.from_cartesian(c) + with pytest.raises(TypeError): + self.SD_cls.from_cartesian(c, SphericalRepresentation) + with pytest.raises(TypeError): + self.SD_cls.from_cartesian(c, c) + + +@pytest.mark.parametrize('omit_coslat', [False, True], scope='class') +class TestUnitSphericalDifferential(): + def _setup(self, omit_coslat): + if omit_coslat: + self.USD_cls = UnitSphericalCosLatDifferential + else: + self.USD_cls = UnitSphericalDifferential + + s = UnitSphericalRepresentation(lon=[0., 6., 21.] * u.hourangle, + lat=[0., -30., 85.] 
* u.deg)
+        self.s = s
+        self.e = s.unit_vectors()
+        self.sf = s.scale_factors(omit_coslat=omit_coslat)
+
+    def test_name_coslat(self, omit_coslat):
+        self._setup(omit_coslat)
+        if omit_coslat:
+            assert self.USD_cls is UnitSphericalCosLatDifferential
+            assert self.USD_cls.get_name() == 'unitsphericalcoslat'
+        else:
+            assert self.USD_cls is UnitSphericalDifferential
+            assert self.USD_cls.get_name() == 'unitspherical'
+        assert self.USD_cls.get_name() in DIFFERENTIAL_CLASSES
+
+    def test_simple_differentials(self, omit_coslat):
+        self._setup(omit_coslat)
+        s, e, sf = self.s, self.e, self.sf
+
+        o_lon = self.USD_cls(1.*u.arcsec, 0.*u.arcsec)
+        o_lonc = o_lon.to_cartesian(base=s)
+        o_lon2 = self.USD_cls.from_cartesian(o_lonc, base=s)
+        assert_differential_allclose(o_lon, o_lon2)
+        # simple check by hand for first element
+        # (lat[0]=0, so works for both normal and CosLat differential)
+        assert_quantity_allclose(o_lonc[0].xyz,
+                                 [0., np.pi/180./3600., 0.]*u.one)
+        # check all using unit vectors and scale factors.
+        s_lon = s + 1.*u.arcsec * sf['lon'] * e['lon']
+        assert type(s_lon) is SphericalRepresentation
+        assert_representation_allclose(o_lonc, s_lon - s, atol=1e-10*u.one)
+        s_lon2 = s + o_lon
+        assert_representation_allclose(s_lon2, s_lon, atol=1e-10*u.one)
+
+        o_lat = self.USD_cls(0.*u.arcsec, 1.*u.arcsec)
+        o_latc = o_lat.to_cartesian(base=s)
+        assert_quantity_allclose(o_latc[0].xyz,
+                                 [0., 0., np.pi/180./3600.]*u.one,
+                                 atol=1e-10*u.one)
+        s_lat = s + 1.*u.arcsec * sf['lat'] * e['lat']
+        assert type(s_lat) is SphericalRepresentation
+        assert_representation_allclose(o_latc, s_lat - s, atol=1e-10*u.one)
+        s_lat2 = s + o_lat
+        assert_representation_allclose(s_lat2, s_lat, atol=1e-10*u.one)
+
+    def test_differential_arithmetic(self, omit_coslat):
+        self._setup(omit_coslat)
+        s = self.s
+
+        o_lon = self.USD_cls(1.*u.arcsec, 0.*u.arcsec)
+        o_lon_by_2 = o_lon / 2.
+        assert type(o_lon_by_2) is self.USD_cls
+        assert_representation_allclose(o_lon_by_2.to_cartesian(s) * 2.,
+                                       o_lon.to_cartesian(s), atol=1e-10*u.one)
+        s_lon = s + o_lon
+        s_lon2 = s + 2 * o_lon_by_2
+        assert type(s_lon) is SphericalRepresentation
+        assert_representation_allclose(s_lon, s_lon2, atol=1e-10*u.one)
+        o_lon_rec = o_lon_by_2 + o_lon_by_2
+        assert type(o_lon_rec) is self.USD_cls
+        assert representation_equal(o_lon, o_lon_rec)
+        assert_representation_allclose(s + o_lon, s + o_lon_rec,
+                                       atol=1e-10*u.one)
+        o_lon_0 = o_lon - o_lon
+        assert type(o_lon_0) is self.USD_cls
+        for c in o_lon_0.components:
+            assert np.all(getattr(o_lon_0, c) == 0.)
+
+        o_lon2 = self.USD_cls(1.*u.mas/u.yr, 0.*u.mas/u.yr)
+        kks = u.km/u.kpc/u.s
+        assert_quantity_allclose(o_lon2.norm(s)[0], 4.74047*kks, atol=1e-4*kks)
+        assert_representation_allclose(o_lon2.to_cartesian(s) * 1000.*u.yr,
+                                       o_lon.to_cartesian(s), atol=1e-10*u.one)
+        s_off = s + o_lon
+        s_off2 = s + o_lon2 * 1000.*u.yr
+        assert_representation_allclose(s_off, s_off2, atol=1e-10*u.one)
+
+        factor = 1e5 * u.radian/u.arcsec
+        if not omit_coslat:
+            factor = factor / np.cos(s.lat)
+        s_off_big = s + o_lon * factor
+
+        assert_representation_allclose(
+            s_off_big, SphericalRepresentation(s.lon + 90.*u.deg,
+                                               0.*u.deg, 1e5),
+            atol=5.*u.one)
+
+        o_lon3c = CartesianRepresentation(0., 4.74047, 0., unit=kks)
+        # This loses information!!
+        o_lon3 = self.USD_cls.from_cartesian(o_lon3c, base=s)
+        expected0 = self.USD_cls(1.*u.mas/u.yr, 0.*u.mas/u.yr)
+        assert_differential_allclose(o_lon3[0], expected0)
+        # Part of motion kept.
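+        # (Projecting a full Cartesian velocity onto a unit-spherical
+        # differential keeps only the transverse, on-sky component; the
+        # radial part is discarded, hence the norm check below.)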
+ part_kept = s.cross(CartesianRepresentation(0, 1, 0, unit=u.one)).norm() + assert_quantity_allclose(o_lon3.norm(s), 4.74047*part_kept*kks, + atol=1e-10*kks) + # (lat[0]=0, so works for both normal and CosLat differential) + s_off_big2 = s + o_lon3 * 1e5 * u.yr * u.radian/u.mas + expected0 = SphericalRepresentation(90.*u.deg, 0.*u.deg, + 1e5*u.one) + assert_representation_allclose(s_off_big2[0], expected0, atol=5.*u.one) + + def test_differential_init_errors(self, omit_coslat): + self._setup(omit_coslat) + with pytest.raises(u.UnitsError): + self.USD_cls(0.*u.deg, 10.*u.deg/u.yr) + + +class TestRadialDifferential(): + def setup(self): + s = SphericalRepresentation(lon=[0., 6., 21.] * u.hourangle, + lat=[0., -30., 85.] * u.deg, + distance=[1, 2, 3] * u.kpc) + self.s = s + self.r = s.represent_as(RadialRepresentation) + self.e = s.unit_vectors() + self.sf = s.scale_factors() + + def test_name(self): + assert RadialDifferential.get_name() == 'radial' + assert RadialDifferential.get_name() in DIFFERENTIAL_CLASSES + + def test_simple_differentials(self): + r, s, e, sf = self.r, self.s, self.e, self.sf + + o_distance = RadialDifferential(1.*u.mpc) + # Can be applied to RadialRepresentation, though not most useful. + r_distance = r + o_distance + assert_quantity_allclose(r_distance.distance, + r.distance + o_distance.d_distance) + r_distance2 = o_distance + r + assert_quantity_allclose(r_distance2.distance, + r.distance + o_distance.d_distance) + # More sense to apply it relative to spherical representation. + o_distancec = o_distance.to_cartesian(base=s) + assert_quantity_allclose(o_distancec[0].xyz, + [1e-6, 0., 0.]*u.kpc, atol=1.*u.npc) + o_recover = RadialDifferential.from_cartesian(o_distancec, base=s) + assert_quantity_allclose(o_recover.d_distance, o_distance.d_distance) + + s_distance = s + 1.*u.mpc * sf['distance'] * e['distance'] + assert_representation_allclose(o_distancec, s_distance - s, + atol=1*u.npc) + s_distance2 = s + o_distance + assert_representation_allclose(s_distance2, s_distance) + + +class TestPhysicsSphericalDifferential(): + """Test copied from SphericalDifferential, so less extensive.""" + + def setup(self): + s = PhysicsSphericalRepresentation(phi=[0., 90., 315.] * u.deg, + theta=[90., 120., 5.] * u.deg, + r=[1, 2, 3] * u.kpc) + self.s = s + self.e = s.unit_vectors() + self.sf = s.scale_factors() + + def test_name(self): + assert PhysicsSphericalDifferential.get_name() == 'physicsspherical' + assert PhysicsSphericalDifferential.get_name() in DIFFERENTIAL_CLASSES + + def test_simple_differentials(self): + s, e, sf = self.s, self.e, self.sf + + o_phi = PhysicsSphericalDifferential(1*u.arcsec, 0*u.arcsec, 0*u.kpc) + o_phic = o_phi.to_cartesian(base=s) + o_phi2 = PhysicsSphericalDifferential.from_cartesian(o_phic, base=s) + assert_quantity_allclose(o_phi.d_phi, o_phi2.d_phi, atol=1.*u.narcsec) + assert_quantity_allclose(o_phi.d_theta, o_phi2.d_theta, + atol=1.*u.narcsec) + assert_quantity_allclose(o_phi.d_r, o_phi2.d_r, atol=1.*u.npc) + # simple check by hand for first element. + assert_quantity_allclose(o_phic[0].xyz, + [0., np.pi/180./3600., 0.]*u.kpc, + atol=1.*u.npc) + # check all using unit vectors and scale factors. 
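+        # (For physics spherical coordinates the scale factors are
+        # h_phi = r sin(theta), h_theta = r, h_r = 1, so an angular offset
+        # times sf['phi'] or sf['theta'] gives the corresponding arc length.)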
+ s_phi = s + 1.*u.arcsec * sf['phi'] * e['phi'] + assert_representation_allclose(o_phic, s_phi - s, atol=1e-10*u.kpc) + + o_theta = PhysicsSphericalDifferential(0*u.arcsec, 1*u.arcsec, 0*u.kpc) + o_thetac = o_theta.to_cartesian(base=s) + assert_quantity_allclose(o_thetac[0].xyz, + [0., 0., -np.pi/180./3600.]*u.kpc, + atol=1.*u.npc) + s_theta = s + 1.*u.arcsec * sf['theta'] * e['theta'] + assert_representation_allclose(o_thetac, s_theta - s, atol=1e-10*u.kpc) + s_theta2 = s + o_theta + assert_representation_allclose(s_theta2, s_theta, atol=1e-10*u.kpc) + + o_r = PhysicsSphericalDifferential(0*u.arcsec, 0*u.arcsec, 1*u.mpc) + o_rc = o_r.to_cartesian(base=s) + assert_quantity_allclose(o_rc[0].xyz, [1e-6, 0., 0.]*u.kpc, + atol=1.*u.npc) + s_r = s + 1.*u.mpc * sf['r'] * e['r'] + assert_representation_allclose(o_rc, s_r - s, atol=1e-10*u.kpc) + s_r2 = s + o_r + assert_representation_allclose(s_r2, s_r) + + def test_differential_init_errors(self): + with pytest.raises(u.UnitsError): + PhysicsSphericalDifferential(1.*u.arcsec, 0., 0.) + + +class TestCylindricalDifferential(): + """Test copied from SphericalDifferential, so less extensive.""" + + def setup(self): + s = CylindricalRepresentation(rho=[1, 2, 3] * u.kpc, + phi=[0., 90., 315.] * u.deg, + z=[3, 2, 1] * u.kpc) + self.s = s + self.e = s.unit_vectors() + self.sf = s.scale_factors() + + def test_name(self): + assert CylindricalDifferential.get_name() == 'cylindrical' + assert CylindricalDifferential.get_name() in DIFFERENTIAL_CLASSES + + def test_simple_differentials(self): + s, e, sf = self.s, self.e, self.sf + + o_rho = CylindricalDifferential(1.*u.mpc, 0.*u.arcsec, 0.*u.kpc) + o_rhoc = o_rho.to_cartesian(base=s) + assert_quantity_allclose(o_rhoc[0].xyz, [1.e-6, 0., 0.]*u.kpc) + s_rho = s + 1.*u.mpc * sf['rho'] * e['rho'] + assert_representation_allclose(o_rhoc, s_rho - s, atol=1e-10*u.kpc) + s_rho2 = s + o_rho + assert_representation_allclose(s_rho2, s_rho) + + o_phi = CylindricalDifferential(0.*u.kpc, 1.*u.arcsec, 0.*u.kpc) + o_phic = o_phi.to_cartesian(base=s) + o_phi2 = CylindricalDifferential.from_cartesian(o_phic, base=s) + assert_quantity_allclose(o_phi.d_rho, o_phi2.d_rho, atol=1.*u.npc) + assert_quantity_allclose(o_phi.d_phi, o_phi2.d_phi, atol=1.*u.narcsec) + assert_quantity_allclose(o_phi.d_z, o_phi2.d_z, atol=1.*u.npc) + # simple check by hand for first element. + assert_quantity_allclose(o_phic[0].xyz, + [0., np.pi/180./3600., 0.]*u.kpc) + # check all using unit vectors and scale factors. 
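+        # (Cylindrical scale factors: h_rho = 1, h_phi = rho, h_z = 1;
+        # only the azimuthal offset needs scaling, by rho.)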
+ s_phi = s + 1.*u.arcsec * sf['phi'] * e['phi'] + assert_representation_allclose(o_phic, s_phi - s, atol=1e-10*u.kpc) + + o_z = CylindricalDifferential(0.*u.kpc, 0.*u.arcsec, 1.*u.mpc) + o_zc = o_z.to_cartesian(base=s) + assert_quantity_allclose(o_zc[0].xyz, [0., 0., 1.e-6]*u.kpc) + s_z = s + 1.*u.mpc * sf['z'] * e['z'] + assert_representation_allclose(o_zc, s_z - s, atol=1e-10*u.kpc) + s_z2 = s + o_z + assert_representation_allclose(s_z2, s_z) + + def test_differential_init_errors(self): + with pytest.raises(u.UnitsError): + CylindricalDifferential(1.*u.pc, 1.*u.arcsec, 3.*u.km/u.s) + + +class TestCartesianDifferential(): + """Test copied from SphericalDifferential, so less extensive.""" + + def setup(self): + s = CartesianRepresentation(x=[1, 2, 3] * u.kpc, + y=[2, 3, 1] * u.kpc, + z=[3, 1, 2] * u.kpc) + self.s = s + self.e = s.unit_vectors() + self.sf = s.scale_factors() + + def test_name(self): + assert CartesianDifferential.get_name() == 'cartesian' + assert CartesianDifferential.get_name() in DIFFERENTIAL_CLASSES + + def test_simple_differentials(self): + s, e, sf = self.s, self.e, self.sf + + for d, differential in ( # test different inits while we're at it. + ('x', CartesianDifferential(1.*u.pc, 0.*u.pc, 0.*u.pc)), + ('y', CartesianDifferential([0., 1., 0.], unit=u.pc)), + ('z', CartesianDifferential(np.array([[0., 0., 1.]]) * u.pc, + xyz_axis=1))): + o_c = differential.to_cartesian(base=s) + o_c2 = differential.to_cartesian() + assert np.all(representation_equal(o_c, o_c2)) + assert all(np.all(getattr(differential, 'd_'+c) == getattr(o_c, c)) + for c in ('x', 'y', 'z')) + differential2 = CartesianDifferential.from_cartesian(o_c) + assert np.all(representation_equal(differential2, differential)) + differential3 = CartesianDifferential.from_cartesian(o_c, base=o_c) + assert np.all(representation_equal(differential3, differential)) + + s_off = s + 1.*u.pc * sf[d] * e[d] + assert_representation_allclose(o_c, s_off - s, atol=1e-10*u.kpc) + s_off2 = s + differential + assert_representation_allclose(s_off2, s_off) + + def test_init_failures(self): + with pytest.raises(ValueError): + CartesianDifferential(1.*u.kpc/u.s, 2.*u.kpc) + with pytest.raises(u.UnitsError): + CartesianDifferential(1.*u.kpc/u.s, 2.*u.kpc, 3.*u.kpc) + with pytest.raises(ValueError): + CartesianDifferential(1.*u.kpc, 2.*u.kpc, 3.*u.kpc, xyz_axis=1) + + +class TestDifferentialConversion(): + def setup(self): + self.s = SphericalRepresentation(lon=[0., 6., 21.] * u.hourangle, + lat=[0., -30., 85.] 
* u.deg, + distance=[1, 2, 3] * u.kpc) + + @pytest.mark.parametrize('sd_cls', [SphericalDifferential, + SphericalCosLatDifferential]) + def test_represent_as_own_class(self, sd_cls): + so = sd_cls(1.*u.deg, 2.*u.deg, 0.1*u.kpc) + so2 = so.represent_as(sd_cls) + assert so2 is so + + def test_represent_other_coslat(self): + s = self.s + coslat = np.cos(s.lat) + so = SphericalDifferential(1.*u.deg, 2.*u.deg, 0.1*u.kpc) + so_coslat = so.represent_as(SphericalCosLatDifferential, base=s) + assert_quantity_allclose(so.d_lon * coslat, + so_coslat.d_lon_coslat) + so2 = so_coslat.represent_as(SphericalDifferential, base=s) + assert np.all(representation_equal(so2, so)) + so3 = SphericalDifferential.from_representation(so_coslat, base=s) + assert np.all(representation_equal(so3, so)) + so_coslat2 = SphericalCosLatDifferential.from_representation(so, base=s) + assert np.all(representation_equal(so_coslat2, so_coslat)) + # Also test UnitSpherical + us = s.represent_as(UnitSphericalRepresentation) + uo = so.represent_as(UnitSphericalDifferential) + uo_coslat = so.represent_as(UnitSphericalCosLatDifferential, base=s) + assert_quantity_allclose(uo.d_lon * coslat, + uo_coslat.d_lon_coslat) + uo2 = uo_coslat.represent_as(UnitSphericalDifferential, base=us) + assert np.all(representation_equal(uo2, uo)) + uo3 = UnitSphericalDifferential.from_representation(uo_coslat, base=us) + assert np.all(representation_equal(uo3, uo)) + uo_coslat2 = UnitSphericalCosLatDifferential.from_representation( + uo, base=us) + assert np.all(representation_equal(uo_coslat2, uo_coslat)) + uo_coslat3 = uo.represent_as(UnitSphericalCosLatDifferential, base=us) + assert np.all(representation_equal(uo_coslat3, uo_coslat)) + + @pytest.mark.parametrize('sd_cls', [SphericalDifferential, + SphericalCosLatDifferential]) + @pytest.mark.parametrize('r_cls', (SphericalRepresentation, + UnitSphericalRepresentation, + PhysicsSphericalRepresentation, + CylindricalRepresentation)) + def test_represent_regular_class(self, sd_cls, r_cls): + so = sd_cls(1.*u.deg, 2.*u.deg, 0.1*u.kpc) + r = so.represent_as(r_cls, base=self.s) + c = so.to_cartesian(self.s) + r_check = c.represent_as(r_cls) + assert np.all(representation_equal(r, r_check)) + so2 = sd_cls.from_representation(r, base=self.s) + so3 = sd_cls.from_cartesian(r.to_cartesian(), self.s) + assert np.all(representation_equal(so2, so3)) + + @pytest.mark.parametrize('sd_cls', [SphericalDifferential, + SphericalCosLatDifferential]) + def test_convert_physics(self, sd_cls): + # Conversion needs no base for SphericalDifferential, but does + # need one (to get the latitude) for SphericalCosLatDifferential. 
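+        # (That is, d_lon_coslat = d_lon * cos(lat), so translating between
+        # the plain and CosLat forms needs the latitude of the base point.)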
+        if sd_cls is SphericalDifferential:
+            usd_cls = UnitSphericalDifferential
+            base_s = base_u = base_p = None
+        else:
+            usd_cls = UnitSphericalCosLatDifferential
+            base_s = self.s[1]
+            base_u = base_s.represent_as(UnitSphericalRepresentation)
+            base_p = base_s.represent_as(PhysicsSphericalRepresentation)
+
+        so = sd_cls(1.*u.deg, 2.*u.deg, 0.1*u.kpc)
+        po = so.represent_as(PhysicsSphericalDifferential, base=base_s)
+        so2 = sd_cls.from_representation(po, base=base_s)
+        assert_differential_allclose(so, so2)
+        po2 = PhysicsSphericalDifferential.from_representation(so, base=base_p)
+        assert_differential_allclose(po, po2)
+        so3 = po.represent_as(sd_cls, base=base_p)
+        assert_differential_allclose(so, so3)
+
+        s = self.s
+        p = s.represent_as(PhysicsSphericalRepresentation)
+        cso = so.to_cartesian(s[1])
+        cpo = po.to_cartesian(p[1])
+        assert_representation_allclose(cso, cpo)
+        assert_representation_allclose(s[1] + so, p[1] + po)
+        po2 = so.represent_as(PhysicsSphericalDifferential,
+                              base=None if base_s is None else s)
+        assert_representation_allclose(s + so, p + po2)
+
+        suo = usd_cls.from_representation(so)
+        puo = usd_cls.from_representation(po, base=base_u)
+        assert_differential_allclose(suo, puo)
+        suo2 = so.represent_as(usd_cls)
+        puo2 = po.represent_as(usd_cls, base=base_p)
+        assert_differential_allclose(suo2, puo2)
+        assert_differential_allclose(puo, puo2)
+
+        sro = RadialDifferential.from_representation(so)
+        pro = RadialDifferential.from_representation(po)
+        assert representation_equal(sro, pro)
+        sro2 = so.represent_as(RadialDifferential)
+        pro2 = po.represent_as(RadialDifferential)
+        assert representation_equal(sro2, pro2)
+        assert representation_equal(pro, pro2)
+
+    @pytest.mark.parametrize(
+        ('sd_cls', 'usd_cls'),
+        [(SphericalDifferential, UnitSphericalDifferential),
+         (SphericalCosLatDifferential, UnitSphericalCosLatDifferential)])
+    def test_convert_unit_spherical_radial(self, sd_cls, usd_cls):
+        s = self.s
+        us = s.represent_as(UnitSphericalRepresentation)
+        rs = s.represent_as(RadialRepresentation)
+        assert_representation_allclose(rs * us, s)
+
+        uo = usd_cls(2.*u.deg, 1.*u.deg)
+        so = uo.represent_as(sd_cls, base=s)
+        assert_quantity_allclose(so.d_distance, 0.*u.kpc, atol=1.*u.npc)
+        uo2 = so.represent_as(usd_cls)
+        assert_representation_allclose(uo.to_cartesian(us),
+                                       uo2.to_cartesian(us))
+        so1 = sd_cls(2.*u.deg, 1.*u.deg, 5.*u.pc)
+        uo_r = so1.represent_as(usd_cls)
+        ro_r = so1.represent_as(RadialDifferential)
+        assert np.all(representation_equal(uo_r, uo))
+        assert np.all(representation_equal(ro_r, RadialDifferential(5.*u.pc)))
+
+    @pytest.mark.parametrize('sd_cls', [SphericalDifferential,
+                                        SphericalCosLatDifferential])
+    def test_convert_cylindrical(self, sd_cls):
+        s = self.s
+        so = sd_cls(1.*u.deg, 2.*u.deg, 0.1*u.kpc)
+        cyo = so.represent_as(CylindricalDifferential, base=s)
+        cy = s.represent_as(CylindricalRepresentation)
+        so1 = cyo.represent_as(sd_cls, base=cy)
+        assert_representation_allclose(so.to_cartesian(s),
+                                       so1.to_cartesian(s))
+        cyo2 = CylindricalDifferential.from_representation(so, base=cy)
+        assert_representation_allclose(cyo2.to_cartesian(base=cy),
+                                       cyo.to_cartesian(base=cy))
+        so2 = sd_cls.from_representation(cyo2, base=s)
+        assert_representation_allclose(so.to_cartesian(s),
+                                       so2.to_cartesian(s))
+
+    @pytest.mark.parametrize('sd_cls', [SphericalDifferential,
+                                        SphericalCosLatDifferential])
+    def test_combinations(self, sd_cls):
+        if sd_cls is SphericalDifferential:
+            uo = UnitSphericalDifferential(2.*u.deg, 1.*u.deg)
+            uo_d_lon = uo.d_lon
+        else:
+            uo
= UnitSphericalCosLatDifferential(2.*u.deg, 1.*u.deg) + uo_d_lon = uo.d_lon_coslat + ro = RadialDifferential(1.*u.mpc) + so1 = uo + ro + so1c = sd_cls(uo_d_lon, uo.d_lat, ro.d_distance) + assert np.all(representation_equal(so1, so1c)) + + so2 = uo - ro + so2c = sd_cls(uo_d_lon, uo.d_lat, -ro.d_distance) + assert np.all(representation_equal(so2, so2c)) + so3 = so2 + ro + so3c = sd_cls(uo_d_lon, uo.d_lat, 0.*u.kpc) + assert np.all(representation_equal(so3, so3c)) + so4 = so1 + ro + so4c = sd_cls(uo_d_lon, uo.d_lat, 2*ro.d_distance) + assert np.all(representation_equal(so4, so4c)) + so5 = so1 - uo + so5c = sd_cls(0*u.deg, 0.*u.deg, ro.d_distance) + assert np.all(representation_equal(so5, so5c)) + assert_representation_allclose(self.s + (uo+ro), self.s+so1) + + +@pytest.mark.parametrize('rep,dif', [ + [CartesianRepresentation([1, 2, 3]*u.kpc), + CartesianDifferential([.1, .2, .3]*u.km/u.s)], + [SphericalRepresentation(90*u.deg, 0.*u.deg, 14.*u.kpc), + SphericalDifferential(1.*u.deg, 2.*u.deg, 0.1*u.kpc)] +]) +def test_arithmetic_with_differentials_fail(rep, dif): + + rep = rep.with_differentials(dif) + + with pytest.raises(TypeError): + rep + rep + + with pytest.raises(TypeError): + rep - rep + + with pytest.raises(TypeError): + rep * rep + + with pytest.raises(TypeError): + rep / rep + + with pytest.raises(TypeError): + 10. * rep + + with pytest.raises(TypeError): + rep / 10. + + with pytest.raises(TypeError): + -rep diff --git a/astropy/coordinates/tests/test_representation_methods.py b/astropy/coordinates/tests/test_representation_methods.py new file mode 100644 index 0000000..7ecb634 --- /dev/null +++ b/astropy/coordinates/tests/test_representation_methods.py @@ -0,0 +1,273 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +# TEST_UNICODE_LITERALS + +import pytest +import numpy as np + +from ... import units as u +from .. import (SphericalRepresentation, Longitude, Latitude, + SphericalDifferential) +from ...utils.compat.numpy import broadcast_to as np_broadcast_to + + +class TestManipulation(): + """Manipulation of Representation shapes. + + Checking that attributes are manipulated correctly. + + Even more exhaustive tests are done in time.tests.test_methods + """ + + def setup(self): + lon = Longitude(np.arange(0, 24, 4), u.hourangle) + lat = Latitude(np.arange(-90, 91, 30), u.deg) + + # With same-sized arrays + self.s0 = SphericalRepresentation( + lon[:, np.newaxis] * np.ones(lat.shape), + lat * np.ones(lon.shape)[:, np.newaxis], + np.ones(lon.shape + lat.shape) * u.kpc) + + self.diff = SphericalDifferential( + d_lon=np.ones(self.s0.shape)*u.mas/u.yr, + d_lat=np.ones(self.s0.shape)*u.mas/u.yr, + d_distance=np.ones(self.s0.shape)*u.km/u.s) + self.s0 = self.s0.with_differentials(self.diff) + + # With unequal arrays -> these will be broadcasted. + self.s1 = SphericalRepresentation(lon[:, np.newaxis], lat, 1. * u.kpc, + differentials=self.diff) + + # For completeness on some tests, also a cartesian one + self.c0 = self.s0.to_cartesian() + + def test_ravel(self): + s0_ravel = self.s0.ravel() + assert type(s0_ravel) is type(self.s0) + assert s0_ravel.shape == (self.s0.size,) + assert np.all(s0_ravel.lon == self.s0.lon.ravel()) + assert np.may_share_memory(s0_ravel.lon, self.s0.lon) + assert np.may_share_memory(s0_ravel.lat, self.s0.lat) + assert np.may_share_memory(s0_ravel.distance, self.s0.distance) + assert s0_ravel.differentials['s'].shape == (self.s0.size,) + + # Since s1 was broadcast, ravel needs to make a copy. 
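+        # (A broadcast component has zero strides, so it cannot be raveled
+        # as a view; numpy must materialize a full copy first.)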
+ s1_ravel = self.s1.ravel() + assert type(s1_ravel) is type(self.s1) + assert s1_ravel.shape == (self.s1.size,) + assert s1_ravel.differentials['s'].shape == (self.s1.size,) + assert np.all(s1_ravel.lon == self.s1.lon.ravel()) + assert not np.may_share_memory(s1_ravel.lat, self.s1.lat) + + def test_copy(self): + s0_copy = self.s0.copy() + s0_copy_diff = s0_copy.differentials['s'] + assert s0_copy.shape == self.s0.shape + assert np.all(s0_copy.lon == self.s0.lon) + assert np.all(s0_copy.lat == self.s0.lat) + + # Check copy was made of internal data. + assert not np.may_share_memory(s0_copy.distance, self.s0.distance) + assert not np.may_share_memory(s0_copy_diff.d_lon, self.diff.d_lon) + + def test_flatten(self): + s0_flatten = self.s0.flatten() + s0_diff = s0_flatten.differentials['s'] + assert s0_flatten.shape == (self.s0.size,) + assert s0_diff.shape == (self.s0.size,) + assert np.all(s0_flatten.lon == self.s0.lon.flatten()) + assert np.all(s0_diff.d_lon == self.diff.d_lon.flatten()) + + # Flatten always copies. + assert not np.may_share_memory(s0_flatten.distance, self.s0.distance) + assert not np.may_share_memory(s0_diff.d_lon, self.diff.d_lon) + + s1_flatten = self.s1.flatten() + assert s1_flatten.shape == (self.s1.size,) + assert np.all(s1_flatten.lon == self.s1.lon.flatten()) + assert not np.may_share_memory(s1_flatten.lat, self.s1.lat) + + def test_transpose(self): + s0_transpose = self.s0.transpose() + s0_diff = s0_transpose.differentials['s'] + assert s0_transpose.shape == (7, 6) + assert s0_diff.shape == s0_transpose.shape + assert np.all(s0_transpose.lon == self.s0.lon.transpose()) + assert np.all(s0_diff.d_lon == self.diff.d_lon.transpose()) + assert np.may_share_memory(s0_transpose.distance, self.s0.distance) + assert np.may_share_memory(s0_diff.d_lon, self.diff.d_lon) + + s1_transpose = self.s1.transpose() + s1_diff = s1_transpose.differentials['s'] + assert s1_transpose.shape == (7, 6) + assert s1_diff.shape == s1_transpose.shape + assert np.all(s1_transpose.lat == self.s1.lat.transpose()) + assert np.all(s1_diff.d_lon == self.diff.d_lon.transpose()) + assert np.may_share_memory(s1_transpose.lat, self.s1.lat) + assert np.may_share_memory(s1_diff.d_lon, self.diff.d_lon) + + # Only one check on T, since it just calls transpose anyway. + # Doing it on the CartesianRepresentation just for variety's sake. 
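+        # Added illustrative sketch, not from the upstream suite: .T is a
+        # pure stride permutation in numpy, hence always a view on the same
+        # buffer, which is why may_share_memory is asserted below.
+        _arr = np.arange(6.).reshape(2, 3)
+        assert np.may_share_memory(_arr.T, _arr)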
+        c0_T = self.c0.T
+        assert c0_T.shape == (7, 6)
+        assert np.all(c0_T.x == self.c0.x.T)
+        assert np.may_share_memory(c0_T.y, self.c0.y)
+
+    def test_diagonal(self):
+        s0_diagonal = self.s0.diagonal()
+        s0_diff = s0_diagonal.differentials['s']
+        assert s0_diagonal.shape == (6,)
+        assert s0_diff.shape == s0_diagonal.shape
+        assert np.all(s0_diagonal.lat == self.s0.lat.diagonal())
+        assert np.all(s0_diff.d_lon == self.diff.d_lon.diagonal())
+        assert np.may_share_memory(s0_diagonal.lat, self.s0.lat)
+        assert np.may_share_memory(s0_diff.d_lon, self.diff.d_lon)
+
+    def test_swapaxes(self):
+        s1_swapaxes = self.s1.swapaxes(0, 1)
+        s1_diff = s1_swapaxes.differentials['s']
+        assert s1_swapaxes.shape == (7, 6)
+        assert s1_diff.shape == s1_swapaxes.shape
+        assert np.all(s1_swapaxes.lat == self.s1.lat.swapaxes(0, 1))
+        assert np.all(s1_diff.d_lon == self.diff.d_lon.swapaxes(0, 1))
+        assert np.may_share_memory(s1_swapaxes.lat, self.s1.lat)
+        assert np.may_share_memory(s1_diff.d_lon, self.diff.d_lon)
+
+    def test_reshape(self):
+        s0_reshape = self.s0.reshape(2, 3, 7)
+        s0_diff = s0_reshape.differentials['s']
+        assert s0_reshape.shape == (2, 3, 7)
+        assert s0_diff.shape == s0_reshape.shape
+        assert np.all(s0_reshape.lon == self.s0.lon.reshape(2, 3, 7))
+        assert np.all(s0_reshape.lat == self.s0.lat.reshape(2, 3, 7))
+        assert np.all(s0_reshape.distance == self.s0.distance.reshape(2, 3, 7))
+        assert np.may_share_memory(s0_reshape.lon, self.s0.lon)
+        assert np.may_share_memory(s0_reshape.lat, self.s0.lat)
+        assert np.may_share_memory(s0_reshape.distance, self.s0.distance)
+
+        s1_reshape = self.s1.reshape(3, 2, 7)
+        s1_diff = s1_reshape.differentials['s']
+        assert s1_reshape.shape == (3, 2, 7)
+        assert s1_diff.shape == s1_reshape.shape
+        assert np.all(s1_reshape.lat == self.s1.lat.reshape(3, 2, 7))
+        assert np.all(s1_diff.d_lon == self.diff.d_lon.reshape(3, 2, 7))
+        assert np.may_share_memory(s1_reshape.lat, self.s1.lat)
+        assert np.may_share_memory(s1_diff.d_lon, self.diff.d_lon)
+
+        # For reshape(3, 14), copying is necessary for lon, lat, but not for
+        # distance, which was broadcast.
+        s1_reshape2 = self.s1.reshape(3, 14)
+        assert s1_reshape2.shape == (3, 14)
+        assert np.all(s1_reshape2.lon == self.s1.lon.reshape(3, 14))
+        assert not np.may_share_memory(s1_reshape2.lon, self.s1.lon)
+        assert s1_reshape2.distance.shape == (3, 14)
+        assert np.may_share_memory(s1_reshape2.distance, self.s1.distance)
+
+    def test_shape_setting(self):
+        # Shape-setting should be on the object itself, since copying removes
+        # zero-strides due to broadcasting.  We reset the objects at the end.
+        self.s0.shape = (2, 3, 7)
+        assert self.s0.shape == (2, 3, 7)
+        assert self.s0.lon.shape == (2, 3, 7)
+        assert self.s0.lat.shape == (2, 3, 7)
+        assert self.s0.distance.shape == (2, 3, 7)
+        assert self.diff.shape == (2, 3, 7)
+        assert self.diff.d_lon.shape == (2, 3, 7)
+        assert self.diff.d_lat.shape == (2, 3, 7)
+        assert self.diff.d_distance.shape == (2, 3, 7)
+
+        # this works with the broadcasting.
+        self.s1.shape = (2, 3, 7)
+        assert self.s1.shape == (2, 3, 7)
+        assert self.s1.lon.shape == (2, 3, 7)
+        assert self.s1.lat.shape == (2, 3, 7)
+        assert self.s1.distance.shape == (2, 3, 7)
+        assert self.s1.distance.strides == (0, 0, 0)
+
+        # but this one does not.
+ oldshape = self.s1.shape + with pytest.raises(AttributeError): + self.s1.shape = (42,) + assert self.s1.shape == oldshape + assert self.s1.lon.shape == oldshape + assert self.s1.lat.shape == oldshape + assert self.s1.distance.shape == oldshape + + # Finally, a more complicated one that checks that things get reset + # properly if it is not the first component that fails. + s2 = SphericalRepresentation(self.s1.lon.copy(), self.s1.lat, + self.s1.distance, copy=False) + assert 0 not in s2.lon.strides + assert 0 in s2.lat.strides + with pytest.raises(AttributeError): + s2.shape = (42,) + assert s2.shape == oldshape + assert s2.lon.shape == oldshape + assert s2.lat.shape == oldshape + assert s2.distance.shape == oldshape + assert 0 not in s2.lon.strides + assert 0 in s2.lat.strides + self.setup() + + def test_squeeze(self): + s0_squeeze = self.s0.reshape(3, 1, 2, 1, 7).squeeze() + s0_diff = s0_squeeze.differentials['s'] + assert s0_squeeze.shape == (3, 2, 7) + assert s0_diff.shape == s0_squeeze.shape + assert np.all(s0_squeeze.lat == self.s0.lat.reshape(3, 2, 7)) + assert np.all(s0_diff.d_lon == self.diff.d_lon.reshape(3, 2, 7)) + assert np.may_share_memory(s0_squeeze.lat, self.s0.lat) + + def test_add_dimension(self): + s0_adddim = self.s0[:, np.newaxis, :] + s0_diff = s0_adddim.differentials['s'] + assert s0_adddim.shape == (6, 1, 7) + assert s0_diff.shape == s0_adddim.shape + assert np.all(s0_adddim.lon == self.s0.lon[:, np.newaxis, :]) + assert np.all(s0_diff.d_lon == self.diff.d_lon[:, np.newaxis, :]) + assert np.may_share_memory(s0_adddim.lat, self.s0.lat) + + def test_take(self): + s0_take = self.s0.take((5, 2)) + s0_diff = s0_take.differentials['s'] + assert s0_take.shape == (2,) + assert s0_diff.shape == s0_take.shape + assert np.all(s0_take.lon == self.s0.lon.take((5, 2))) + assert np.all(s0_diff.d_lon == self.diff.d_lon.take((5, 2))) + + def test_broadcast_to(self): + s0_broadcast = self.s0._apply(np_broadcast_to, (3, 6, 7), subok=True) + s0_diff = s0_broadcast.differentials['s'] + assert type(s0_broadcast) is type(self.s0) + assert s0_broadcast.shape == (3, 6, 7) + assert s0_diff.shape == s0_broadcast.shape + assert np.all(s0_broadcast.lon == self.s0.lon) + assert np.all(s0_broadcast.lat == self.s0.lat) + assert np.all(s0_broadcast.distance == self.s0.distance) + assert np.may_share_memory(s0_broadcast.lon, self.s0.lon) + assert np.may_share_memory(s0_broadcast.lat, self.s0.lat) + assert np.may_share_memory(s0_broadcast.distance, self.s0.distance) + + s1_broadcast = self.s1._apply(np_broadcast_to, shape=(3, 6, 7), + subok=True) + s1_diff = s1_broadcast.differentials['s'] + assert s1_broadcast.shape == (3, 6, 7) + assert s1_diff.shape == s1_broadcast.shape + assert np.all(s1_broadcast.lat == self.s1.lat) + assert np.all(s1_broadcast.lon == self.s1.lon) + assert np.all(s1_broadcast.distance == self.s1.distance) + assert s1_broadcast.distance.shape == (3, 6, 7) + assert np.may_share_memory(s1_broadcast.lat, self.s1.lat) + assert np.may_share_memory(s1_broadcast.lon, self.s1.lon) + assert np.may_share_memory(s1_broadcast.distance, self.s1.distance) + + # A final test that "may_share_memory" equals "does_share_memory" + # Do this on a copy, to keep self.s0 unchanged. + sc = self.s0.copy() + assert not np.may_share_memory(sc.lon, self.s0.lon) + assert not np.may_share_memory(sc.lat, self.s0.lat) + sc_broadcast = sc._apply(np_broadcast_to, (3, 6, 7), subok=True) + assert np.may_share_memory(sc_broadcast.lon, sc.lon) + # Can only write to copy, not to broadcast version. 
+ sc.lon[0, 0] = 22. * u.hourangle + assert np.all(sc_broadcast.lon[:, 0, 0] == 22. * u.hourangle) diff --git a/astropy/coordinates/tests/test_shape_manipulation.py b/astropy/coordinates/tests/test_shape_manipulation.py new file mode 100644 index 0000000..55654af --- /dev/null +++ b/astropy/coordinates/tests/test_shape_manipulation.py @@ -0,0 +1,267 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +# TEST_UNICODE_LITERALS +import numpy as np + +from ... import units as u +from .. import Longitude, Latitude, EarthLocation, SkyCoord +# test on frame with most complicated frame attributes. +from ..builtin_frames import ICRS, AltAz, GCRS +from ...time import Time + + +class TestManipulation(): + """Manipulation of Frame shapes. + + Checking that attributes are manipulated correctly. + + Even more exhaustive tests are done in time.tests.test_methods + """ + + def setup(self): + lon = Longitude(np.arange(0, 24, 4), u.hourangle) + lat = Latitude(np.arange(-90, 91, 30), u.deg) + # With same-sized arrays, no attributes + self.s0 = ICRS(lon[:, np.newaxis] * np.ones(lat.shape), + lat * np.ones(lon.shape)[:, np.newaxis]) + # Make an AltAz frame since that has many types of attributes. + # Match one axis with times. + self.obstime = (Time('2012-01-01') + + np.arange(len(lon))[:, np.newaxis] * u.s) + # And another with location. + self.location = EarthLocation(20.*u.deg, lat, 100*u.m) + # Ensure we have a quantity scalar. + self.pressure = 1000 * u.hPa + # As well as an array. + self.temperature = np.random.uniform( + 0., 20., size=(lon.size, lat.size)) * u.deg_C + self.s1 = AltAz(az=lon[:, np.newaxis], alt=lat, + obstime=self.obstime, + location=self.location, + pressure=self.pressure, + temperature=self.temperature) + # For some tests, also try a GCRS, since that has representation + # attributes. We match the second dimension (via the location) + self.obsgeoloc, self.obsgeovel = self.location.get_gcrs_posvel( + self.obstime[0, 0]) + self.s2 = GCRS(ra=lon[:, np.newaxis], dec=lat, + obstime=self.obstime, + obsgeoloc=self.obsgeoloc, + obsgeovel=self.obsgeovel) + # For completeness, also some tests on an empty frame. + self.s3 = GCRS(obstime=self.obstime, + obsgeoloc=self.obsgeoloc, + obsgeovel=self.obsgeovel) + # And make a SkyCoord + self.sc = SkyCoord(ra=lon[:, np.newaxis], dec=lat, frame=self.s3) + + def test_ravel(self): + s0_ravel = self.s0.ravel() + assert s0_ravel.shape == (self.s0.size,) + assert np.all(s0_ravel.data.lon == self.s0.data.lon.ravel()) + assert np.may_share_memory(s0_ravel.data.lon, self.s0.data.lon) + assert np.may_share_memory(s0_ravel.data.lat, self.s0.data.lat) + # Since s1 lon, lat were broadcast, ravel needs to make a copy. 
+        s1_ravel = self.s1.ravel()
+        assert s1_ravel.shape == (self.s1.size,)
+        assert np.all(s1_ravel.data.lon == self.s1.data.lon.ravel())
+        assert not np.may_share_memory(s1_ravel.data.lat, self.s1.data.lat)
+        assert np.all(s1_ravel.obstime == self.s1.obstime.ravel())
+        assert not np.may_share_memory(s1_ravel.obstime.jd1,
+                                       self.s1.obstime.jd1)
+        assert np.all(s1_ravel.location == self.s1.location.ravel())
+        assert not np.may_share_memory(s1_ravel.location, self.s1.location)
+        assert np.all(s1_ravel.temperature == self.s1.temperature.ravel())
+        assert np.may_share_memory(s1_ravel.temperature, self.s1.temperature)
+        assert s1_ravel.pressure == self.s1.pressure
+        s2_ravel = self.s2.ravel()
+        assert s2_ravel.shape == (self.s2.size,)
+        assert np.all(s2_ravel.data.lon == self.s2.data.lon.ravel())
+        assert not np.may_share_memory(s2_ravel.data.lat, self.s2.data.lat)
+        assert np.all(s2_ravel.obstime == self.s2.obstime.ravel())
+        assert not np.may_share_memory(s2_ravel.obstime.jd1,
+                                       self.s2.obstime.jd1)
+        # CartesianRepresentation objects do not allow direct comparisons, as
+        # this is too tricky to get right in the face of rounding issues.
+        # Here, though, it cannot be an issue, so we compare the xyz
+        # quantities.
+        assert np.all(s2_ravel.obsgeoloc.xyz == self.s2.obsgeoloc.ravel().xyz)
+        assert not np.may_share_memory(s2_ravel.obsgeoloc.x,
+                                       self.s2.obsgeoloc.x)
+        s3_ravel = self.s3.ravel()
+        assert s3_ravel.shape == (42,)  # cannot use .size on frame w/o data.
+        assert np.all(s3_ravel.obstime == self.s3.obstime.ravel())
+        assert not np.may_share_memory(s3_ravel.obstime.jd1,
+                                       self.s3.obstime.jd1)
+        assert np.all(s3_ravel.obsgeoloc.xyz == self.s3.obsgeoloc.ravel().xyz)
+        assert not np.may_share_memory(s3_ravel.obsgeoloc.x,
+                                       self.s3.obsgeoloc.x)
+        sc_ravel = self.sc.ravel()
+        assert sc_ravel.shape == (self.sc.size,)
+        assert np.all(sc_ravel.data.lon == self.sc.data.lon.ravel())
+        assert not np.may_share_memory(sc_ravel.data.lat, self.sc.data.lat)
+        assert np.all(sc_ravel.obstime == self.sc.obstime.ravel())
+        assert not np.may_share_memory(sc_ravel.obstime.jd1,
+                                       self.sc.obstime.jd1)
+        assert np.all(sc_ravel.obsgeoloc.xyz == self.sc.obsgeoloc.ravel().xyz)
+        assert not np.may_share_memory(sc_ravel.obsgeoloc.x,
+                                       self.sc.obsgeoloc.x)
+
+    def test_flatten(self):
+        s0_flatten = self.s0.flatten()
+        assert s0_flatten.shape == (self.s0.size,)
+        assert np.all(s0_flatten.data.lon == self.s0.data.lon.flatten())
+        # Flatten always copies.
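+        # Added illustrative sketch, not from the upstream suite: unlike
+        # ravel(), which returns a view when it can, flatten() copies even
+        # for a contiguous array.
+        _a = np.arange(3.)
+        assert np.may_share_memory(_a.ravel(), _a)
+        assert not np.may_share_memory(_a.flatten(), _a)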
+        assert not np.may_share_memory(s0_flatten.data.lat, self.s0.data.lat)
+        s1_flatten = self.s1.flatten()
+        assert s1_flatten.shape == (self.s1.size,)
+        assert np.all(s1_flatten.data.lat == self.s1.data.lat.flatten())
+        assert not np.may_share_memory(s1_flatten.data.lat, self.s1.data.lat)
+        assert np.all(s1_flatten.obstime == self.s1.obstime.flatten())
+        assert not np.may_share_memory(s1_flatten.obstime.jd1,
+                                       self.s1.obstime.jd1)
+        assert np.all(s1_flatten.location == self.s1.location.flatten())
+        assert not np.may_share_memory(s1_flatten.location, self.s1.location)
+        assert np.all(s1_flatten.temperature == self.s1.temperature.flatten())
+        assert not np.may_share_memory(s1_flatten.temperature,
+                                       self.s1.temperature)
+        assert s1_flatten.pressure == self.s1.pressure
+
+    def test_transpose(self):
+        s0_transpose = self.s0.transpose()
+        assert s0_transpose.shape == (7, 6)
+        assert np.all(s0_transpose.data.lon == self.s0.data.lon.transpose())
+        assert np.may_share_memory(s0_transpose.data.lat, self.s0.data.lat)
+        s1_transpose = self.s1.transpose()
+        assert s1_transpose.shape == (7, 6)
+        assert np.all(s1_transpose.data.lat == self.s1.data.lat.transpose())
+        assert np.may_share_memory(s1_transpose.data.lon, self.s1.data.lon)
+        assert np.all(s1_transpose.obstime == self.s1.obstime.transpose())
+        assert np.may_share_memory(s1_transpose.obstime.jd1,
+                                   self.s1.obstime.jd1)
+        assert np.all(s1_transpose.location == self.s1.location.transpose())
+        assert np.may_share_memory(s1_transpose.location, self.s1.location)
+        assert np.all(s1_transpose.temperature ==
+                      self.s1.temperature.transpose())
+        assert np.may_share_memory(s1_transpose.temperature,
+                                   self.s1.temperature)
+        assert s1_transpose.pressure == self.s1.pressure
+        # Only one check on T, since it just calls transpose anyway.
+ s1_T = self.s1.T + assert s1_T.shape == (7, 6) + assert np.all(s1_T.temperature == self.s1.temperature.T) + assert np.may_share_memory(s1_T.location, self.s1.location) + + def test_diagonal(self): + s0_diagonal = self.s0.diagonal() + assert s0_diagonal.shape == (6,) + assert np.all(s0_diagonal.data.lat == self.s0.data.lat.diagonal()) + assert np.may_share_memory(s0_diagonal.data.lat, self.s0.data.lat) + + def test_swapaxes(self): + s1_swapaxes = self.s1.swapaxes(0, 1) + assert s1_swapaxes.shape == (7, 6) + assert np.all(s1_swapaxes.data.lat == self.s1.data.lat.swapaxes(0, 1)) + assert np.may_share_memory(s1_swapaxes.data.lat, self.s1.data.lat) + assert np.all(s1_swapaxes.obstime == self.s1.obstime.swapaxes(0, 1)) + assert np.may_share_memory(s1_swapaxes.obstime.jd1, + self.s1.obstime.jd1) + assert np.all(s1_swapaxes.location == self.s1.location.swapaxes(0, 1)) + assert s1_swapaxes.location.shape == (7, 6) + assert np.may_share_memory(s1_swapaxes.location, self.s1.location) + assert np.all(s1_swapaxes.temperature == + self.s1.temperature.swapaxes(0, 1)) + assert np.may_share_memory(s1_swapaxes.temperature, + self.s1.temperature) + assert s1_swapaxes.pressure == self.s1.pressure + + def test_reshape(self): + s0_reshape = self.s0.reshape(2, 3, 7) + assert s0_reshape.shape == (2, 3, 7) + assert np.all(s0_reshape.data.lon == self.s0.data.lon.reshape(2, 3, 7)) + assert np.all(s0_reshape.data.lat == self.s0.data.lat.reshape(2, 3, 7)) + assert np.may_share_memory(s0_reshape.data.lon, self.s0.data.lon) + assert np.may_share_memory(s0_reshape.data.lat, self.s0.data.lat) + s1_reshape = self.s1.reshape(3, 2, 7) + assert s1_reshape.shape == (3, 2, 7) + assert np.all(s1_reshape.data.lat == self.s1.data.lat.reshape(3, 2, 7)) + assert np.may_share_memory(s1_reshape.data.lat, self.s1.data.lat) + assert np.all(s1_reshape.obstime == self.s1.obstime.reshape(3, 2, 7)) + assert np.may_share_memory(s1_reshape.obstime.jd1, + self.s1.obstime.jd1) + assert np.all(s1_reshape.location == self.s1.location.reshape(3, 2, 7)) + assert np.may_share_memory(s1_reshape.location, self.s1.location) + assert np.all(s1_reshape.temperature == + self.s1.temperature.reshape(3, 2, 7)) + assert np.may_share_memory(s1_reshape.temperature, + self.s1.temperature) + assert s1_reshape.pressure == self.s1.pressure + # For reshape(3, 14), copying is necessary for lon, lat, location, time + s1_reshape2 = self.s1.reshape(3, 14) + assert s1_reshape2.shape == (3, 14) + assert np.all(s1_reshape2.data.lon == self.s1.data.lon.reshape(3, 14)) + assert not np.may_share_memory(s1_reshape2.data.lon, self.s1.data.lon) + assert np.all(s1_reshape2.obstime == self.s1.obstime.reshape(3, 14)) + assert not np.may_share_memory(s1_reshape2.obstime.jd1, + self.s1.obstime.jd1) + assert np.all(s1_reshape2.location == self.s1.location.reshape(3, 14)) + assert not np.may_share_memory(s1_reshape2.location, self.s1.location) + assert np.all(s1_reshape2.temperature == + self.s1.temperature.reshape(3, 14)) + assert np.may_share_memory(s1_reshape2.temperature, + self.s1.temperature) + assert s1_reshape2.pressure == self.s1.pressure + s2_reshape = self.s2.reshape(3, 2, 7) + assert s2_reshape.shape == (3, 2, 7) + assert np.all(s2_reshape.data.lon == self.s2.data.lon.reshape(3, 2, 7)) + assert np.may_share_memory(s2_reshape.data.lat, self.s2.data.lat) + assert np.all(s2_reshape.obstime == self.s2.obstime.reshape(3, 2, 7)) + assert np.may_share_memory(s2_reshape.obstime.jd1, self.s2.obstime.jd1) + assert np.all(s2_reshape.obsgeoloc.xyz == + self.s2.obsgeoloc.reshape(3, 2, 
7).xyz) + assert np.may_share_memory(s2_reshape.obsgeoloc.x, self.s2.obsgeoloc.x) + s3_reshape = self.s3.reshape(3, 2, 7) + assert s3_reshape.shape == (3, 2, 7) + assert np.all(s3_reshape.obstime == self.s3.obstime.reshape(3, 2, 7)) + assert np.may_share_memory(s3_reshape.obstime.jd1, self.s3.obstime.jd1) + assert np.all(s3_reshape.obsgeoloc.xyz == + self.s3.obsgeoloc.reshape(3, 2, 7).xyz) + assert np.may_share_memory(s3_reshape.obsgeoloc.x, self.s3.obsgeoloc.x) + sc_reshape = self.sc.reshape(3, 2, 7) + assert sc_reshape.shape == (3, 2, 7) + assert np.all(sc_reshape.data.lon == self.sc.data.lon.reshape(3, 2, 7)) + assert np.may_share_memory(sc_reshape.data.lat, self.sc.data.lat) + assert np.all(sc_reshape.obstime == self.sc.obstime.reshape(3, 2, 7)) + assert np.may_share_memory(sc_reshape.obstime.jd1, self.sc.obstime.jd1) + assert np.all(sc_reshape.obsgeoloc.xyz == + self.sc.obsgeoloc.reshape(3, 2, 7).xyz) + assert np.may_share_memory(sc_reshape.obsgeoloc.x, self.sc.obsgeoloc.x) + # For reshape(3, 14), the arrays all need to be copied. + sc_reshape2 = self.sc.reshape(3, 14) + assert sc_reshape2.shape == (3, 14) + assert np.all(sc_reshape2.data.lon == self.sc.data.lon.reshape(3, 14)) + assert not np.may_share_memory(sc_reshape2.data.lat, + self.sc.data.lat) + assert np.all(sc_reshape2.obstime == self.sc.obstime.reshape(3, 14)) + assert not np.may_share_memory(sc_reshape2.obstime.jd1, + self.sc.obstime.jd1) + assert np.all(sc_reshape2.obsgeoloc.xyz == + self.sc.obsgeoloc.reshape(3, 14).xyz) + assert not np.may_share_memory(sc_reshape2.obsgeoloc.x, + self.sc.obsgeoloc.x) + + def test_squeeze(self): + s0_squeeze = self.s0.reshape(3, 1, 2, 1, 7).squeeze() + assert s0_squeeze.shape == (3, 2, 7) + assert np.all(s0_squeeze.data.lat == self.s0.data.lat.reshape(3, 2, 7)) + assert np.may_share_memory(s0_squeeze.data.lat, self.s0.data.lat) + + def test_add_dimension(self): + s0_adddim = self.s0[:, np.newaxis, :] + assert s0_adddim.shape == (6, 1, 7) + assert np.all(s0_adddim.data.lon == self.s0.data.lon[:, np.newaxis, :]) + assert np.may_share_memory(s0_adddim.data.lat, self.s0.data.lat) + + def test_take(self): + s0_take = self.s0.take((5, 2)) + assert s0_take.shape == (2,) + assert np.all(s0_take.data.lon == self.s0.data.lon.take((5, 2))) diff --git a/astropy/coordinates/tests/test_sites.py b/astropy/coordinates/tests/test_sites.py new file mode 100644 index 0000000..00490f9 --- /dev/null +++ b/astropy/coordinates/tests/test_sites.py @@ -0,0 +1,170 @@ +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import pytest + +from ...tests.helper import assert_quantity_allclose, remote_data, quantity_allclose +from ... import units as u +from .. import Longitude, Latitude, EarthLocation +from ..sites import get_builtin_sites, get_downloaded_sites, SiteRegistry + + +def test_builtin_sites(): + reg = get_builtin_sites() + + greenwich = reg['greenwich'] + lon, lat, el = greenwich.to_geodetic() + assert_quantity_allclose(lon, Longitude('0:0:0', unit=u.deg), + atol=10*u.arcsec) + assert_quantity_allclose(lat, Latitude('51:28:40', unit=u.deg), + atol=1*u.arcsec) + assert_quantity_allclose(el, 46*u.m, atol=1*u.m) + + names = reg.names + assert 'greenwich' in names + assert 'example_site' in names + + with pytest.raises(KeyError) as exc: + reg['nonexistent site'] + assert exc.value.args[0] == "Site 'nonexistent site' not in database. Use the 'names' attribute to see available sites." 
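+
+
+def test_builtin_sites_case_insensitive():
+    # Added illustrative sketch, not part of the upstream suite: SiteRegistry
+    # lookups are case-insensitive (test_registry below exercises the same
+    # behaviour on a hand-built registry), so any capitalization of a known
+    # name returns the one stored EarthLocation instance.
+    reg = get_builtin_sites()
+    assert reg['GREENWICH'] is reg['greenwich']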
+
+
+@remote_data(source='astropy')
+def test_online_sites():
+    reg = get_downloaded_sites()
+
+    keck = reg['keck']
+    lon, lat, el = keck.to_geodetic()
+    assert_quantity_allclose(lon, -Longitude('155:28.7', unit=u.deg),
+                             atol=0.001*u.deg)
+    assert_quantity_allclose(lat, Latitude('19:49.7', unit=u.deg),
+                             atol=0.001*u.deg)
+    assert_quantity_allclose(el, 4160*u.m, atol=1*u.m)
+
+    names = reg.names
+    assert 'keck' in names
+    assert 'ctio' in names
+
+    with pytest.raises(KeyError) as exc:
+        reg['nonexistent site']
+    assert exc.value.args[0] == "Site 'nonexistent site' not in database. Use the 'names' attribute to see available sites."
+
+    with pytest.raises(KeyError) as exc:
+        reg['kec']
+    assert exc.value.args[0] == "Site 'kec' not in database. Use the 'names' attribute to see available sites. Did you mean one of: 'keck'?'"
+
+
+@remote_data(source='astropy')
+# this will *try* the online registry, so we have to make it remote_data,
+# even though it could fall back on the non-remote version
+def test_EarthLocation_basic():
+    greenwichel = EarthLocation.of_site('greenwich')
+    lon, lat, el = greenwichel.to_geodetic()
+    assert_quantity_allclose(lon, Longitude('0:0:0', unit=u.deg),
+                             atol=10*u.arcsec)
+    assert_quantity_allclose(lat, Latitude('51:28:40', unit=u.deg),
+                             atol=1*u.arcsec)
+    assert_quantity_allclose(el, 46*u.m, atol=1*u.m)
+
+    names = EarthLocation.get_site_names()
+    assert 'greenwich' in names
+    assert 'example_site' in names
+
+    with pytest.raises(KeyError) as exc:
+        EarthLocation.of_site('nonexistent site')
+    assert exc.value.args[0] == "Site 'nonexistent site' not in database. Use EarthLocation.get_site_names to see available sites."
+
+
+def test_EarthLocation_state_offline():
+    EarthLocation._site_registry = None
+    EarthLocation._get_site_registry(force_builtin=True)
+    assert EarthLocation._site_registry is not None
+
+    oldreg = EarthLocation._site_registry
+    newreg = EarthLocation._get_site_registry()
+    assert oldreg is newreg
+    newreg = EarthLocation._get_site_registry(force_builtin=True)
+    assert oldreg is not newreg
+
+
+@remote_data(source='astropy')
+def test_EarthLocation_state_online():
+    EarthLocation._site_registry = None
+    EarthLocation._get_site_registry(force_download=True)
+    assert EarthLocation._site_registry is not None
+
+    oldreg = EarthLocation._site_registry
+    newreg = EarthLocation._get_site_registry()
+    assert oldreg is newreg
+    newreg = EarthLocation._get_site_registry(force_download=True)
+    assert oldreg is not newreg
+
+
+def test_registry():
+    reg = SiteRegistry()
+
+    assert len(reg.names) == 0
+
+    names = ['sitea', 'site A']
+    loc = EarthLocation.from_geodetic(lat=1*u.deg, lon=2*u.deg, height=3*u.km)
+    reg.add_site(names, loc)
+
+    assert len(reg.names) == 2
+
+    loc1 = reg['SIteA']
+    assert loc1 is loc
+
+    loc2 = reg['sIte a']
+    assert loc2 is loc
+
+
+def test_non_EarthLocation():
+    """
+    A regression test for a typo bug pointed out at the bottom of
+    https://github.com/astropy/astropy/pull/4042
+    """
+    class EarthLocation2(EarthLocation):
+        pass
+
+    # This keeps us from needing to use remote_data.  Note that this does
+    # *not* mess up the registry for EarthLocation, because the registry is
+    # cached on a per-class basis.
+    EarthLocation2._get_site_registry(force_builtin=True)
+
+    el2 = EarthLocation2.of_site('greenwich')
+    assert type(el2) is EarthLocation2
+    assert el2.info.name == 'Royal Observatory Greenwich'
+
+
+def check_builtin_matches_remote(download_url=True):
+    """
+    This function checks that the builtin sites registry is consistent with
+    the remote registry (or a registry at some other location).
+
+    Note that currently this is *not* run by the testing suite (because it
+    doesn't start with "test", and is instead meant to be used as a check
+    before merging changes in astropy-data).
+    """
+    builtin_registry = EarthLocation._get_site_registry(force_builtin=True)
+    dl_registry = EarthLocation._get_site_registry(force_download=download_url)
+
+    in_dl = {}
+    matches = {}
+    for name in builtin_registry.names:
+        in_dl[name] = name in dl_registry
+        if in_dl[name]:
+            matches[name] = quantity_allclose(builtin_registry[name],
+                                              dl_registry[name])
+        else:
+            matches[name] = False
+
+    if not all(matches.values()):
+        # this makes sure we actually see which don't match
+        print("In builtin registry but not in download:")
+        for name in in_dl:
+            if not in_dl[name]:
+                print('    ', name)
+        print("In both but not the same value:")
+        for name in matches:
+            if not matches[name] and in_dl[name]:
+                print('    ', name, 'builtin:', builtin_registry[name],
+                      'download:', dl_registry[name])
+        assert False, "Builtin and download registry aren't consistent - failures printed to stdout"
diff --git a/astropy/coordinates/tests/test_sky_coord.py b/astropy/coordinates/tests/test_sky_coord.py
new file mode 100644
index 0000000..d4b9b44
--- /dev/null
+++ b/astropy/coordinates/tests/test_sky_coord.py
@@ -0,0 +1,1389 @@
+# -*- coding: utf-8 -*-
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+
+"""
+Tests for the SkyCoord class.  Note that there are also SkyCoord tests in
+test_api_ape5.py
+"""
+
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
+
+import copy
+
+import pytest
+import numpy as np
+import numpy.testing as npt
+
+from ... import units as u
+from ...tests.helper import (remote_data, catch_warnings,
+                             quantity_allclose,
+                             assert_quantity_allclose as assert_allclose)
+from ...extern.six.moves import zip
+from ..representation import REPRESENTATION_CLASSES
+from ...coordinates import (ICRS, FK4, FK5, Galactic, SkyCoord, Angle,
+                            SphericalRepresentation, CartesianRepresentation,
+                            UnitSphericalRepresentation, AltAz,
+                            BaseCoordinateFrame, Attribute,
+                            frame_transform_graph)
+from ...coordinates import Latitude, EarthLocation
+from ...time import Time
+from ...utils import minversion, isiterable
+from ...utils.compat import NUMPY_LT_1_14
+from ...utils.exceptions import AstropyDeprecationWarning
+
+RA = 1.0 * u.deg
+DEC = 2.0 * u.deg
+C_ICRS = ICRS(RA, DEC)
+C_FK5 = C_ICRS.transform_to(FK5)
+J2001 = Time('J2001', scale='utc')
+
+
+def allclose(a, b, rtol=0.0, atol=None):
+    if atol is None:
+        atol = 1.e-8 * getattr(a, 'unit', 1.)
+    return quantity_allclose(a, b, rtol, atol)
+
+
+try:
+    import scipy
+    HAS_SCIPY = True
+except ImportError:
+    HAS_SCIPY = False
+
+if HAS_SCIPY and minversion(scipy, '0.12.0', inclusive=False):
+    OLDER_SCIPY = False
+else:
+    OLDER_SCIPY = True
+
+
+def test_transform_to():
+    for frame in (FK5, FK5(equinox=Time('J1975.0')),
+                  FK4, FK4(equinox=Time('J1975.0')),
+                  SkyCoord(RA, DEC, 'fk4', equinox='J1980')):
+        c_frame = C_ICRS.transform_to(frame)
+        s_icrs = SkyCoord(RA, DEC, frame='icrs')
+        s_frame = s_icrs.transform_to(frame)
+        assert allclose(c_frame.ra, s_frame.ra)
+        assert allclose(c_frame.dec, s_frame.dec)
+        assert allclose(c_frame.distance, s_frame.distance)
+
+
+# set up for parametrized test
+rt_sets = []
+rt_frames = [ICRS, FK4, FK5, Galactic]
+for rt_frame0 in rt_frames:
+    for rt_frame1 in rt_frames:
+        for equinox0 in (None, 'J1975.0'):
+            for obstime0 in (None, 'J1980.0'):
+                for equinox1 in (None, 'J1975.0'):
+                    for obstime1 in (None, 'J1980.0'):
+                        rt_sets.append((rt_frame0, rt_frame1,
+                                        equinox0, equinox1,
+                                        obstime0, obstime1))
+rt_args = ('frame0', 'frame1', 'equinox0', 'equinox1', 'obstime0', 'obstime1')
+
+
+@pytest.mark.parametrize(rt_args, rt_sets)
+def test_round_tripping(frame0, frame1, equinox0, equinox1, obstime0, obstime1):
+    """
+    Test round tripping out and back using transform_to in every combination.
+    """
+    attrs0 = {'equinox': equinox0, 'obstime': obstime0}
+    attrs1 = {'equinox': equinox1, 'obstime': obstime1}
+
+    # Remove None values
+    attrs0 = dict((k, v) for k, v in attrs0.items() if v is not None)
+    attrs1 = dict((k, v) for k, v in attrs1.items() if v is not None)
+
+    # Go out and back
+    sc = SkyCoord(frame0, RA, DEC, **attrs0)
+
+    # Keep only frame attributes for frame1
+    attrs1 = dict((attr, val) for attr, val in attrs1.items()
+                  if attr in frame1.get_frame_attr_names())
+    sc2 = sc.transform_to(frame1(**attrs1))
+
+    # When coming back only keep frame0 attributes for transform_to
+    attrs0 = dict((attr, val) for attr, val in attrs0.items()
+                  if attr in frame0.get_frame_attr_names())
+    # also, if any are None, fill in with defaults
+    for attrnm in frame0.get_frame_attr_names():
+        if attrs0.get(attrnm, None) is None:
+            if attrnm == 'obstime' and frame0.get_frame_attr_names()[attrnm] is None:
+                if 'equinox' in attrs0:
+                    attrs0[attrnm] = attrs0['equinox']
+            else:
+                attrs0[attrnm] = frame0.get_frame_attr_names()[attrnm]
+    sc_rt = sc2.transform_to(frame0(**attrs0))
+
+    if frame0 is Galactic:
+        assert allclose(sc.l, sc_rt.l)
+        assert allclose(sc.b, sc_rt.b)
+    else:
+        assert allclose(sc.ra, sc_rt.ra)
+        assert allclose(sc.dec, sc_rt.dec)
+    if equinox0:
+        assert type(sc.equinox) is Time and sc.equinox == sc_rt.equinox
+    if obstime0:
+        assert type(sc.obstime) is Time and sc.obstime == sc_rt.obstime
+
+
+def test_coord_init_string():
+    """
+    Spherical or Cartesian representation input coordinates.
+ """ + sc = SkyCoord('1d 2d') + assert allclose(sc.ra, 1 * u.deg) + assert allclose(sc.dec, 2 * u.deg) + + sc = SkyCoord('1d', '2d') + assert allclose(sc.ra, 1 * u.deg) + assert allclose(sc.dec, 2 * u.deg) + + sc = SkyCoord('1°2′3″', '2°3′4″') + assert allclose(sc.ra, Angle('1°2′3″')) + assert allclose(sc.dec, Angle('2°3′4″')) + + sc = SkyCoord('1°2′3″ 2°3′4″') + assert allclose(sc.ra, Angle('1°2′3″')) + assert allclose(sc.dec, Angle('2°3′4″')) + + with pytest.raises(ValueError) as err: + SkyCoord('1d 2d 3d') + assert "Cannot parse first argument data" in str(err) + + sc1 = SkyCoord('8 00 00 +5 00 00.0', unit=(u.hour, u.deg), frame='icrs') + assert isinstance(sc1, SkyCoord) + assert allclose(sc1.ra, Angle(120 * u.deg)) + assert allclose(sc1.dec, Angle(5 * u.deg)) + + sc11 = SkyCoord('8h00m00s+5d00m00.0s', unit=(u.hour, u.deg), frame='icrs') + assert isinstance(sc11, SkyCoord) + assert allclose(sc1.ra, Angle(120 * u.deg)) + assert allclose(sc1.dec, Angle(5 * u.deg)) + + sc2 = SkyCoord('8 00 -5 00 00.0', unit=(u.hour, u.deg), frame='icrs') + assert isinstance(sc2, SkyCoord) + assert allclose(sc2.ra, Angle(120 * u.deg)) + assert allclose(sc2.dec, Angle(-5 * u.deg)) + + sc3 = SkyCoord('8 00 -5 00.6', unit=(u.hour, u.deg), frame='icrs') + assert isinstance(sc3, SkyCoord) + assert allclose(sc3.ra, Angle(120 * u.deg)) + assert allclose(sc3.dec, Angle(-5.01 * u.deg)) + + sc4 = SkyCoord('J080000.00-050036.00', unit=(u.hour, u.deg), frame='icrs') + assert isinstance(sc4, SkyCoord) + assert allclose(sc4.ra, Angle(120 * u.deg)) + assert allclose(sc4.dec, Angle(-5.01 * u.deg)) + + sc41 = SkyCoord('J080000+050036', unit=(u.hour, u.deg), frame='icrs') + assert isinstance(sc41, SkyCoord) + assert allclose(sc41.ra, Angle(120 * u.deg)) + assert allclose(sc41.dec, Angle(+5.01 * u.deg)) + + sc5 = SkyCoord('8h00.6m -5d00.6m', unit=(u.hour, u.deg), frame='icrs') + assert isinstance(sc5, SkyCoord) + assert allclose(sc5.ra, Angle(120.15 * u.deg)) + assert allclose(sc5.dec, Angle(-5.01 * u.deg)) + + sc6 = SkyCoord('8h00.6m -5d00.6m', unit=(u.hour, u.deg), frame='fk4') + assert isinstance(sc6, SkyCoord) + assert allclose(sc6.ra, Angle(120.15 * u.deg)) + assert allclose(sc6.dec, Angle(-5.01 * u.deg)) + + sc61 = SkyCoord('8h00.6m-5d00.6m', unit=(u.hour, u.deg), frame='fk4') + assert isinstance(sc61, SkyCoord) + assert allclose(sc6.ra, Angle(120.15 * u.deg)) + assert allclose(sc6.dec, Angle(-5.01 * u.deg)) + + sc61 = SkyCoord('8h00.6-5d00.6', unit=(u.hour, u.deg), frame='fk4') + assert isinstance(sc61, SkyCoord) + assert allclose(sc6.ra, Angle(120.15 * u.deg)) + assert allclose(sc6.dec, Angle(-5.01 * u.deg)) + + sc7 = SkyCoord("J1874221.60+122421.6", unit=u.deg) + assert isinstance(sc7, SkyCoord) + assert allclose(sc7.ra, Angle(187.706 * u.deg)) + assert allclose(sc7.dec, Angle(12.406 * u.deg)) + + with pytest.raises(ValueError): + SkyCoord('8 00 -5 00.6', unit=(u.deg, u.deg), frame='galactic') + + +def test_coord_init_unit(): + """ + Test variations of the unit keyword. 
+ """ + for unit in ('deg', 'deg,deg', ' deg , deg ', u.deg, (u.deg, u.deg), + np.array(['deg', 'deg'])): + sc = SkyCoord(1, 2, unit=unit) + assert allclose(sc.ra, Angle(1 * u.deg)) + assert allclose(sc.dec, Angle(2 * u.deg)) + + for unit in ('hourangle', 'hourangle,hourangle', ' hourangle , hourangle ', + u.hourangle, [u.hourangle, u.hourangle]): + sc = SkyCoord(1, 2, unit=unit) + assert allclose(sc.ra, Angle(15 * u.deg)) + assert allclose(sc.dec, Angle(30 * u.deg)) + + for unit in ('hourangle,deg', (u.hourangle, u.deg)): + sc = SkyCoord(1, 2, unit=unit) + assert allclose(sc.ra, Angle(15 * u.deg)) + assert allclose(sc.dec, Angle(2 * u.deg)) + + for unit in ('deg,deg,deg,deg', [u.deg, u.deg, u.deg, u.deg], None): + with pytest.raises(ValueError) as err: + SkyCoord(1, 2, unit=unit) + assert 'Unit keyword must have one to three unit values' in str(err) + + for unit in ('m', (u.m, u.deg), ''): + with pytest.raises(u.UnitsError) as err: + SkyCoord(1, 2, unit=unit) + + +def test_coord_init_list(): + """ + Spherical or Cartesian representation input coordinates. + """ + sc = SkyCoord([('1d', '2d'), + (1 * u.deg, 2 * u.deg), + '1d 2d', + ('1°', '2°'), + '1° 2°'], unit='deg') + assert allclose(sc.ra, Angle('1d')) + assert allclose(sc.dec, Angle('2d')) + + with pytest.raises(ValueError) as err: + SkyCoord(['1d 2d 3d']) + assert "Cannot parse first argument data" in str(err) + + with pytest.raises(ValueError) as err: + SkyCoord([('1d', '2d', '3d')]) + assert "Cannot parse first argument data" in str(err) + + sc = SkyCoord([1 * u.deg, 1 * u.deg], [2 * u.deg, 2 * u.deg]) + assert allclose(sc.ra, Angle('1d')) + assert allclose(sc.dec, Angle('2d')) + + with pytest.raises(ValueError) as err: + SkyCoord([1 * u.deg, 2 * u.deg]) # this list is taken as RA w/ missing dec + assert "One or more elements of input sequence does not have a length" in str(err) + + +def test_coord_init_array(): + """ + Input in the form of a list array or numpy array + """ + for a in (['1 2', '3 4'], + [['1', '2'], ['3', '4']], + [[1, 2], [3, 4]]): + sc = SkyCoord(a, unit='deg') + assert allclose(sc.ra - [1, 3] * u.deg, 0 * u.deg) + assert allclose(sc.dec - [2, 4] * u.deg, 0 * u.deg) + + sc = SkyCoord(np.array(a), unit='deg') + assert allclose(sc.ra - [1, 3] * u.deg, 0 * u.deg) + assert allclose(sc.dec - [2, 4] * u.deg, 0 * u.deg) + + +def test_coord_init_representation(): + """ + Spherical or Cartesian represenation input coordinates. + """ + coord = SphericalRepresentation(lon=8 * u.deg, lat=5 * u.deg, distance=1 * u.kpc) + sc = SkyCoord(coord, 'icrs') + assert allclose(sc.ra, coord.lon) + assert allclose(sc.dec, coord.lat) + assert allclose(sc.distance, coord.distance) + + with pytest.raises(ValueError) as err: + SkyCoord(coord, 'icrs', ra='1d') + assert "conflicts with keyword argument 'ra'" in str(err) + + coord = CartesianRepresentation(1 * u.one, 2 * u.one, 3 * u.one) + sc = SkyCoord(coord, 'icrs') + sc_cart = sc.represent_as(CartesianRepresentation) + assert allclose(sc_cart.x, 1.0) + assert allclose(sc_cart.y, 2.0) + assert allclose(sc_cart.z, 3.0) + + +FRAME_DEPRECATION_WARNING = ("Passing a frame as a positional argument is now " + "deprecated, use the frame= keyword argument " + "instead.") + + +def test_frame_init(): + """ + Different ways of providing the frame. 
+ """ + + sc = SkyCoord(RA, DEC, frame='icrs') + assert sc.frame.name == 'icrs' + + sc = SkyCoord(RA, DEC, frame=ICRS) + assert sc.frame.name == 'icrs' + + with catch_warnings(AstropyDeprecationWarning) as w: + sc = SkyCoord(RA, DEC, 'icrs') + assert sc.frame.name == 'icrs' + assert len(w) == 1 + assert str(w[0].message) == FRAME_DEPRECATION_WARNING + + with catch_warnings(AstropyDeprecationWarning) as w: + sc = SkyCoord(RA, DEC, ICRS) + assert sc.frame.name == 'icrs' + assert len(w) == 1 + assert str(w[0].message) == FRAME_DEPRECATION_WARNING + + with catch_warnings(AstropyDeprecationWarning) as w: + sc = SkyCoord('icrs', RA, DEC) + assert sc.frame.name == 'icrs' + assert len(w) == 1 + assert str(w[0].message) == FRAME_DEPRECATION_WARNING + + with catch_warnings(AstropyDeprecationWarning) as w: + sc = SkyCoord(ICRS, RA, DEC) + assert sc.frame.name == 'icrs' + assert len(w) == 1 + assert str(w[0].message) == FRAME_DEPRECATION_WARNING + + sc = SkyCoord(sc) + assert sc.frame.name == 'icrs' + + sc = SkyCoord(C_ICRS) + assert sc.frame.name == 'icrs' + + SkyCoord(C_ICRS, frame='icrs') + assert sc.frame.name == 'icrs' + + with pytest.raises(ValueError) as err: + SkyCoord(C_ICRS, frame='galactic') + assert 'Cannot override frame=' in str(err) + + +def test_attr_inheritance(): + """ + When initializing from an existing coord the representation attrs like + equinox should be inherited to the SkyCoord. If there is a conflict + then raise an exception. + """ + sc = SkyCoord(1, 2, frame='icrs', unit='deg', equinox='J1999', obstime='J2001') + sc2 = SkyCoord(sc) + assert sc2.equinox == sc.equinox + assert sc2.obstime == sc.obstime + assert allclose(sc2.ra, sc.ra) + assert allclose(sc2.dec, sc.dec) + assert allclose(sc2.distance, sc.distance) + + sc2 = SkyCoord(sc.frame) # Doesn't have equinox there so we get FK4 defaults + assert sc2.equinox != sc.equinox + assert sc2.obstime != sc.obstime + assert allclose(sc2.ra, sc.ra) + assert allclose(sc2.dec, sc.dec) + assert allclose(sc2.distance, sc.distance) + + sc = SkyCoord(1, 2, frame='fk4', unit='deg', equinox='J1999', obstime='J2001') + sc2 = SkyCoord(sc) + assert sc2.equinox == sc.equinox + assert sc2.obstime == sc.obstime + assert allclose(sc2.ra, sc.ra) + assert allclose(sc2.dec, sc.dec) + assert allclose(sc2.distance, sc.distance) + + sc2 = SkyCoord(sc.frame) # sc.frame has equinox, obstime + assert sc2.equinox == sc.equinox + assert sc2.obstime == sc.obstime + assert allclose(sc2.ra, sc.ra) + assert allclose(sc2.dec, sc.dec) + assert allclose(sc2.distance, sc.distance) + + +def test_attr_conflicts(): + """ + Check conflicts resolution between coordinate attributes and init kwargs. 
+ """ + sc = SkyCoord(1, 2, frame='icrs', unit='deg', equinox='J1999', obstime='J2001') + + # OK if attrs both specified but with identical values + SkyCoord(sc, equinox='J1999', obstime='J2001') + + # OK because sc.frame doesn't have obstime + SkyCoord(sc.frame, equinox='J1999', obstime='J2100') + + # Not OK if attrs don't match + with pytest.raises(ValueError) as err: + SkyCoord(sc, equinox='J1999', obstime='J2002') + assert "Coordinate attribute 'obstime'=" in str(err) + + # Same game but with fk4 which has equinox and obstime frame attrs + sc = SkyCoord(1, 2, frame='fk4', unit='deg', equinox='J1999', obstime='J2001') + + # OK if attrs both specified but with identical values + SkyCoord(sc, equinox='J1999', obstime='J2001') + + # Not OK if SkyCoord attrs don't match + with pytest.raises(ValueError) as err: + SkyCoord(sc, equinox='J1999', obstime='J2002') + assert "Coordinate attribute 'obstime'=" in str(err) + + # Not OK because sc.frame has different attrs + with pytest.raises(ValueError) as err: + SkyCoord(sc.frame, equinox='J1999', obstime='J2002') + assert "Coordinate attribute 'obstime'=" in str(err) + + +def test_frame_attr_getattr(): + """ + When accessing frame attributes like equinox, the value should come + from self.frame when that object has the relevant attribute, otherwise + from self. + """ + sc = SkyCoord(1, 2, frame='icrs', unit='deg', equinox='J1999', obstime='J2001') + assert sc.equinox == 'J1999' # Just the raw value (not validated) + assert sc.obstime == 'J2001' + + sc = SkyCoord(1, 2, frame='fk4', unit='deg', equinox='J1999', obstime='J2001') + assert sc.equinox == Time('J1999') # Coming from the self.frame object + assert sc.obstime == Time('J2001') + + sc = SkyCoord(1, 2, frame='fk4', unit='deg', equinox='J1999') + assert sc.equinox == Time('J1999') + assert sc.obstime == Time('J1999') + + +def test_to_string(): + """ + Basic testing of converting SkyCoord to strings. This just tests + for a single input coordinate and and 1-element list. It does not + test the underlying `Angle.to_string` method itself. + """ + coord = '1h2m3s 1d2m3s' + for wrap in (lambda x: x, lambda x: [x]): + sc = SkyCoord(wrap(coord)) + assert sc.to_string() == wrap('15.5125 1.03417') + assert sc.to_string('dms') == wrap('15d30m45s 1d02m03s') + assert sc.to_string('hmsdms') == wrap('01h02m03s +01d02m03s') + with_kwargs = sc.to_string('hmsdms', precision=3, pad=True, alwayssign=True) + assert with_kwargs == wrap('+01h02m03.000s +01d02m03.000s') + + +def test_seps(): + sc1 = SkyCoord(0 * u.deg, 1 * u.deg, frame='icrs') + sc2 = SkyCoord(0 * u.deg, 2 * u.deg, frame='icrs') + + sep = sc1.separation(sc2) + + assert (sep - 1 * u.deg)/u.deg < 1e-10 + + with pytest.raises(ValueError): + sc1.separation_3d(sc2) + + sc3 = SkyCoord(1 * u.deg, 1 * u.deg, distance=1 * u.kpc, frame='icrs') + sc4 = SkyCoord(1 * u.deg, 1 * u.deg, distance=2 * u.kpc, frame='icrs') + sep3d = sc3.separation_3d(sc4) + + assert sep3d == 1 * u.kpc + + +def test_repr(): + sc1 = SkyCoord(0 * u.deg, 1 * u.deg, frame='icrs') + sc2 = SkyCoord(1 * u.deg, 1 * u.deg, frame='icrs', distance=1 * u.kpc) + + assert repr(sc1) == ('').format(' 0., 1.' if NUMPY_LT_1_14 else + '0., 1.') + assert repr(sc2) == ('').format(' 1., 1., 1.' if NUMPY_LT_1_14 + else '1., 1., 1.') + + sc3 = SkyCoord(0.25 * u.deg, [1, 2.5] * u.deg, frame='icrs') + assert repr(sc3).startswith('').format(' 0., 1.' 
if NUMPY_LT_1_14 + else '0., 1.') + + +def test_repr_altaz(): + sc2 = SkyCoord(1 * u.deg, 1 * u.deg, frame='icrs', distance=1 * u.kpc) + loc = EarthLocation(-2309223 * u.m, -3695529 * u.m, -4641767 * u.m) + time = Time('2005-03-21 00:00:00') + sc4 = sc2.transform_to(AltAz(location=loc, obstime=time)) + assert repr(sc4).startswith(" 270*u.degree) + + cicrs = SkyCoord(0*u.deg, 0*u.deg, frame='icrs') + cfk5 = SkyCoord(1*u.deg, 0*u.deg, frame='fk5') + # because of the frame transform, it's just a *bit* more than 90 degrees + assert cicrs.position_angle(cfk5) > 90.0 * u.deg + assert cicrs.position_angle(cfk5) < 91.0 * u.deg + + +def test_position_angle_directly(): + """Regression check for #3800: position_angle should accept floats.""" + from ..angle_utilities import position_angle + result = position_angle(10., 20., 10., 20.) + assert result.unit is u.radian + assert result.value == 0. + + +def test_sep_pa_equivalence(): + """Regression check for bug in #5702. + + PA and separation from object 1 to 2 should be consistent with those + from 2 to 1 + """ + cfk5 = SkyCoord(1*u.deg, 0*u.deg, frame='fk5') + cfk5B1950 = SkyCoord(1*u.deg, 0*u.deg, frame='fk5', equinox='B1950') + # test with both default and explicit equinox #5722 and #3106 + sep_forward = cfk5.separation(cfk5B1950) + sep_backward = cfk5B1950.separation(cfk5) + assert sep_forward != 0 and sep_backward != 0 + assert_allclose(sep_forward, sep_backward) + posang_forward = cfk5.position_angle(cfk5B1950) + posang_backward = cfk5B1950.position_angle(cfk5) + assert posang_forward != 0 and posang_backward != 0 + assert 179 < (posang_forward - posang_backward).wrap_at(360*u.deg).degree < 181 + dcfk5 = SkyCoord(1*u.deg, 0*u.deg, frame='fk5', distance=1*u.pc) + dcfk5B1950 = SkyCoord(1*u.deg, 0*u.deg, frame='fk5', equinox='B1950', + distance=1.*u.pc) + sep3d_forward = dcfk5.separation_3d(dcfk5B1950) + sep3d_backward = dcfk5B1950.separation_3d(dcfk5) + assert sep3d_forward != 0 and sep3d_backward != 0 + assert_allclose(sep3d_forward, sep3d_backward) + + +def test_table_to_coord(): + """ + Checks "end-to-end" use of `Table` with `SkyCoord` - the `Quantity` + initializer is the intermediary that translate the table columns into + something coordinates understands. + + (Regression test for #1762 ) + """ + from ...table import Table, Column + + t = Table() + t.add_column(Column(data=[1, 2, 3], name='ra', unit=u.deg)) + t.add_column(Column(data=[4, 5, 6], name='dec', unit=u.deg)) + + c = SkyCoord(t['ra'], t['dec']) + + assert allclose(c.ra.to(u.deg), [1, 2, 3] * u.deg) + assert allclose(c.dec.to(u.deg), [4, 5, 6] * u.deg) + + +def assert_quantities_allclose(coord, q1s, attrs): + """ + Compare two tuples of quantities. This assumes that the values in q1 are of + order(1) and uses atol=1e-13, rtol=0. It also asserts that the units of the + two quantities are the *same*, in order to check that the representation + output has the expected units. 
+ """ + q2s = [getattr(coord, attr) for attr in attrs] + assert len(q1s) == len(q2s) + for q1, q2 in zip(q1s, q2s): + assert q1.shape == q2.shape + assert allclose(q1, q2, rtol=0, atol=1e-13 * q1.unit) + + +# Sets of inputs corresponding to Galactic frame +base_unit_attr_sets = [ + ('spherical', u.karcsec, u.karcsec, u.kpc, Latitude, 'l', 'b', 'distance'), + ('unitspherical', u.karcsec, u.karcsec, None, Latitude, 'l', 'b', None), + ('physicsspherical', u.karcsec, u.karcsec, u.kpc, Angle, 'phi', 'theta', 'r'), + ('cartesian', u.km, u.km, u.km, u.Quantity, 'u', 'v', 'w'), + ('cylindrical', u.km, u.karcsec, u.km, Angle, 'rho', 'phi', 'z') +] + +units_attr_sets = [] +for base_unit_attr_set in base_unit_attr_sets: + repr_name = base_unit_attr_set[0] + for representation in (repr_name, REPRESENTATION_CLASSES[repr_name]): + for c1, c2, c3 in ((1, 2, 3), ([1], [2], [3])): + for arrayify in True, False: + if arrayify: + c1 = np.array(c1) + c2 = np.array(c2) + c3 = np.array(c3) + units_attr_sets.append(base_unit_attr_set + (representation, c1, c2, c3)) +units_attr_args = ('repr_name', 'unit1', 'unit2', 'unit3', 'cls2', 'attr1', 'attr2', 'attr3', 'representation', 'c1', 'c2', 'c3') + + +@pytest.mark.parametrize(units_attr_args, + [x for x in units_attr_sets if x[0] != 'unitspherical']) +def test_skycoord_three_components(repr_name, unit1, unit2, unit3, cls2, attr1, attr2, attr3, + representation, c1, c2, c3): + """ + Tests positional inputs using components (COMP1, COMP2, COMP3) + and various representations. Use weird units and Galactic frame. + """ + sc = SkyCoord(Galactic, c1, c2, c3, unit=(unit1, unit2, unit3), + representation=representation) + assert_quantities_allclose(sc, (c1*unit1, c2*unit2, c3*unit3), + (attr1, attr2, attr3)) + + sc = SkyCoord(1000*c1*u.Unit(unit1/1000), cls2(c2, unit=unit2), + 1000*c3*u.Unit(unit3/1000), Galactic, + unit=(unit1, unit2, unit3), representation=representation) + assert_quantities_allclose(sc, (c1*unit1, c2*unit2, c3*unit3), + (attr1, attr2, attr3)) + + kwargs = {attr3: c3} + sc = SkyCoord(Galactic, c1, c2, unit=(unit1, unit2, unit3), + representation=representation, **kwargs) + assert_quantities_allclose(sc, (c1*unit1, c2*unit2, c3*unit3), + (attr1, attr2, attr3)) + + kwargs = {attr1: c1, attr2: c2, attr3: c3} + sc = SkyCoord(Galactic, unit=(unit1, unit2, unit3), + representation=representation, **kwargs) + assert_quantities_allclose(sc, (c1*unit1, c2*unit2, c3*unit3), + (attr1, attr2, attr3)) + + +@pytest.mark.parametrize(units_attr_args, + [x for x in units_attr_sets + if x[0] in ('spherical', 'unitspherical')]) +def test_skycoord_spherical_two_components(repr_name, unit1, unit2, unit3, cls2, + attr1, attr2, attr3, representation, c1, c2, c3): + """ + Tests positional inputs using components (COMP1, COMP2) for spherical + representations. Use weird units and Galactic frame. 
+ """ + sc = SkyCoord(Galactic, c1, c2, unit=(unit1, unit2), + representation=representation) + assert_quantities_allclose(sc, (c1*unit1, c2*unit2), + (attr1, attr2)) + + sc = SkyCoord(1000*c1*u.Unit(unit1/1000), cls2(c2, unit=unit2), + Galactic, + unit=(unit1, unit2, unit3), representation=representation) + assert_quantities_allclose(sc, (c1*unit1, c2*unit2), + (attr1, attr2)) + + kwargs = {attr1: c1, attr2: c2} + sc = SkyCoord(Galactic, unit=(unit1, unit2), + representation=representation, **kwargs) + assert_quantities_allclose(sc, (c1*unit1, c2*unit2), + (attr1, attr2)) + + +@pytest.mark.parametrize(units_attr_args, + [x for x in units_attr_sets if x[0] != 'unitspherical']) +def test_galactic_three_components(repr_name, unit1, unit2, unit3, cls2, attr1, attr2, attr3, + representation, c1, c2, c3): + """ + Tests positional inputs using components (COMP1, COMP2, COMP3) + and various representations. Use weird units and Galactic frame. + """ + sc = Galactic(1000*c1*u.Unit(unit1/1000), cls2(c2, unit=unit2), + 1000*c3*u.Unit(unit3/1000), representation=representation) + assert_quantities_allclose(sc, (c1*unit1, c2*unit2, c3*unit3), + (attr1, attr2, attr3)) + + kwargs = {attr3: c3*unit3} + sc = Galactic(c1*unit1, c2*unit2, + representation=representation, **kwargs) + assert_quantities_allclose(sc, (c1*unit1, c2*unit2, c3*unit3), + (attr1, attr2, attr3)) + + kwargs = {attr1: c1*unit1, attr2: c2*unit2, attr3: c3*unit3} + sc = Galactic(representation=representation, **kwargs) + assert_quantities_allclose(sc, (c1*unit1, c2*unit2, c3*unit3), + (attr1, attr2, attr3)) + + +@pytest.mark.parametrize(units_attr_args, + [x for x in units_attr_sets + if x[0] in ('spherical', 'unitspherical')]) +def test_galactic_spherical_two_components(repr_name, unit1, unit2, unit3, cls2, + attr1, attr2, attr3, representation, c1, c2, c3): + """ + Tests positional inputs using components (COMP1, COMP2) for spherical + representations. Use weird units and Galactic frame. 
+ """ + + sc = Galactic(1000*c1*u.Unit(unit1/1000), cls2(c2, unit=unit2), representation=representation) + assert_quantities_allclose(sc, (c1*unit1, c2*unit2), (attr1, attr2)) + + sc = Galactic(c1*unit1, c2*unit2, representation=representation) + assert_quantities_allclose(sc, (c1*unit1, c2*unit2), (attr1, attr2)) + + kwargs = {attr1: c1*unit1, attr2: c2*unit2} + sc = Galactic(representation=representation, **kwargs) + assert_quantities_allclose(sc, (c1*unit1, c2*unit2), (attr1, attr2)) + + +@pytest.mark.parametrize(('repr_name', 'unit1', 'unit2', 'unit3', 'cls2', 'attr1', 'attr2', 'attr3'), + [x for x in base_unit_attr_sets if x[0] != 'unitspherical']) +def test_skycoord_coordinate_input(repr_name, unit1, unit2, unit3, cls2, attr1, attr2, attr3): + c1, c2, c3 = 1, 2, 3 + sc = SkyCoord([(c1, c2, c3)], unit=(unit1, unit2, unit3), representation=repr_name, + frame='galactic') + assert_quantities_allclose(sc, ([c1]*unit1, [c2]*unit2, [c3]*unit3), (attr1, attr2, attr3)) + + c1, c2, c3 = 1*unit1, 2*unit2, 3*unit3 + sc = SkyCoord([(c1, c2, c3)], representation=repr_name, frame='galactic') + assert_quantities_allclose(sc, ([1]*unit1, [2]*unit2, [3]*unit3), (attr1, attr2, attr3)) + + +def test_skycoord_string_coordinate_input(): + sc = SkyCoord('01 02 03 +02 03 04', unit='deg', representation='unitspherical') + assert_quantities_allclose(sc, (Angle('01:02:03', unit='deg'), + Angle('02:03:04', unit='deg')), + ('ra', 'dec')) + sc = SkyCoord(['01 02 03 +02 03 04'], unit='deg', representation='unitspherical') + assert_quantities_allclose(sc, (Angle(['01:02:03'], unit='deg'), + Angle(['02:03:04'], unit='deg')), + ('ra', 'dec')) + + +def test_units(): + sc = SkyCoord(1, 2, 3, unit='m', representation='cartesian') # All get meters + assert sc.x.unit is u.m + assert sc.y.unit is u.m + assert sc.z.unit is u.m + + sc = SkyCoord(1, 2*u.km, 3, unit='m', representation='cartesian') # All get u.m + assert sc.x.unit is u.m + assert sc.y.unit is u.m + assert sc.z.unit is u.m + + sc = SkyCoord(1, 2, 3, unit=u.m, representation='cartesian') # All get u.m + assert sc.x.unit is u.m + assert sc.y.unit is u.m + assert sc.z.unit is u.m + + sc = SkyCoord(1, 2, 3, unit='m, km, pc', representation='cartesian') + assert_quantities_allclose(sc, (1*u.m, 2*u.km, 3*u.pc), ('x', 'y', 'z')) + + with pytest.raises(u.UnitsError) as err: + SkyCoord(1, 2, 3, unit=(u.m, u.m), representation='cartesian') + assert 'should have matching physical types' in str(err) + + SkyCoord(1, 2, 3, unit=(u.m, u.km, u.pc), representation='cartesian') + assert_quantities_allclose(sc, (1*u.m, 2*u.km, 3*u.pc), ('x', 'y', 'z')) + + +@pytest.mark.xfail +def test_units_known_fail(): + # should fail but doesn't => corner case oddity + with pytest.raises(u.UnitsError): + SkyCoord(1, 2, 3, unit=u.deg, representation='spherical') + + +def test_nodata_failure(): + with pytest.raises(ValueError): + SkyCoord() + + +@pytest.mark.parametrize(('mode', 'origin'), [('wcs', 0), + ('all', 0), + ('all', 1)]) +def test_wcs_methods(mode, origin): + from ...wcs import WCS + from ...utils.data import get_pkg_data_contents + from ...wcs.utils import pixel_to_skycoord + + header = get_pkg_data_contents('../../wcs/tests/maps/1904-66_TAN.hdr', encoding='binary') + wcs = WCS(header) + + ref = SkyCoord(0.1 * u.deg, -89. 
* u.deg, frame='icrs') + + xp, yp = ref.to_pixel(wcs, mode=mode, origin=origin) + + # WCS is in FK5 so we need to transform back to ICRS + new = pixel_to_skycoord(xp, yp, wcs, mode=mode, origin=origin).transform_to('icrs') + + assert_allclose(new.ra.degree, ref.ra.degree) + assert_allclose(new.dec.degree, ref.dec.degree) + + # also try to round-trip with `from_pixel` + scnew = SkyCoord.from_pixel(xp, yp, wcs, mode=mode, origin=origin).transform_to('icrs') + assert_allclose(scnew.ra.degree, ref.ra.degree) + assert_allclose(scnew.dec.degree, ref.dec.degree) + + # Also make sure the right type comes out + class SkyCoord2(SkyCoord): + pass + scnew2 = SkyCoord2.from_pixel(xp, yp, wcs, mode=mode, origin=origin) + assert scnew.__class__ is SkyCoord + assert scnew2.__class__ is SkyCoord2 + + +def test_frame_attr_transform_inherit(): + """ + Test that frame attributes get inherited as expected during transform. + Driven by #3106. + """ + c = SkyCoord(1 * u.deg, 2 * u.deg, frame=FK5) + c2 = c.transform_to(FK4) + assert c2.equinox.value == 'B1950.000' + assert c2.obstime.value == 'B1950.000' + + c2 = c.transform_to(FK4(equinox='J1975', obstime='J1980')) + assert c2.equinox.value == 'J1975.000' + assert c2.obstime.value == 'J1980.000' + + c = SkyCoord(1 * u.deg, 2 * u.deg, frame=FK4) + c2 = c.transform_to(FK5) + assert c2.equinox.value == 'J2000.000' + assert c2.obstime is None + + c = SkyCoord(1 * u.deg, 2 * u.deg, frame=FK4, obstime='J1980') + c2 = c.transform_to(FK5) + assert c2.equinox.value == 'J2000.000' + assert c2.obstime.value == 'J1980.000' + + c = SkyCoord(1 * u.deg, 2 * u.deg, frame=FK4, equinox='J1975', obstime='J1980') + c2 = c.transform_to(FK5) + assert c2.equinox.value == 'J1975.000' + assert c2.obstime.value == 'J1980.000' + + c2 = c.transform_to(FK5(equinox='J1990')) + assert c2.equinox.value == 'J1990.000' + assert c2.obstime.value == 'J1980.000' + + # The work-around for #5722 + c = SkyCoord(1 * u.deg, 2 * u.deg, frame='fk5') + c1 = SkyCoord(1 * u.deg, 2 * u.deg, frame='fk5', equinox='B1950.000') + c2 = c1.transform_to(c) + assert not c2.is_equivalent_frame(c) # counterintuitive, but documented + assert c2.equinox.value == 'B1950.000' + c3 = c1.transform_to(c, merge_attributes=False) + assert c3.equinox.value == 'J2000.000' + assert c3.is_equivalent_frame(c) + + +def test_deepcopy(): + c1 = SkyCoord(1 * u.deg, 2 * u.deg) + c2 = copy.copy(c1) + c3 = copy.deepcopy(c1) + + c4 = SkyCoord([1, 2] * u.m, [2, 3] * u.m, [3, 4] * u.m, representation='cartesian', frame='fk5', + obstime='J1999.9', equinox='J1988.8') + c5 = copy.deepcopy(c4) + assert np.all(c5.x == c4.x) # and y and z + assert c5.frame.name == c4.frame.name + assert c5.obstime == c4.obstime + assert c5.equinox == c4.equinox + assert c5.representation == c4.representation + + +def test_no_copy(): + c1 = SkyCoord(np.arange(10.) * u.hourangle, np.arange(20., 30.) * u.deg) + c2 = SkyCoord(c1, copy=False) + # Note: c1.ra and c2.ra will *not* share memory, as these are recalculated + # to be in "preferred" units. See discussion in #4883. 
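+    # Added clarifying note (not from the original test): with copy=False the
+    # underlying representation data is passed through unchanged, so
+    # c1.data.lon and c2.data.lon below still share memory even though
+    # c1.ra and c2.ra do not.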
+ assert np.may_share_memory(c1.data.lon, c2.data.lon) + c3 = SkyCoord(c1, copy=True) + assert not np.may_share_memory(c1.data.lon, c3.data.lon) + + +def test_immutable(): + c1 = SkyCoord(1 * u.deg, 2 * u.deg) + with pytest.raises(AttributeError): + c1.ra = 3.0 + + c1.foo = 42 + assert c1.foo == 42 + + +@pytest.mark.skipif(str('not HAS_SCIPY')) +@pytest.mark.skipif(str('OLDER_SCIPY')) +def test_search_around(): + """ + Test the search_around_* methods + + Here we don't actually test the values are right, just that the methods of + SkyCoord work. The accuracy tests are in ``test_matching.py`` + """ + from ...utils import NumpyRNGContext + + with NumpyRNGContext(987654321): + sc1 = SkyCoord(np.random.rand(20) * 360.*u.degree, + (np.random.rand(20) * 180. - 90.)*u.degree) + sc2 = SkyCoord(np.random.rand(100) * 360. * u.degree, + (np.random.rand(100) * 180. - 90.)*u.degree) + + sc1ds = SkyCoord(ra=sc1.ra, dec=sc1.dec, distance=np.random.rand(20)*u.kpc) + sc2ds = SkyCoord(ra=sc2.ra, dec=sc2.dec, distance=np.random.rand(100)*u.kpc) + + idx1_sky, idx2_sky, d2d_sky, d3d_sky = sc1.search_around_sky(sc2, 10*u.deg) + idx1_3d, idx2_3d, d2d_3d, d3d_3d = sc1ds.search_around_3d(sc2ds, 250*u.pc) + + +def test_init_with_frame_instance_keyword(): + + # Frame instance + c1 = SkyCoord(3 * u.deg, 4 * u.deg, + frame=FK5(equinox='J2010')) + assert c1.equinox == Time('J2010') + + # Frame instance with data (data gets ignored) + c2 = SkyCoord(3 * u.deg, 4 * u.deg, + frame=FK5(1. * u.deg, 2 * u.deg, + equinox='J2010')) + assert c2.equinox == Time('J2010') + assert allclose(c2.ra.degree, 3) + assert allclose(c2.dec.degree, 4) + + # SkyCoord instance + c3 = SkyCoord(3 * u.deg, 4 * u.deg, frame=c1) + assert c3.equinox == Time('J2010') + + # Check duplicate arguments + with pytest.raises(ValueError) as exc: + c = SkyCoord(3 * u.deg, 4 * u.deg, frame=FK5(equinox='J2010'), equinox='J2001') + assert exc.value.args[0] == ("cannot specify frame attribute " + "'equinox' directly in SkyCoord " + "since a frame instance was passed in") + + +def test_init_with_frame_instance_positional(): + + # Frame instance + with pytest.raises(ValueError) as exc: + c1 = SkyCoord(3 * u.deg, 4 * u.deg, FK5(equinox='J2010')) + assert exc.value.args[0] == ("FK5 instance cannot be passed as a " + "positional argument for the frame, " + "pass it using the frame= keyword " + "instead.") + + # Positional frame instance with data raises exception + with pytest.raises(ValueError) as exc: + SkyCoord(3 * u.deg, 4 * u.deg, FK5(1. * u.deg, 2 * u.deg, equinox='J2010')) + assert exc.value.args[0] == ("FK5 instance cannot be passed as a " + "positional argument for the frame, " + "pass it using the frame= keyword " + "instead.") + + # Positional SkyCoord instance (for frame) raises exception + with pytest.raises(ValueError) as exc: + SkyCoord(3 * u.deg, 4 * u.deg, SkyCoord(1. 
* u.deg, 2 * u.deg, equinox='J2010'))
+    assert exc.value.args[0] == ("SkyCoord instance cannot be passed as a "
+                                 "positional argument for the frame, "
+                                 "pass it using the frame= keyword "
+                                 "instead.")
+
+
+def test_guess_from_table():
+    from ...table import Table, Column
+    from ...utils import NumpyRNGContext
+
+    tab = Table()
+    with NumpyRNGContext(987654321):
+        tab.add_column(Column(data=np.random.rand(1000), unit='deg', name='RA[J2000]'))
+        tab.add_column(Column(data=np.random.rand(1000), unit='deg', name='DEC[J2000]'))
+
+    sc = SkyCoord.guess_from_table(tab)
+    npt.assert_array_equal(sc.ra.deg, tab['RA[J2000]'])
+    npt.assert_array_equal(sc.dec.deg, tab['DEC[J2000]'])
+
+    # try without units in the table
+    tab['RA[J2000]'].unit = None
+    tab['DEC[J2000]'].unit = None
+    # should fail if not given explicitly
+    with pytest.raises(u.UnitsError):
+        sc2 = SkyCoord.guess_from_table(tab)
+
+    # but should work if provided
+    sc2 = SkyCoord.guess_from_table(tab, unit=u.deg)
+    npt.assert_array_equal(sc2.ra.deg, tab['RA[J2000]'])
+    npt.assert_array_equal(sc2.dec.deg, tab['DEC[J2000]'])
+
+    # should fail if two options are available - ambiguity bad!
+    tab.add_column(Column(data=np.random.rand(1000), name='RA_J1900'))
+    with pytest.raises(ValueError) as excinfo:
+        sc3 = SkyCoord.guess_from_table(tab, unit=u.deg)
+    assert 'J1900' in excinfo.value.args[0] and 'J2000' in excinfo.value.args[0]
+
+    # should also fail if user specifies something already in the table, but
+    # should succeed even if the user has to give one of the components
+    tab.remove_column('RA_J1900')
+    with pytest.raises(ValueError):
+        sc3 = SkyCoord.guess_from_table(tab, ra=tab['RA[J2000]'], unit=u.deg)
+
+    oldra = tab['RA[J2000]']
+    tab.remove_column('RA[J2000]')
+    sc3 = SkyCoord.guess_from_table(tab, ra=oldra, unit=u.deg)
+    npt.assert_array_equal(sc3.ra.deg, oldra)
+    npt.assert_array_equal(sc3.dec.deg, tab['DEC[J2000]'])
+
+    # check a few non-ICRS/spherical systems
+    x, y, z = np.arange(3).reshape(3, 1) * u.pc
+    l, b = np.arange(2).reshape(2, 1) * u.deg
+
+    tabcart = Table([x, y, z], names=('x', 'y', 'z'))
+    tabgal = Table([b, l], names=('b', 'l'))
+
+    sc_cart = SkyCoord.guess_from_table(tabcart, representation='cartesian')
+    npt.assert_array_equal(sc_cart.x, x)
+    npt.assert_array_equal(sc_cart.y, y)
+    npt.assert_array_equal(sc_cart.z, z)
+
+    sc_gal = SkyCoord.guess_from_table(tabgal, frame='galactic')
+    npt.assert_array_equal(sc_gal.l, l)
+    npt.assert_array_equal(sc_gal.b, b)
+
+    # also try some column names that *end* with the attribute name
+    tabgal['b'].name = 'gal_b'
+    tabgal['l'].name = 'gal_l'
+    SkyCoord.guess_from_table(tabgal, frame='galactic')
+
+    tabgal['gal_b'].name = 'blob'
+    tabgal['gal_l'].name = 'central'
+    with pytest.raises(ValueError):
+        SkyCoord.guess_from_table(tabgal, frame='galactic')
+
+
+def test_skycoord_list_creation():
+    """
+    Test that SkyCoord can be created in a reasonable way with lists of SkyCoords
+    (regression for #2702)
+    """
+    sc = SkyCoord(ra=[1, 2, 3]*u.deg, dec=[4, 5, 6]*u.deg)
+    sc0 = sc[0]
+    sc2 = sc[2]
+    scnew = SkyCoord([sc0, sc2])
+    assert np.all(scnew.ra == [1, 3]*u.deg)
+    assert np.all(scnew.dec == [4, 6]*u.deg)
+
+    # also check ranges
+    sc01 = sc[:2]
+    scnew2 = SkyCoord([sc01, sc2])
+    assert np.all(scnew2.ra == sc.ra)
+    assert np.all(scnew2.dec == sc.dec)
+
+    # now try with a mix of skycoord, frame, and repr objects
+    frobj = ICRS(2*u.deg, 5*u.deg)
+    reprobj = UnitSphericalRepresentation(3*u.deg, 6*u.deg)
+    scnew3 = SkyCoord([sc0, frobj, reprobj])
+    assert np.all(scnew3.ra == sc.ra)
+    assert np.all(scnew3.dec == sc.dec)
+
+    # should *fail* if different frame attributes or types are passed in
+    scfk5_j2000 = SkyCoord(1*u.deg, 4*u.deg, frame='fk5')
+    with pytest.raises(ValueError):
+        SkyCoord([sc0, scfk5_j2000])
+    scfk5_j2010 = SkyCoord(1*u.deg, 4*u.deg, frame='fk5', equinox='J2010')
+    with pytest.raises(ValueError):
+        SkyCoord([scfk5_j2000, scfk5_j2010])
+
+    # but they should inherit if they're all consistent
+    scfk5_2_j2010 = SkyCoord(2*u.deg, 5*u.deg, frame='fk5', equinox='J2010')
+    scfk5_3_j2010 = SkyCoord(3*u.deg, 6*u.deg, frame='fk5', equinox='J2010')
+
+    scnew4 = SkyCoord([scfk5_j2010, scfk5_2_j2010, scfk5_3_j2010])
+    assert np.all(scnew4.ra == sc.ra)
+    assert np.all(scnew4.dec == sc.dec)
+    assert scnew4.equinox == Time('J2010')
+
+
+def test_nd_skycoord_to_string():
+    c = SkyCoord(np.ones((2, 2)), 1, unit=('deg', 'deg'))
+    ts = c.to_string()
+    assert np.all(ts.shape == c.shape)
+    assert np.all(ts == u'1 1')
+
+
+def test_equiv_skycoord():
+    sci1 = SkyCoord(1*u.deg, 2*u.deg, frame='icrs')
+    sci2 = SkyCoord(1*u.deg, 3*u.deg, frame='icrs')
+    assert sci1.is_equivalent_frame(sci1)
+    assert sci1.is_equivalent_frame(sci2)
+
+    assert sci1.is_equivalent_frame(ICRS())
+    assert not sci1.is_equivalent_frame(FK5())
+    with pytest.raises(TypeError):
+        sci1.is_equivalent_frame(10)
+
+    scf1 = SkyCoord(1*u.deg, 2*u.deg, frame='fk5')
+    scf2 = SkyCoord(1*u.deg, 2*u.deg, frame='fk5', equinox='J2005')
+    # obstime is *not* an FK5 attribute, but we still want scf1 and scf3
+    # to come out different because they're part of SkyCoord
+    scf3 = SkyCoord(1*u.deg, 2*u.deg, frame='fk5', obstime='J2005')
+
+    assert scf1.is_equivalent_frame(scf1)
+    assert not scf1.is_equivalent_frame(sci1)
+    assert scf1.is_equivalent_frame(FK5())
+
+    assert not scf1.is_equivalent_frame(scf2)
+    assert scf2.is_equivalent_frame(FK5(equinox='J2005'))
+    assert not scf3.is_equivalent_frame(scf1)
+    assert not scf3.is_equivalent_frame(FK5(equinox='J2005'))
+
+
+def test_constellations():
+    # the actual test for accuracy is in test_funcs - this is just meant to make
+    # sure we get sensible answers
+    sc = SkyCoord(135*u.deg, 65*u.deg)
+    assert sc.get_constellation() == 'Ursa Major'
+    assert sc.get_constellation(short_name=True) == 'UMa'
+
+    scs = SkyCoord([135]*2*u.deg, [65]*2*u.deg)
+    npt.assert_equal(scs.get_constellation(), ['Ursa Major']*2)
+    npt.assert_equal(scs.get_constellation(short_name=True), ['UMa']*2)
+
+
+@remote_data
+def test_constellations_with_nameresolve():
+    assert SkyCoord.from_name('And I').get_constellation(short_name=True) == 'And'
+
+    # you'd think "And ..." should be in Andromeda. But you'd be wrong.
+    assert SkyCoord.from_name('And VI').get_constellation() == 'Pegasus'
+
+    # maybe it's because And VI isn't really a galaxy?
+    assert SkyCoord.from_name('And XXII').get_constellation() == 'Pisces'
+    assert SkyCoord.from_name('And XXX').get_constellation() == 'Cassiopeia'
+    # ok maybe not
+
+    # ok, but at least some of the others do make sense...
+    assert SkyCoord.from_name('Coma Cluster').get_constellation(short_name=True) == 'Com'
+    assert SkyCoord.from_name('UMa II').get_constellation() == 'Ursa Major'
+    assert SkyCoord.from_name('Triangulum Galaxy').get_constellation() == 'Triangulum'
+
+
+def test_getitem_representation():
+    """
+    Make sure current representation survives __getitem__ even if different
+    from data representation.
+    """
+    sc = SkyCoord([1, 1] * u.deg, [2, 2] * u.deg)
+    sc.representation = 'cartesian'
+    assert sc[0].representation is CartesianRepresentation
+
+
+def test_spherical_offsets():
+    i00 = SkyCoord(0*u.arcmin, 0*u.arcmin, frame='icrs')
+    i01 = SkyCoord(0*u.arcmin, 1*u.arcmin, frame='icrs')
+    i10 = SkyCoord(1*u.arcmin, 0*u.arcmin, frame='icrs')
+    i11 = SkyCoord(1*u.arcmin, 1*u.arcmin, frame='icrs')
+    i22 = SkyCoord(2*u.arcmin, 2*u.arcmin, frame='icrs')
+
+    dra, ddec = i00.spherical_offsets_to(i01)
+    assert_allclose(dra, 0*u.arcmin)
+    assert_allclose(ddec, 1*u.arcmin)
+
+    dra, ddec = i00.spherical_offsets_to(i10)
+    assert_allclose(dra, 1*u.arcmin)
+    assert_allclose(ddec, 0*u.arcmin)
+
+    dra, ddec = i10.spherical_offsets_to(i01)
+    assert_allclose(dra, -1*u.arcmin)
+    assert_allclose(ddec, 1*u.arcmin)
+
+    dra, ddec = i11.spherical_offsets_to(i22)
+    assert_allclose(ddec, 1*u.arcmin)
+    assert 0*u.arcmin < dra < 1*u.arcmin
+
+    fk5 = SkyCoord(0*u.arcmin, 0*u.arcmin, frame='fk5')
+
+    with pytest.raises(ValueError):
+        # different frames should fail
+        i00.spherical_offsets_to(fk5)
+
+    i1deg = ICRS(1*u.deg, 1*u.deg)
+    dra, ddec = i00.spherical_offsets_to(i1deg)
+    assert_allclose(dra, 1*u.deg)
+    assert_allclose(ddec, 1*u.deg)
+
+    # make sure an abbreviated array-based version of the above also works
+    i00s = SkyCoord([0]*4*u.arcmin, [0]*4*u.arcmin, frame='icrs')
+    i01s = SkyCoord([0]*4*u.arcmin, np.arange(4)*u.arcmin, frame='icrs')
+    dra, ddec = i00s.spherical_offsets_to(i01s)
+    assert_allclose(dra, 0*u.arcmin)
+    assert_allclose(ddec, np.arange(4)*u.arcmin)
+
+
+def test_frame_attr_changes():
+    """
+    This tests the case where a frame is added with a new frame attribute after
+    a SkyCoord has been created. This is necessary because SkyCoords get the
+    attributes set at creation time, but the set of attributes can change as
+    frames are added or removed from the transform graph. This makes sure that
+    everything continues to work consistently.
+    """
+    sc_before = SkyCoord(1*u.deg, 2*u.deg, frame='icrs')
+
+    assert 'fakeattr' not in dir(sc_before)
+
+    class FakeFrame(BaseCoordinateFrame):
+        fakeattr = Attribute()
+
+    # doesn't matter what this does as long as it just puts the frame in the
+    # transform graph
+    transset = (ICRS, FakeFrame, lambda c, f: c)
+    frame_transform_graph.add_transform(*transset)
+    try:
+        assert 'fakeattr' in dir(sc_before)
+        assert sc_before.fakeattr is None
+
+        sc_after1 = SkyCoord(1*u.deg, 2*u.deg, frame='icrs')
+        assert 'fakeattr' in dir(sc_after1)
+        assert sc_after1.fakeattr is None
+
+        sc_after2 = SkyCoord(1*u.deg, 2*u.deg, frame='icrs', fakeattr=1)
+        assert sc_after2.fakeattr == 1
+    finally:
+        frame_transform_graph.remove_transform(*transset)
+
+    assert 'fakeattr' not in dir(sc_before)
+    assert 'fakeattr' not in dir(sc_after1)
+    assert 'fakeattr' not in dir(sc_after2)
+
+
+def test_cache_clear_sc():
+    from .. import SkyCoord
+
+    i = SkyCoord(1*u.deg, 2*u.deg)
+
+    # Add an in frame units version of the rep to the cache.
+    repr(i)
+
+    assert len(i.cache['representation']) == 2
+
+    i.cache.clear()
+
+    assert len(i.cache['representation']) == 0
+
+
+def test_set_attribute_exceptions():
+    """Ensure no attribute for any frame can be set directly.
+
+    Though it is fine if the current frame does not have it."""
+    sc = SkyCoord(1.*u.deg, 2.*u.deg, frame='fk5')
+    assert hasattr(sc.frame, 'equinox')
+    with pytest.raises(AttributeError):
+        sc.equinox = 'B1950'
+
+    assert sc.relative_humidity is None
+    sc.relative_humidity = 0.5
+    assert sc.relative_humidity == 0.5
+    assert not hasattr(sc.frame, 'relative_humidity')
+
+
+def test_extra_attributes():
+    """Ensure any extra attributes are dealt with correctly.
+
+    Regression test against #5743.
+    """
+    obstime_string = ['2017-01-01T00:00', '2017-01-01T00:10']
+    obstime = Time(obstime_string)
+    sc = SkyCoord([5, 10], [20, 30], unit=u.deg, obstime=obstime_string)
+    assert not hasattr(sc.frame, 'obstime')
+    assert type(sc.obstime) is Time
+    assert sc.obstime.shape == (2,)
+    assert np.all(sc.obstime == obstime)
+    # ensure equivalency still works for more than one obstime.
+    assert sc.is_equivalent_frame(sc)
+    sc_1 = sc[1]
+    assert sc_1.obstime == obstime[1]
+    # Transforming to FK4 should use sc.obstime.
+    sc_fk4 = sc.transform_to('fk4')
+    assert np.all(sc_fk4.frame.obstime == obstime)
+    # And transforming back should not lose it.
+    sc2 = sc_fk4.transform_to('icrs')
+    assert not hasattr(sc2.frame, 'obstime')
+    assert np.all(sc2.obstime == obstime)
+    # Ensure obstime gets taken from the SkyCoord if passed in directly.
+    # (regression test for #5749).
+    sc3 = SkyCoord([0., 1.], [2., 3.], unit='deg', frame=sc)
+    assert np.all(sc3.obstime == obstime)
+    # Finally, check that we can delete such attributes.
+    del sc3.obstime
+    assert sc3.obstime is None
diff --git a/astropy/coordinates/tests/test_skyoffset_transformations.py b/astropy/coordinates/tests/test_skyoffset_transformations.py
new file mode 100644
index 0000000..7d38a96
--- /dev/null
+++ b/astropy/coordinates/tests/test_skyoffset_transformations.py
@@ -0,0 +1,312 @@
+# -*- coding: utf-8 -*-
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
+
+import pytest
+import numpy as np
+
+from ... import units as u
+from ..distances import Distance
+from ..builtin_frames import ICRS, FK5, Galactic, AltAz, SkyOffsetFrame
+from .. import SkyCoord, EarthLocation
+from ...time import Time
+from ...tests.helper import assert_quantity_allclose as assert_allclose
+
+from ...extern.six.moves import range
+
+
+@pytest.mark.parametrize("inradec,expectedlatlon, tolsep", [
+    ((45, 45)*u.deg, (0, 0)*u.deg, .001*u.arcsec),
+    ((45, 0)*u.deg, (0, -45)*u.deg, .001*u.arcsec),
+    ((45, 90)*u.deg, (0, 45)*u.deg, .001*u.arcsec),
+    ((46, 45)*u.deg, (1*np.cos(45*u.deg), 0)*u.deg, 16*u.arcsec),
+    ])
+def test_skyoffset(inradec, expectedlatlon, tolsep, originradec=(45, 45)*u.deg):
+    origin = ICRS(*originradec)
+    skyoffset_frame = SkyOffsetFrame(origin=origin)
+
+    skycoord = SkyCoord(*inradec, frame=ICRS)
+    skycoord_inaf = skycoord.transform_to(skyoffset_frame)
+    assert hasattr(skycoord_inaf, 'lon')
+    assert hasattr(skycoord_inaf, 'lat')
+    expected = SkyCoord(*expectedlatlon, frame=skyoffset_frame)
+
+    assert skycoord_inaf.separation(expected) < tolsep
+
+
+def test_skyoffset_functional_ra():
+    # we do the 12)[1:-1] business because sometimes machine precision issues
+    # lead to results that are either ~0 or ~360, which mucks up the final
+    # comparison and leads to spurious failures.
So this just avoids that by + # staying away from the edges + input_ra = np.linspace(0, 360, 12)[1:-1] + input_dec = np.linspace(-90, 90, 12)[1:-1] + icrs_coord = ICRS(ra=input_ra*u.deg, + dec=input_dec*u.deg, + distance=1.*u.kpc) + + for ra in np.linspace(0, 360, 24): + # expected rotation + expected = ICRS(ra=np.linspace(0-ra, 360-ra, 12)[1:-1]*u.deg, + dec=np.linspace(-90, 90, 12)[1:-1]*u.deg, + distance=1.*u.kpc) + expected_xyz = expected.cartesian.xyz + + # actual transformation to the frame + skyoffset_frame = SkyOffsetFrame(origin=ICRS(ra*u.deg, 0*u.deg)) + actual = icrs_coord.transform_to(skyoffset_frame) + actual_xyz = actual.cartesian.xyz + + # back to ICRS + roundtrip = actual.transform_to(ICRS) + roundtrip_xyz = roundtrip.cartesian.xyz + + # Verify + assert_allclose(actual_xyz, expected_xyz, atol=1E-5*u.kpc) + assert_allclose(icrs_coord.ra, roundtrip.ra, atol=1E-5*u.deg) + assert_allclose(icrs_coord.dec, roundtrip.dec, atol=1E-5*u.deg) + assert_allclose(icrs_coord.distance, roundtrip.distance, atol=1E-5*u.kpc) + + +def test_skyoffset_functional_dec(): + # we do the 12)[1:-1] business because sometimes machine precision issues + # lead to results that are either ~0 or ~360, which mucks up the final + # comparison and leads to spurious failures. So this just avoids that by + # staying away from the edges + input_ra = np.linspace(0, 360, 12)[1:-1] + input_dec = np.linspace(-90, 90, 12)[1:-1] + input_ra_rad = np.deg2rad(input_ra) + input_dec_rad = np.deg2rad(input_dec) + icrs_coord = ICRS(ra=input_ra*u.deg, + dec=input_dec*u.deg, + distance=1.*u.kpc) + # Dec rotations + # Done in xyz space because dec must be [-90,90] + + for dec in np.linspace(-90, 90, 13): + # expected rotation + dec_rad = -np.deg2rad(dec) + expected_x = (-np.sin(input_dec_rad) * np.sin(dec_rad) + + np.cos(input_ra_rad) * np.cos(input_dec_rad) * np.cos(dec_rad)) + expected_y = (np.sin(input_ra_rad) * np.cos(input_dec_rad)) + expected_z = (np.sin(input_dec_rad) * np.cos(dec_rad) + + np.sin(dec_rad) * np.cos(input_ra_rad) * np.cos(input_dec_rad)) + expected = SkyCoord(x=expected_x, + y=expected_y, + z=expected_z, unit='kpc', representation='cartesian') + expected_xyz = expected.cartesian.xyz + + # actual transformation to the frame + skyoffset_frame = SkyOffsetFrame(origin=ICRS(0*u.deg, dec*u.deg)) + actual = icrs_coord.transform_to(skyoffset_frame) + actual_xyz = actual.cartesian.xyz + + # back to ICRS + roundtrip = actual.transform_to(ICRS) + + # Verify + assert_allclose(actual_xyz, expected_xyz, atol=1E-5*u.kpc) + assert_allclose(icrs_coord.ra, roundtrip.ra, atol=1E-5*u.deg) + assert_allclose(icrs_coord.dec, roundtrip.dec, atol=1E-5*u.deg) + assert_allclose(icrs_coord.distance, roundtrip.distance, atol=1E-5*u.kpc) + + +def test_skyoffset_functional_ra_dec(): + # we do the 12)[1:-1] business because sometimes machine precision issues + # lead to results that are either ~0 or ~360, which mucks up the final + # comparison and leads to spurious failures. 
So this just avoids that by + # staying away from the edges + input_ra = np.linspace(0, 360, 12)[1:-1] + input_dec = np.linspace(-90, 90, 12)[1:-1] + input_ra_rad = np.deg2rad(input_ra) + input_dec_rad = np.deg2rad(input_dec) + icrs_coord = ICRS(ra=input_ra*u.deg, + dec=input_dec*u.deg, + distance=1.*u.kpc) + + for ra in np.linspace(0, 360, 10): + for dec in np.linspace(-90, 90, 5): + # expected rotation + dec_rad = -np.deg2rad(dec) + ra_rad = np.deg2rad(ra) + expected_x = (-np.sin(input_dec_rad) * np.sin(dec_rad) + + np.cos(input_ra_rad) * np.cos(input_dec_rad) * np.cos(dec_rad) * np.cos(ra_rad) + + np.sin(input_ra_rad) * np.cos(input_dec_rad) * np.cos(dec_rad) * np.sin(ra_rad)) + expected_y = (np.sin(input_ra_rad) * np.cos(input_dec_rad) * np.cos(ra_rad) - + np.cos(input_ra_rad) * np.cos(input_dec_rad) * np.sin(ra_rad)) + expected_z = (np.sin(input_dec_rad) * np.cos(dec_rad) + + np.sin(dec_rad) * np.cos(ra_rad) * np.cos(input_ra_rad) * np.cos(input_dec_rad) + + np.sin(dec_rad) * np.sin(ra_rad) * np.sin(input_ra_rad) * np.cos(input_dec_rad)) + expected = SkyCoord(x=expected_x, + y=expected_y, + z=expected_z, unit='kpc', representation='cartesian') + expected_xyz = expected.cartesian.xyz + + # actual transformation to the frame + skyoffset_frame = SkyOffsetFrame(origin=ICRS(ra*u.deg, dec*u.deg)) + actual = icrs_coord.transform_to(skyoffset_frame) + actual_xyz = actual.cartesian.xyz + + # back to ICRS + roundtrip = actual.transform_to(ICRS) + + # Verify + assert_allclose(actual_xyz, expected_xyz, atol=1E-5*u.kpc) + assert_allclose(icrs_coord.ra, roundtrip.ra, atol=1E-4*u.deg) + assert_allclose(icrs_coord.dec, roundtrip.dec, atol=1E-5*u.deg) + assert_allclose(icrs_coord.distance, roundtrip.distance, atol=1E-5*u.kpc) + + +def test_skycoord_skyoffset_frame(): + m31 = SkyCoord(10.6847083, 41.26875, frame='icrs', unit=u.deg) + m33 = SkyCoord(23.4621, 30.6599417, frame='icrs', unit=u.deg) + + m31_astro = m31.skyoffset_frame() + m31_in_m31 = m31.transform_to(m31_astro) + m33_in_m31 = m33.transform_to(m31_astro) + + assert_allclose([m31_in_m31.lon, m31_in_m31.lat], [0, 0]*u.deg, atol=1e-10*u.deg) + assert_allclose([m33_in_m31.lon, m33_in_m31.lat], [11.13135175, -9.79084759]*u.deg) + + assert_allclose(m33.separation(m31), + np.hypot(m33_in_m31.lon, m33_in_m31.lat), + atol=.1*u.deg) + + +# used below in the next parametrized test +m31_sys = [ICRS, FK5, Galactic] +m31_coo = [(10.6847929, 41.2690650), (10.6847929, 41.2690650), (121.1744050, -21.5729360)] +m31_dist = Distance(770, u.kpc) +convert_precision = 1 * u.arcsec +roundtrip_precision = 1e-4 * u.degree +dist_precision = 1e-9 * u.kpc + +m31_params = [] +for i in range(len(m31_sys)): + for j in range(len(m31_sys)): + if i < j: + m31_params.append((m31_sys[i], m31_sys[j], m31_coo[i], m31_coo[j])) + + +@pytest.mark.parametrize(('fromsys', 'tosys', 'fromcoo', 'tocoo'), m31_params) +def test_m31_coord_transforms(fromsys, tosys, fromcoo, tocoo): + """ + This tests a variety of coordinate conversions for the Chandra point-source + catalog location of M31 from NED, via SkyOffsetFrames + """ + from_origin = fromsys(fromcoo[0]*u.deg, fromcoo[1]*u.deg, + distance=m31_dist) + from_pos = SkyOffsetFrame(1*u.deg, 1*u.deg, origin=from_origin) + to_origin = tosys(tocoo[0]*u.deg, tocoo[1]*u.deg, distance=m31_dist) + + to_astroframe = SkyOffsetFrame(origin=to_origin) + target_pos = from_pos.transform_to(to_astroframe) + + assert_allclose(to_origin.separation(target_pos), + np.hypot(from_pos.lon, from_pos.lat), + atol=convert_precision) + roundtrip_pos = 
target_pos.transform_to(from_pos)
+    assert_allclose([roundtrip_pos.lon.wrap_at(180*u.deg), roundtrip_pos.lat],
+                    [1.0*u.deg, 1.0*u.deg], atol=convert_precision)
+
+
+def test_altaz_attribute_transforms():
+    """Test transforms between AltAz frames with different attributes."""
+    el1 = EarthLocation(0*u.deg, 0*u.deg, 0*u.m)
+    origin1 = AltAz(0 * u.deg, 0*u.deg, obstime=Time("2000-01-01T12:00:00"),
+                    location=el1)
+    frame1 = SkyOffsetFrame(origin=origin1)
+    coo1 = SkyCoord(1 * u.deg, 1 * u.deg, frame=frame1)
+
+    el2 = EarthLocation(0*u.deg, 0*u.deg, 0*u.m)
+    origin2 = AltAz(0 * u.deg, 0*u.deg, obstime=Time("2000-01-01T11:00:00"),
+                    location=el2)
+    frame2 = SkyOffsetFrame(origin=origin2)
+    coo2 = coo1.transform_to(frame2)
+    coo2_expected = [1.22522446, 0.70624298] * u.deg
+    assert_allclose([coo2.lon.wrap_at(180*u.deg), coo2.lat],
+                    coo2_expected, atol=convert_precision)
+
+    el3 = EarthLocation(0*u.deg, 90*u.deg, 0*u.m)
+    origin3 = AltAz(0 * u.deg, 90*u.deg, obstime=Time("2000-01-01T12:00:00"),
+                    location=el3)
+    frame3 = SkyOffsetFrame(origin=origin3)
+    coo3 = coo2.transform_to(frame3)
+    assert_allclose([coo3.lon.wrap_at(180*u.deg), coo3.lat],
+                    [1*u.deg, 1*u.deg], atol=convert_precision)
+
+
+@pytest.mark.parametrize("rotation, expectedlatlon", [
+    (0*u.deg, [0, 1]*u.deg),
+    (180*u.deg, [0, -1]*u.deg),
+    (90*u.deg, [-1, 0]*u.deg),
+    (-90*u.deg, [1, 0]*u.deg)
+    ])
+def test_rotation(rotation, expectedlatlon):
+    origin = ICRS(45*u.deg, 45*u.deg)
+    target = ICRS(45*u.deg, 46*u.deg)
+
+    aframe = SkyOffsetFrame(origin=origin, rotation=rotation)
+    trans = target.transform_to(aframe)
+
+    assert_allclose([trans.lon.wrap_at(180*u.deg), trans.lat],
+                    expectedlatlon, atol=1e-10*u.deg)
+
+
+@pytest.mark.parametrize("rotation, expectedlatlon", [
+    (0*u.deg, [0, 1]*u.deg),
+    (180*u.deg, [0, -1]*u.deg),
+    (90*u.deg, [-1, 0]*u.deg),
+    (-90*u.deg, [1, 0]*u.deg)
+    ])
+def test_skycoord_skyoffset_frame_rotation(rotation, expectedlatlon):
+    """Test if passing a rotation argument via SkyCoord works"""
+    origin = SkyCoord(45*u.deg, 45*u.deg)
+    target = SkyCoord(45*u.deg, 46*u.deg)
+
+    aframe = origin.skyoffset_frame(rotation=rotation)
+    trans = target.transform_to(aframe)
+
+    assert_allclose([trans.lon.wrap_at(180*u.deg), trans.lat],
+                    expectedlatlon, atol=1e-10*u.deg)
+
+
+def test_skyoffset_names():
+    origin1 = ICRS(45*u.deg, 45*u.deg)
+    aframe1 = SkyOffsetFrame(origin=origin1)
+    assert type(aframe1).__name__ == 'SkyOffsetICRS'
+
+    origin2 = Galactic(45*u.deg, 45*u.deg)
+    aframe2 = SkyOffsetFrame(origin=origin2)
+    assert type(aframe2).__name__ == 'SkyOffsetGalactic'
+
+
+def test_skyoffset_origindata():
+    origin = ICRS()
+    with pytest.raises(ValueError):
+        SkyOffsetFrame(origin=origin)
+
+
+def test_skyoffset_lonwrap():
+    origin = ICRS(45*u.deg, 45*u.deg)
+    sc = SkyCoord(190*u.deg, -45*u.deg, frame=SkyOffsetFrame(origin=origin))
+    assert sc.lon < 180 * u.deg
+
+
+def test_skyoffset_velerr():
+    # TODO: remove this when SkyOffsetFrames support velocities
+    origin = ICRS(45*u.deg, 45*u.deg)
+    originwvel = ICRS(45*u.deg, 45*u.deg, radial_velocity=1*u.km/u.s)
+
+    SkyOffsetFrame(origin=origin)
+    with pytest.raises(NotImplementedError):
+        SkyOffsetFrame(origin=originwvel)
+    SkyOffsetFrame(origin.data, origin=origin)
+    with pytest.raises(NotImplementedError):
+        SkyOffsetFrame(originwvel.data, origin=origin)
+    with pytest.raises(NotImplementedError):
+        SkyOffsetFrame(origin.data, origin=originwvel)
+    with pytest.raises(NotImplementedError):
+        SkyOffsetFrame(originwvel.data, origin=originwvel)
diff --git
a/astropy/coordinates/tests/test_solar_system.py b/astropy/coordinates/tests/test_solar_system.py new file mode 100644 index 0000000..5580ea2 --- /dev/null +++ b/astropy/coordinates/tests/test_solar_system.py @@ -0,0 +1,365 @@ +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import pytest +import numpy as np + +from ...time import Time +from ... import units as u +from ...constants import c +from ..builtin_frames import GCRS +from ..earth import EarthLocation +from ..sky_coordinate import SkyCoord +from ..solar_system import (get_body, get_moon, BODY_NAME_TO_KERNEL_SPEC, + _apparent_position_in_true_coordinates, + get_body_barycentric, get_body_barycentric_posvel) +from ..funcs import get_sun +from ...tests.helper import (remote_data, assert_quantity_allclose, + quantity_allclose) + +try: + import jplephem # pylint: disable=W0611 +except ImportError: + HAS_JPLEPHEM = False +else: + HAS_JPLEPHEM = True + +try: + from skyfield.api import load # pylint: disable=W0611 +except ImportError: + HAS_SKYFIELD = False +else: + HAS_SKYFIELD = True + +de432s_separation_tolerance_planets = 5*u.arcsec +de432s_separation_tolerance_moon = 5*u.arcsec +de432s_distance_tolerance = 20*u.km + +skyfield_angular_separation_tolerance = 1*u.arcsec +skyfield_separation_tolerance = 10*u.km + + +@remote_data +@pytest.mark.skipif(str('not HAS_SKYFIELD')) +def test_positions_skyfield(): + """ + Test positions against those generated by skyfield. + """ + + t = Time('1980-03-25 00:00') + location = None + + # skyfield ephemeris + planets = load('de421.bsp') + ts = load.timescale() + mercury, jupiter, moon = planets['mercury'], planets['jupiter barycenter'], planets['moon'] + earth = planets['earth'] + + skyfield_t = ts.from_astropy(t) + + if location is not None: + earth = earth.topos(latitude_degrees=location.lat.to_value(u.deg), + longitude_degrees=location.lon.to_value(u.deg), + elevation_m=location.height.to_value(u.m)) + + skyfield_mercury = earth.at(skyfield_t).observe(mercury).apparent() + skyfield_jupiter = earth.at(skyfield_t).observe(jupiter).apparent() + skyfield_moon = earth.at(skyfield_t).observe(moon).apparent() + + if location is not None: + obsgeoloc, obsgeovel = location.get_gcrs_posvel(t) + frame = GCRS(obstime=t, obsgeoloc=obsgeoloc, obsgeovel=obsgeovel) + else: + frame = GCRS(obstime=t) + + ra, dec, dist = skyfield_mercury.radec(epoch='date') + skyfield_mercury = SkyCoord(ra.to(u.deg), dec.to(u.deg), distance=dist.to(u.km), + frame=frame) + ra, dec, dist = skyfield_jupiter.radec(epoch='date') + skyfield_jupiter = SkyCoord(ra.to(u.deg), dec.to(u.deg), distance=dist.to(u.km), + frame=frame) + ra, dec, dist = skyfield_moon.radec(epoch='date') + skyfield_moon = SkyCoord(ra.to(u.deg), dec.to(u.deg), distance=dist.to(u.km), + frame=frame) + + moon_astropy = get_moon(t, location, ephemeris='de430') + mercury_astropy = get_body('mercury', t, location, ephemeris='de430') + jupiter_astropy = get_body('jupiter', t, location, ephemeris='de430') + + # convert to true equator and equinox + jupiter_astropy = _apparent_position_in_true_coordinates(jupiter_astropy) + mercury_astropy = _apparent_position_in_true_coordinates(mercury_astropy) + moon_astropy = _apparent_position_in_true_coordinates(moon_astropy) + + assert (moon_astropy.separation(skyfield_moon) < + skyfield_angular_separation_tolerance) + assert (moon_astropy.separation_3d(skyfield_moon) < skyfield_separation_tolerance) + + assert (jupiter_astropy.separation(skyfield_jupiter) < + 
skyfield_angular_separation_tolerance) + assert (jupiter_astropy.separation_3d(skyfield_jupiter) < + skyfield_separation_tolerance) + + assert (mercury_astropy.separation(skyfield_mercury) < + skyfield_angular_separation_tolerance) + assert (mercury_astropy.separation_3d(skyfield_mercury) < + skyfield_separation_tolerance) + + +class TestPositionsGeocentric(object): + """ + Test positions against those generated by JPL Horizons accessed on + 2016-03-28, with refraction turned on. + """ + + def setup(self): + self.t = Time('1980-03-25 00:00') + self.frame = GCRS(obstime=self.t) + # Results returned by JPL Horizons web interface + self.horizons = { + 'mercury': SkyCoord(ra='22h41m47.78s', dec='-08d29m32.0s', + distance=c*6.323037*u.min, frame=self.frame), + 'moon': SkyCoord(ra='07h32m02.62s', dec='+18d34m05.0s', + distance=c*0.021921*u.min, frame=self.frame), + 'jupiter': SkyCoord(ra='10h17m12.82s', dec='+12d02m57.0s', + distance=c*37.694557*u.min, frame=self.frame), + 'sun': SkyCoord(ra='00h16m31.00s', dec='+01d47m16.9s', + distance=c*8.294858*u.min, frame=self.frame)} + + @pytest.mark.parametrize(('body', 'sep_tol', 'dist_tol'), + (('mercury', 7.*u.arcsec, 1000*u.km), + ('jupiter', 78.*u.arcsec, 76000*u.km), + ('moon', 20.*u.arcsec, 80*u.km), + ('sun', 5.*u.arcsec, 11.*u.km))) + def test_erfa_planet(self, body, sep_tol, dist_tol): + """Test predictions using erfa/plan94. + + Accuracies are maximum deviations listed in erfa/plan94.c, for Jupiter and + Mercury, and that quoted in Meeus "Astronomical Algorithms" (1998) for the Moon. + """ + astropy = get_body(body, self.t, ephemeris='builtin') + horizons = self.horizons[body] + + # convert to true equator and equinox + astropy = _apparent_position_in_true_coordinates(astropy) + + # Assert sky coordinates are close. + assert astropy.separation(horizons) < sep_tol + + # Assert distances are close. + assert_quantity_allclose(astropy.distance, horizons.distance, + atol=dist_tol) + + @remote_data + @pytest.mark.skipif('not HAS_JPLEPHEM') + @pytest.mark.parametrize('body', ('mercury', 'jupiter', 'sun')) + def test_de432s_planet(self, body): + astropy = get_body(body, self.t, ephemeris='de432s') + horizons = self.horizons[body] + + # convert to true equator and equinox + astropy = _apparent_position_in_true_coordinates(astropy) + + # Assert sky coordinates are close. + assert (astropy.separation(horizons) < + de432s_separation_tolerance_planets) + + # Assert distances are close. + assert_quantity_allclose(astropy.distance, horizons.distance, + atol=de432s_distance_tolerance) + + @remote_data + @pytest.mark.skipif('not HAS_JPLEPHEM') + def test_de432s_moon(self): + astropy = get_moon(self.t, ephemeris='de432s') + horizons = self.horizons['moon'] + + # convert to true equator and equinox + astropy = _apparent_position_in_true_coordinates(astropy) + + # Assert sky coordinates are close. + assert (astropy.separation(horizons) < + de432s_separation_tolerance_moon) + + # Assert distances are close. + assert_quantity_allclose(astropy.distance, horizons.distance, + atol=de432s_distance_tolerance) + + +class TestPositionKittPeak(object): + """ + Test positions against those generated by JPL Horizons accessed on + 2016-03-28, with refraction turned on. 
+ """ + + def setup(self): + kitt_peak = EarthLocation.from_geodetic(lon=-111.6*u.deg, + lat=31.963333333333342*u.deg, + height=2120*u.m) + self.t = Time('2014-09-25T00:00', location=kitt_peak) + obsgeoloc, obsgeovel = kitt_peak.get_gcrs_posvel(self.t) + self.frame = GCRS(obstime=self.t, + obsgeoloc=obsgeoloc, obsgeovel=obsgeovel) + # Results returned by JPL Horizons web interface + self.horizons = { + 'mercury': SkyCoord(ra='13h38m58.50s', dec='-13d34m42.6s', + distance=c*7.699020*u.min, frame=self.frame), + 'moon': SkyCoord(ra='12h33m12.85s', dec='-05d17m54.4s', + distance=c*0.022054*u.min, frame=self.frame), + 'jupiter': SkyCoord(ra='09h09m55.55s', dec='+16d51m57.8s', + distance=c*49.244937*u.min, frame=self.frame)} + + @pytest.mark.parametrize(('body', 'sep_tol', 'dist_tol'), + (('mercury', 7.*u.arcsec, 500*u.km), + ('jupiter', 78.*u.arcsec, 82000*u.km))) + def test_erfa_planet(self, body, sep_tol, dist_tol): + """Test predictions using erfa/plan94. + + Accuracies are maximum deviations listed in erfa/plan94.c. + """ + # Add uncertainty in position of Earth + dist_tol = dist_tol + 1300 * u.km + + astropy = get_body(body, self.t, ephemeris='builtin') + horizons = self.horizons[body] + + # convert to true equator and equinox + astropy = _apparent_position_in_true_coordinates(astropy) + + # Assert sky coordinates are close. + assert astropy.separation(horizons) < sep_tol + + # Assert distances are close. + assert_quantity_allclose(astropy.distance, horizons.distance, + atol=dist_tol) + + @remote_data + @pytest.mark.skipif('not HAS_JPLEPHEM') + @pytest.mark.parametrize('body', ('mercury', 'jupiter')) + def test_de432s_planet(self, body): + astropy = get_body(body, self.t, ephemeris='de432s') + horizons = self.horizons[body] + + # convert to true equator and equinox + astropy = _apparent_position_in_true_coordinates(astropy) + + # Assert sky coordinates are close. + assert (astropy.separation(horizons) < + de432s_separation_tolerance_planets) + + # Assert distances are close. + assert_quantity_allclose(astropy.distance, horizons.distance, + atol=de432s_distance_tolerance) + + @remote_data + @pytest.mark.skipif('not HAS_JPLEPHEM') + def test_de432s_moon(self): + astropy = get_moon(self.t, ephemeris='de432s') + horizons = self.horizons['moon'] + + # convert to true equator and equinox + astropy = _apparent_position_in_true_coordinates(astropy) + + # Assert sky coordinates are close. + assert (astropy.separation(horizons) < + de432s_separation_tolerance_moon) + + # Assert distances are close. 
+ assert_quantity_allclose(astropy.distance, horizons.distance, + atol=de432s_distance_tolerance) + + @remote_data + @pytest.mark.skipif('not HAS_JPLEPHEM') + @pytest.mark.parametrize('bodyname', ('mercury', 'jupiter')) + def test_custom_kernel_spec_body(self, bodyname): + """ + Checks that giving a kernel specifier instead of a body name works + """ + coord_by_name = get_body(bodyname, self.t, ephemeris='de432s') + kspec = BODY_NAME_TO_KERNEL_SPEC[bodyname] + coord_by_kspec = get_body(kspec, self.t, ephemeris='de432s') + + assert_quantity_allclose(coord_by_name.ra, coord_by_kspec.ra) + assert_quantity_allclose(coord_by_name.dec, coord_by_kspec.dec) + assert_quantity_allclose(coord_by_name.distance, coord_by_kspec.distance) + + +@remote_data +@pytest.mark.skipif('not HAS_JPLEPHEM') +@pytest.mark.parametrize('time', (Time('1960-01-12 00:00'), + Time('1980-03-25 00:00'), + Time('2010-10-13 00:00'))) +def test_get_sun_consistency(time): + """ + Test that the sun from JPL and the builtin get_sun match + """ + sun_jpl_gcrs = get_body('sun', time, ephemeris='de432s') + builtin_get_sun = get_sun(time) + sep = builtin_get_sun.separation(sun_jpl_gcrs) + assert sep < 0.1*u.arcsec + + +def test_get_moon_nonscalar_regression(): + """ + Test that the builtin ephemeris works with non-scalar times. + + See Issue #5069. + """ + times = Time(["2015-08-28 03:30", "2015-09-05 10:30"]) + # the following line will raise an Exception if the bug recurs. + get_moon(times, ephemeris='builtin') + + +def test_barycentric_pos_posvel_same(): + # Check that the two routines give identical results. + ep1 = get_body_barycentric('earth', Time('2016-03-20T12:30:00')) + ep2, _ = get_body_barycentric_posvel('earth', Time('2016-03-20T12:30:00')) + assert np.all(ep1.xyz == ep2.xyz) + + +def test_earth_barycentric_velocity_rough(): + # Check that a time near the equinox gives roughly the right result. + ep, ev = get_body_barycentric_posvel('earth', Time('2016-03-20T12:30:00')) + assert_quantity_allclose(ep.xyz, [-1., 0., 0.]*u.AU, atol=0.01*u.AU) + expected = u.Quantity([0.*u.one, + np.cos(23.5*u.deg), + np.sin(23.5*u.deg)]) * -30. * u.km / u.s + assert_quantity_allclose(ev.xyz, expected, atol=1.*u.km/u.s) + + +def test_earth_barycentric_velocity_multi_d(): + # Might as well test it with a multidimensional array too. + t = Time('2016-03-20T12:30:00') + np.arange(8.).reshape(2, 2, 2) * u.yr / 2. + ep, ev = get_body_barycentric_posvel('earth', t) + # note: assert_quantity_allclose doesn't like the shape mismatch. + # this is a problem with np.testing.assert_allclose. 
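+    # (added note: get_xyz(xyz_axis=-1) moves the x/y/z components to the last
+    # axis, so the (2, 2, 2, 3) values broadcast against the (2, 3) targets)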
+    assert quantity_allclose(ep.get_xyz(xyz_axis=-1),
+                             [[-1., 0., 0.], [+1., 0., 0.]]*u.AU,
+                             atol=0.06*u.AU)
+    expected = u.Quantity([0.*u.one,
+                           np.cos(23.5*u.deg),
+                           np.sin(23.5*u.deg)]) * ([[-30.], [30.]] * u.km / u.s)
+    assert quantity_allclose(ev.get_xyz(xyz_axis=-1), expected,
+                             atol=2.*u.km/u.s)
+
+
+@remote_data
+@pytest.mark.skipif('not HAS_JPLEPHEM')
+@pytest.mark.parametrize(('body', 'pos_tol', 'vel_tol'),
+                         (('mercury', 1000.*u.km, 1.*u.km/u.s),
+                          ('jupiter', 100000.*u.km, 2.*u.km/u.s),
+                          ('earth', 10*u.km, 10*u.mm/u.s)))
+def test_barycentric_velocity_consistency(body, pos_tol, vel_tol):
+    # Tolerances are about 1.5 times the rms listed for plan94 and epv00,
+    # except for Mercury (which nominally is 334 km rms)
+    t = Time('2016-03-20T12:30:00')
+    ep, ev = get_body_barycentric_posvel(body, t, ephemeris='builtin')
+    dp, dv = get_body_barycentric_posvel(body, t, ephemeris='de432s')
+    assert_quantity_allclose(ep.xyz, dp.xyz, atol=pos_tol)
+    assert_quantity_allclose(ev.xyz, dv.xyz, atol=vel_tol)
+    # Might as well test it with a multidimensional array too.
+    t = Time('2016-03-20T12:30:00') + np.arange(8.).reshape(2, 2, 2) * u.yr / 2.
+    ep, ev = get_body_barycentric_posvel(body, t, ephemeris='builtin')
+    dp, dv = get_body_barycentric_posvel(body, t, ephemeris='de432s')
+    assert_quantity_allclose(ep.xyz, dp.xyz, atol=pos_tol)
+    assert_quantity_allclose(ev.xyz, dv.xyz, atol=vel_tol)
diff --git a/astropy/coordinates/tests/test_transformations.py b/astropy/coordinates/tests/test_transformations.py
new file mode 100644
index 0000000..daf2505
--- /dev/null
+++ b/astropy/coordinates/tests/test_transformations.py
@@ -0,0 +1,434 @@
+# -*- coding: utf-8 -*-
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
+
+import numpy as np
+import pytest
+
+from ... import units as u
+from .. import transformations as t
+from ..builtin_frames import ICRS, FK5, FK4, FK4NoETerms, Galactic, AltAz
+from .. import representation as r
+from ..baseframe import frame_transform_graph
+from ...tests.helper import (assert_quantity_allclose as assert_allclose,
+                             quantity_allclose, catch_warnings)
+from ...time import Time
+
+
+# Coordinates just for these tests.
+class TCoo1(ICRS):
+    pass
+
+
+class TCoo2(ICRS):
+    pass
+
+
+class TCoo3(ICRS):
+    pass
+
+
+def test_transform_classes():
+    """
+    Tests the class-based/OO syntax for creating transforms
+    """
+
+    tfun = lambda c, f: f.__class__(ra=c.ra, dec=c.dec)
+    trans1 = t.FunctionTransform(tfun, TCoo1, TCoo2,
+                                 register_graph=frame_transform_graph)
+
+    c1 = TCoo1(ra=1*u.radian, dec=0.5*u.radian)
+    c2 = c1.transform_to(TCoo2)
+    assert_allclose(c2.ra.radian, 1)
+    assert_allclose(c2.dec.radian, 0.5)
+
+    def matfunc(coo, fr):
+        return [[1, 0, 0],
+                [0, coo.ra.degree, 0],
+                [0, 0, 1]]
+    trans2 = t.DynamicMatrixTransform(matfunc, TCoo1, TCoo2)
+    trans2.register(frame_transform_graph)
+
+    c3 = TCoo1(ra=1*u.deg, dec=2*u.deg)
+    c4 = c3.transform_to(TCoo2)
+
+    assert_allclose(c4.ra.degree, 1)
+    assert_allclose(c4.dec.degree, 2)
+
+    # be sure to unregister the second one - no need for trans1 because it
+    # already got unregistered when trans2 was created.
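+    # (added note: frame_transform_graph is global state, so any transform a
+    # test registers must be unregistered again or it leaks into later tests)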
+    trans2.unregister(frame_transform_graph)
+
+
+def test_transform_decos():
+    """
+    Tests the decorator syntax for creating transforms
+    """
+    c1 = TCoo1(ra=1*u.deg, dec=2*u.deg)
+
+    @frame_transform_graph.transform(t.FunctionTransform, TCoo1, TCoo2)
+    def trans(coo1, f):
+        return TCoo2(ra=coo1.ra, dec=coo1.dec * 2)
+
+    c2 = c1.transform_to(TCoo2)
+    assert_allclose(c2.ra.degree, 1)
+    assert_allclose(c2.dec.degree, 4)
+
+    c3 = TCoo1(r.CartesianRepresentation(x=1*u.pc, y=1*u.pc, z=2*u.pc))
+
+    @frame_transform_graph.transform(t.StaticMatrixTransform, TCoo1, TCoo2)
+    def matrix():
+        return [[2, 0, 0],
+                [0, 1, 0],
+                [0, 0, 1]]
+
+    c4 = c3.transform_to(TCoo2)
+
+    assert_allclose(c4.cartesian.x, 2*u.pc)
+    assert_allclose(c4.cartesian.y, 1*u.pc)
+    assert_allclose(c4.cartesian.z, 2*u.pc)
+
+
+def test_shortest_path():
+    class FakeTransform(object):
+        def __init__(self, pri):
+            self.priority = pri
+
+    g = t.TransformGraph()
+
+    # cheating by adding graph elements directly that are not classes - the
+    # graphing algorithm still works fine with integers - it just isn't a valid
+    # TransformGraph
+
+    # the graph is a down-going diamond with the lower-right slightly
+    # heavier and a cycle from the bottom to the top
+    # also, a pair of nodes isolated from 1
+
+    g._graph[1][2] = FakeTransform(1)
+    g._graph[1][3] = FakeTransform(1)
+    g._graph[2][4] = FakeTransform(1)
+    g._graph[3][4] = FakeTransform(2)
+    g._graph[4][1] = FakeTransform(5)
+
+    g._graph[5][6] = FakeTransform(1)
+
+    path, d = g.find_shortest_path(1, 2)
+    assert path == [1, 2]
+    assert d == 1
+    path, d = g.find_shortest_path(1, 3)
+    assert path == [1, 3]
+    assert d == 1
+    path, d = g.find_shortest_path(1, 4)
+    print('Cached paths:', g._shortestpaths)
+    assert path == [1, 2, 4]
+    assert d == 2
+
+    # unreachable
+    path, d = g.find_shortest_path(1, 5)
+    assert path is None
+    assert d == float('inf')
+
+    path, d = g.find_shortest_path(5, 6)
+    assert path == [5, 6]
+    assert d == 1
+
+
+def test_sphere_cart():
+    """
+    Tests the spherical <-> cartesian transform functions
+    """
+    from ...utils import NumpyRNGContext
+    from .. import spherical_to_cartesian, cartesian_to_spherical
+
+    x, y, z = spherical_to_cartesian(1, 0, 0)
+    assert_allclose(x, 1)
+    assert_allclose(y, 0)
+    assert_allclose(z, 0)
+
+    x, y, z = spherical_to_cartesian(0, 1, 1)
+    assert_allclose(x, 0)
+    assert_allclose(y, 0)
+    assert_allclose(z, 0)
+
+    x, y, z = spherical_to_cartesian(5, 0, np.arcsin(4. / 5.))
+    assert_allclose(x, 3)
+    assert_allclose(y, 4)
+    assert_allclose(z, 0)
+
+    r, lat, lon = cartesian_to_spherical(0, 1, 0)
+    assert_allclose(r, 1)
+    assert_allclose(lat, 0 * u.deg)
+    assert_allclose(lon, np.pi / 2 * u.rad)
+
+    # test round-tripping
+    with NumpyRNGContext(13579):
+        x, y, z = np.random.randn(3, 5)
+
+    r, lat, lon = cartesian_to_spherical(x, y, z)
+    x2, y2, z2 = spherical_to_cartesian(r, lat, lon)
+
+    assert_allclose(x, x2)
+    assert_allclose(y, y2)
+    assert_allclose(z, z2)
+
+
+def test_transform_path_pri():
+    """
+    This checks that the transformation path prioritization works by
+    making sure the ICRS -> Gal transformation always goes through FK5
+    and not FK4.
+ """ + frame_transform_graph.invalidate_cache() + tpath, td = frame_transform_graph.find_shortest_path(ICRS, Galactic) + assert tpath == [ICRS, FK5, Galactic] + assert td == 2 + + # but direct from FK4 to Galactic should still be possible + tpath, td = frame_transform_graph.find_shortest_path(FK4, Galactic) + assert tpath == [FK4, FK4NoETerms, Galactic] + assert td == 2 + + +def test_obstime(): + """ + Checks to make sure observation time is + accounted for at least in FK4 <-> ICRS transformations + """ + b1950 = Time('B1950', scale='utc') + j1975 = Time('J1975', scale='utc') + + fk4_50 = FK4(ra=1*u.deg, dec=2*u.deg, obstime=b1950) + fk4_75 = FK4(ra=1*u.deg, dec=2*u.deg, obstime=j1975) + + icrs_50 = fk4_50.transform_to(ICRS) + icrs_75 = fk4_75.transform_to(ICRS) + + # now check that the resulting coordinates are *different* - they should be, + # because the obstime is different + assert icrs_50.ra.degree != icrs_75.ra.degree + assert icrs_50.dec.degree != icrs_75.dec.degree + +# ------------------------------------------------------------------------------ +# Affine transform tests and helpers: + +# just acting as a namespace + + +class transfunc(object): + rep = r.CartesianRepresentation(np.arange(3)*u.pc) + dif = r.CartesianDifferential(*np.arange(3, 6)*u.pc/u.Myr) + rep0 = r.CartesianRepresentation(np.zeros(3)*u.pc) + + @classmethod + def both(cls, coo, fr): + # exchange x <-> z and offset + M = np.array([[0., 0., 1.], + [0., 1., 0.], + [1., 0., 0.]]) + return M, cls.rep.with_differentials(cls.dif) + + @classmethod + def just_matrix(cls, coo, fr): + # exchange x <-> z and offset + M = np.array([[0., 0., 1.], + [0., 1., 0.], + [1., 0., 0.]]) + return M, None + + @classmethod + def no_matrix(cls, coo, fr): + return None, cls.rep.with_differentials(cls.dif) + + @classmethod + def no_pos(cls, coo, fr): + return None, cls.rep0.with_differentials(cls.dif) + + @classmethod + def no_vel(cls, coo, fr): + return None, cls.rep + + +@pytest.mark.parametrize('transfunc', [transfunc.both, transfunc.no_matrix, + transfunc.no_pos, transfunc.no_vel, + transfunc.just_matrix]) +@pytest.mark.parametrize('rep', [ + r.CartesianRepresentation(5, 6, 7, unit=u.pc), + r.CartesianRepresentation(5, 6, 7, unit=u.pc, + differentials=r.CartesianDifferential(8, 9, 10, + unit=u.pc/u.Myr)), + r.CartesianRepresentation(5, 6, 7, unit=u.pc, + differentials=r.CartesianDifferential(8, 9, 10, + unit=u.pc/u.Myr)) + .represent_as(r.CylindricalRepresentation, r.CylindricalDifferential) +]) +def test_affine_transform_succeed(transfunc, rep): + c = TCoo1(rep) + + # compute expected output + M, offset = transfunc(c, TCoo2) + + _rep = rep.to_cartesian() + diffs = dict([(k, diff.represent_as(r.CartesianDifferential, rep)) + for k, diff in rep.differentials.items()]) + expected_rep = _rep.with_differentials(diffs) + + if M is not None: + expected_rep = expected_rep.transform(M) + + expected_pos = expected_rep.without_differentials() + if offset is not None: + expected_pos = expected_pos + offset.without_differentials() + + expected_vel = None + if c.data.differentials: + expected_vel = expected_rep.differentials['s'] + + if offset and offset.differentials: + expected_vel = (expected_vel + offset.differentials['s']) + + # register and do the transformation and check against expected + trans = t.AffineTransform(transfunc, TCoo1, TCoo2) + trans.register(frame_transform_graph) + + c2 = c.transform_to(TCoo2) + + assert quantity_allclose(c2.data.to_cartesian().xyz, + expected_pos.to_cartesian().xyz) + + if expected_vel is not None: + diff 
= c2.data.differentials['s'].to_cartesian(base=c2.data) + assert quantity_allclose(diff.xyz, expected_vel.d_xyz) + + trans.unregister(frame_transform_graph) + + +# these should fail +def transfunc_invalid_matrix(coo, fr): + return np.eye(4), None + +# Leaving this open in case we want to add more functions to check for failures + + +@pytest.mark.parametrize('transfunc', [transfunc_invalid_matrix]) +def test_affine_transform_fail(transfunc): + diff = r.CartesianDifferential(8, 9, 10, unit=u.pc/u.Myr) + rep = r.CartesianRepresentation(5, 6, 7, unit=u.pc, differentials=diff) + c = TCoo1(rep) + + # register and do the transformation and check against expected + trans = t.AffineTransform(transfunc, TCoo1, TCoo2) + trans.register(frame_transform_graph) + + with pytest.raises(ValueError): + c2 = c.transform_to(TCoo2) + + trans.unregister(frame_transform_graph) + + +def test_too_many_differentials(): + dif1 = r.CartesianDifferential(*np.arange(3, 6)*u.pc/u.Myr) + dif2 = r.CartesianDifferential(*np.arange(3, 6)*u.pc/u.Myr**2) + rep = r.CartesianRepresentation(np.arange(3)*u.pc, + differentials={'s': dif1, 's2': dif2}) + + with pytest.raises(ValueError): + c = TCoo1(rep) + + # register and do the transformation and check against expected + trans = t.AffineTransform(transfunc.both, TCoo1, TCoo2) + trans.register(frame_transform_graph) + + # Check that if frame somehow gets through to transformation, multiple + # differentials are caught + c = TCoo1(rep.without_differentials()) + c._data = c._data.with_differentials({'s': dif1, 's2': dif2}) + with pytest.raises(ValueError): + c2 = c.transform_to(TCoo2) + + trans.unregister(frame_transform_graph) + +# A matrix transform of a unit spherical with differentials should work + + +@pytest.mark.parametrize('rep', [ + r.UnitSphericalRepresentation(lon=15*u.degree, lat=-11*u.degree, + differentials=r.SphericalDifferential(d_lon=15*u.mas/u.yr, + d_lat=11*u.mas/u.yr, + d_distance=-110*u.km/u.s)), + r.UnitSphericalRepresentation(lon=15*u.degree, lat=-11*u.degree, + differentials={'s': r.RadialDifferential(d_distance=-110*u.km/u.s)}), + r.SphericalRepresentation(lon=15*u.degree, lat=-11*u.degree, + distance=150*u.pc, + differentials={'s': r.RadialDifferential(d_distance=-110*u.km/u.s)}) +]) +def test_unit_spherical_with_differentials(rep): + + c = TCoo1(rep) + + # register and do the transformation and check against expected + trans = t.AffineTransform(transfunc.just_matrix, TCoo1, TCoo2) + trans.register(frame_transform_graph) + c2 = c.transform_to(TCoo2) + + assert 's' in rep.differentials + assert isinstance(c2.data.differentials['s'], + rep.differentials['s'].__class__) + + if isinstance(rep.differentials['s'], r.RadialDifferential): + assert c2.data.differentials['s'] is rep.differentials['s'] + + trans.unregister(frame_transform_graph) + + # should fail if we have to do offsets + trans = t.AffineTransform(transfunc.both, TCoo1, TCoo2) + trans.register(frame_transform_graph) + + with pytest.raises(TypeError): + c.transform_to(TCoo2) + + trans.unregister(frame_transform_graph) + + +def test_vel_transformation_obstime_err(): + # TODO: replace after a final decision on PR #6280 + from ..sites import get_builtin_sites + + diff = r.CartesianDifferential([.1, .2, .3]*u.km/u.s) + rep = r.CartesianRepresentation([1, 2, 3]*u.au, differentials=diff) + + loc = get_builtin_sites()['example_site'] + + aaf = AltAz(obstime='J2010', location=loc) + aaf2 = AltAz(obstime=aaf.obstime + 3*u.day, location=loc) + aaf3 = AltAz(obstime=aaf.obstime + np.arange(3)*u.day, location=loc) 
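+    # (added note: aaf2 and aaf3 differ from aaf in obstime, so transforming
+    # data with velocities to them must raise; aaf4 matches aaf and works)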
+ aaf4 = AltAz(obstime=aaf.obstime, location=loc) + + aa = aaf.realize_frame(rep) + + with pytest.raises(NotImplementedError) as exc: + aa.transform_to(aaf2) + assert 'cannot transform' in exc.value.args[0] + + with pytest.raises(NotImplementedError) as exc: + aa.transform_to(aaf3) + assert 'cannot transform' in exc.value.args[0] + + aa.transform_to(aaf4) + + aa.transform_to(ICRS()) + + +def test_function_transform_with_differentials(): + tfun = lambda c, f: f.__class__(ra=c.ra, dec=c.dec) + ftrans = t.FunctionTransform(tfun, TCoo3, TCoo2, + register_graph=frame_transform_graph) + + t3 = TCoo3(ra=1*u.deg, dec=2*u.deg, pm_ra_cosdec=1*u.marcsec/u.yr, + pm_dec=1*u.marcsec/u.yr,) + + with catch_warnings() as w: + t2 = t3.transform_to(TCoo2) + assert len(w) == 1 + assert 'they have been dropped' in str(w[0].message) diff --git a/astropy/coordinates/tests/test_unit_representation.py b/astropy/coordinates/tests/test_unit_representation.py new file mode 100644 index 0000000..1453e90 --- /dev/null +++ b/astropy/coordinates/tests/test_unit_representation.py @@ -0,0 +1,83 @@ +""" +This file tests the behaviour of subclasses of Representation and Frames +""" + +from copy import deepcopy +from collections import OrderedDict + +from astropy.coordinates import Longitude, Latitude +from astropy.coordinates.representation import (REPRESENTATION_CLASSES, + SphericalRepresentation, + UnitSphericalRepresentation) +from astropy.coordinates.baseframe import frame_transform_graph +from astropy.coordinates.transformations import FunctionTransform +from astropy.coordinates import ICRS +from astropy.coordinates.baseframe import RepresentationMapping + +import astropy.units as u + +import astropy.coordinates + +# Classes setup, borrowed from SunPy. + +# Here we define the classes *inside* the tests to make sure that we can wipe +# the slate clean when the tests have finished running. 
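+# A rough sketch of the snapshot/restore pattern used below (illustrative
+# only; ``saved`` is a hypothetical name, not part of the test suite):
+#
+#     saved = deepcopy(REPRESENTATION_CLASSES)   # snapshot before the test
+#     ...                                        # the test registers classes
+#     REPRESENTATION_CLASSES.clear()             # then restore the registry
+#     REPRESENTATION_CLASSES.update(saved)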
+ + +def setup_function(func): + func.REPRESENTATION_CLASSES_ORIG = deepcopy(REPRESENTATION_CLASSES) + + +def teardown_function(func): + REPRESENTATION_CLASSES.clear() + REPRESENTATION_CLASSES.update(func.REPRESENTATION_CLASSES_ORIG) + + +def test_unit_representation_subclass(): + + class Longitude180(Longitude): + def __new__(cls, angle, unit=None, wrap_angle=180*u.deg, **kwargs): + self = super(Longitude180, cls).__new__(cls, angle, unit=unit, + wrap_angle=wrap_angle, **kwargs) + return self + + class UnitSphericalWrap180Representation(UnitSphericalRepresentation): + attr_classes = OrderedDict([('lon', Longitude180), + ('lat', Latitude)]) + recommended_units = {'lon': u.deg, 'lat': u.deg} + + class SphericalWrap180Representation(SphericalRepresentation): + attr_classes = OrderedDict([('lon', Longitude180), + ('lat', Latitude), + ('distance', u.Quantity)]) + recommended_units = {'lon': u.deg, 'lat': u.deg} + + _unit_representation = UnitSphericalWrap180Representation + + class myframe(ICRS): + default_representation = SphericalWrap180Representation + frame_specific_representation_info = { + 'spherical': [RepresentationMapping('lon', 'ra'), + RepresentationMapping('lat', 'dec')] + } + frame_specific_representation_info['unitspherical'] = \ + frame_specific_representation_info['unitsphericalwrap180'] = \ + frame_specific_representation_info['sphericalwrap180'] = \ + frame_specific_representation_info['spherical'] + + @frame_transform_graph.transform(FunctionTransform, + myframe, astropy.coordinates.ICRS) + def myframe_to_icrs(myframe_coo, icrs): + return icrs.realize_frame(myframe_coo._data) + + f = myframe(10*u.deg, 10*u.deg) + assert isinstance(f._data, UnitSphericalWrap180Representation) + assert isinstance(f.ra, Longitude180) + + g = f.transform_to(astropy.coordinates.ICRS) + assert isinstance(g, astropy.coordinates.ICRS) + assert isinstance(g._data, UnitSphericalWrap180Representation) + + frame_transform_graph.remove_transform(myframe, + astropy.coordinates.ICRS, + None) diff --git a/astropy/coordinates/tests/test_velocity_corrs.py b/astropy/coordinates/tests/test_velocity_corrs.py new file mode 100644 index 0000000..77a90b1 --- /dev/null +++ b/astropy/coordinates/tests/test_velocity_corrs.py @@ -0,0 +1,300 @@ +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import pytest + +import numpy as np + +from ...tests.helper import assert_quantity_allclose +from ... import units as u +from ...time import Time +from .. import EarthLocation, SkyCoord, Angle +from ..sites import get_builtin_sites + + +@pytest.mark.parametrize('kind', ['heliocentric', 'barycentric']) +def test_basic(kind): + t0 = Time('2015-1-1') + loc = get_builtin_sites()['example_site'] + + sc = SkyCoord(0, 0, unit=u.deg, obstime=t0, location=loc) + rvc0 = sc.radial_velocity_correction(kind) + + assert rvc0.shape == () + assert rvc0.unit.is_equivalent(u.km/u.s) + + scs = SkyCoord(0, 0, unit=u.deg, obstime=t0 + np.arange(10)*u.day, + location=loc) + rvcs = scs.radial_velocity_correction(kind) + assert rvcs.shape == (10,) + assert rvcs.unit.is_equivalent(u.km/u.s) + + +test_input_time = Time(2457244.5, format='jd') +# test_input_loc = EarthLocation.of_site('Cerro Paranal') +# to avoid the network hit we just copy here what that yields +test_input_loc = EarthLocation.from_geodetic(lon=-70.403*u.deg, + lat=-24.6252*u.deg, + height=2635*u.m) + + +def test_helio_iraf(): + """ + Compare the heliocentric correction to the IRAF rvcorrect. 
+ `generate_IRAF_input` function is provided to show how the comparison data + was produced + + """ + # this is based on running IRAF with the output of `generate_IRAF_input` below + rvcorr_result = """ + # RVCORRECT: Observatory parameters for European Southern Observatory: Paranal + # latitude = -24:37.5 + # longitude = 70:24.2 + # altitude = 2635 + ## HJD VOBS VHELIO VLSR VDIURNAL VLUNAR VANNUAL VSOLAR + 2457244.50120 0.00 -10.36 -20.35 -0.034 -0.001 -10.325 -9.993 + 2457244.50025 0.00 -14.20 -23.86 -0.115 -0.004 -14.085 -9.656 + 2457244.50278 0.00 -2.29 -11.75 0.115 0.004 -2.413 -9.459 + 2457244.50025 0.00 -14.20 -23.86 -0.115 -0.004 -14.085 -9.656 + 2457244.49929 0.00 -17.41 -26.30 -0.192 -0.006 -17.214 -8.888 + 2457244.50317 0.00 -17.19 -17.44 0.078 0.001 -17.269 -0.253 + 2457244.50348 0.00 2.35 -6.21 0.192 0.006 2.156 -8.560 + 2457244.49959 0.00 2.13 -15.06 -0.078 -0.000 2.211 -17.194 + 2457244.49929 0.00 -17.41 -26.30 -0.192 -0.006 -17.214 -8.888 + 2457244.49835 0.00 -19.84 -27.56 -0.259 -0.008 -19.573 -7.721 + 2457244.50186 0.00 -24.47 -22.16 -0.038 -0.004 -24.433 2.313 + 2457244.50470 0.00 -11.11 -8.57 0.221 0.005 -11.332 2.534 + 2457244.50402 0.00 6.90 -0.38 0.259 0.008 6.629 -7.277 + 2457244.50051 0.00 11.53 -5.78 0.038 0.004 11.489 -17.311 + 2457244.49768 0.00 -1.84 -19.37 -0.221 -0.004 -1.612 -17.533 + 2457244.49835 0.00 -19.84 -27.56 -0.259 -0.008 -19.573 -7.721 + 2457244.49749 0.00 -21.38 -27.59 -0.315 -0.010 -21.056 -6.209 + 2457244.50109 0.00 -27.69 -22.90 -0.096 -0.006 -27.584 4.785 + 2457244.50457 0.00 -17.00 -9.30 0.196 0.003 -17.201 7.704 + 2457244.50532 0.00 2.62 2.97 0.340 0.009 2.276 0.349 + 2457244.50277 0.00 16.42 4.67 0.228 0.009 16.178 -11.741 + 2457244.49884 0.00 13.98 -5.48 -0.056 0.002 14.039 -19.463 + 2457244.49649 0.00 -2.84 -19.84 -0.297 -0.007 -2.533 -17.000 + 2457244.49749 0.00 -21.38 -27.59 -0.315 -0.010 -21.056 -6.209 + 2457244.49675 0.00 -21.97 -26.39 -0.357 -0.011 -21.598 -4.419 + 2457244.50025 0.00 -29.30 -22.47 -0.149 -0.008 -29.146 6.831 + 2457244.50398 0.00 -21.55 -9.88 0.146 0.001 -21.700 11.670 + 2457244.50577 0.00 -3.26 4.00 0.356 0.009 -3.623 7.263 + 2457244.50456 0.00 14.87 11.06 0.357 0.011 14.497 -3.808 + 2457244.50106 0.00 22.20 7.14 0.149 0.008 22.045 -15.058 + 2457244.49732 0.00 14.45 -5.44 -0.146 -0.001 14.600 -19.897 + 2457244.49554 0.00 -3.84 -19.33 -0.356 -0.008 -3.478 -15.491 + 2457244.49675 0.00 -21.97 -26.39 -0.357 -0.011 -21.598 -4.419 + 2457244.49615 0.00 -21.57 -24.00 -0.383 -0.012 -21.172 -2.432 + 2457244.49942 0.00 -29.36 -20.83 -0.193 -0.009 -29.157 8.527 + 2457244.50312 0.00 -24.26 -9.75 0.088 -0.001 -24.348 14.511 + 2457244.50552 0.00 -8.66 4.06 0.327 0.007 -8.996 12.721 + 2457244.50549 0.00 10.14 14.13 0.413 0.012 9.715 3.994 + 2457244.50305 0.00 23.35 15.76 0.306 0.011 23.031 -7.586 + 2457244.49933 0.00 24.78 8.18 0.056 0.006 24.721 -16.601 + 2457244.49609 0.00 13.77 -5.06 -0.221 -0.003 13.994 -18.832 + 2457244.49483 0.00 -4.53 -17.77 -0.394 -0.010 -4.131 -13.237 + 2457244.49615 0.00 -21.57 -24.00 -0.383 -0.012 -21.172 -2.432 + 2457244.49572 0.00 -20.20 -20.54 -0.392 -0.013 -19.799 -0.335 + 2457244.49907 0.00 -28.17 -17.30 -0.197 -0.009 -27.966 10.874 + 2457244.50285 0.00 -22.96 -5.96 0.090 -0.001 -23.048 16.995 + 2457244.50531 0.00 -7.00 8.16 0.335 0.007 -7.345 15.164 + 2457244.50528 0.00 12.23 18.47 0.423 0.012 11.795 6.238 + 2457244.50278 0.00 25.74 20.13 0.313 0.012 25.416 -5.607 + 2457244.49898 0.00 27.21 12.38 0.057 0.006 27.144 -14.829 + 2457244.49566 0.00 15.94 -1.17 -0.226 -0.003 16.172 -17.111 + 2457244.49437 
0.00 -2.78 -14.17 -0.403 -0.010 -2.368 -11.387 + 2457244.49572 0.00 -20.20 -20.54 -0.392 -0.013 -19.799 -0.335 + 2457244.49548 0.00 -17.94 -16.16 -0.383 -0.012 -17.541 1.776 + 2457244.49875 0.00 -25.73 -12.99 -0.193 -0.009 -25.525 12.734 + 2457244.50246 0.00 -20.63 -1.91 0.088 -0.001 -20.716 18.719 + 2457244.50485 0.00 -5.03 11.90 0.327 0.007 -5.365 16.928 + 2457244.50482 0.00 13.77 21.97 0.413 0.012 13.347 8.202 + 2457244.50238 0.00 26.98 23.60 0.306 0.011 26.663 -3.378 + 2457244.49867 0.00 28.41 16.02 0.056 0.005 28.353 -12.393 + 2457244.49542 0.00 17.40 2.78 -0.221 -0.003 17.625 -14.625 + 2457244.49416 0.00 -0.90 -9.93 -0.394 -0.010 -0.499 -9.029 + 2457244.49548 0.00 -17.94 -16.16 -0.383 -0.012 -17.541 1.776 + 2457244.49544 0.00 -14.87 -11.06 -0.357 -0.011 -14.497 3.808 + 2457244.49894 0.00 -22.20 -7.14 -0.149 -0.008 -22.045 15.058 + 2457244.50268 0.00 -14.45 5.44 0.146 0.001 -14.600 19.897 + 2457244.50446 0.00 3.84 19.33 0.356 0.008 3.478 15.491 + 2457244.50325 0.00 21.97 26.39 0.357 0.011 21.598 4.419 + 2457244.49975 0.00 29.30 22.47 0.149 0.008 29.146 -6.831 + 2457244.49602 0.00 21.55 9.88 -0.146 -0.001 21.700 -11.670 + 2457244.49423 0.00 3.26 -4.00 -0.356 -0.009 3.623 -7.263 + 2457244.49544 0.00 -14.87 -11.06 -0.357 -0.011 -14.497 3.808 + 2457244.49561 0.00 -11.13 -5.46 -0.315 -0.010 -10.805 5.670 + 2457244.49921 0.00 -17.43 -0.77 -0.096 -0.006 -17.333 16.664 + 2457244.50269 0.00 -6.75 12.83 0.196 0.003 -6.949 19.583 + 2457244.50344 0.00 12.88 25.10 0.340 0.009 12.527 12.227 + 2457244.50089 0.00 26.67 26.80 0.228 0.009 26.430 0.137 + 2457244.49696 0.00 24.24 16.65 -0.056 0.002 24.290 -7.584 + 2457244.49461 0.00 7.42 2.29 -0.297 -0.007 7.719 -5.122 + 2457244.49561 0.00 -11.13 -5.46 -0.315 -0.010 -10.805 5.670 + 2457244.49598 0.00 -6.90 0.38 -0.259 -0.008 -6.629 7.277 + 2457244.49949 0.00 -11.53 5.78 -0.038 -0.004 -11.489 17.311 + 2457244.50232 0.00 1.84 19.37 0.221 0.004 1.612 17.533 + 2457244.50165 0.00 19.84 27.56 0.259 0.008 19.573 7.721 + 2457244.49814 0.00 24.47 22.16 0.038 0.004 24.433 -2.313 + 2457244.49530 0.00 11.11 8.57 -0.221 -0.005 11.332 -2.534 + 2457244.49598 0.00 -6.90 0.38 -0.259 -0.008 -6.629 7.277 + 2457244.49652 0.00 -2.35 6.21 -0.192 -0.006 -2.156 8.560 + 2457244.50041 0.00 -2.13 15.06 0.078 0.000 -2.211 17.194 + 2457244.50071 0.00 17.41 26.30 0.192 0.006 17.214 8.888 + 2457244.49683 0.00 17.19 17.44 -0.078 -0.001 17.269 0.253 + 2457244.49652 0.00 -2.35 6.21 -0.192 -0.006 -2.156 8.560 + 2457244.49722 0.00 2.29 11.75 -0.115 -0.004 2.413 9.459 + 2457244.49975 0.00 14.20 23.86 0.115 0.004 14.085 9.656 + 2457244.49722 0.00 2.29 11.75 -0.115 -0.004 2.413 9.459 + 2457244.49805 0.00 6.84 16.77 -0.034 -0.001 6.874 9.935 + """ + vhs_iraf = [] + for line in rvcorr_result.strip().split('\n'): + if not line.strip().startswith('#'): + vhs_iraf.append(float(line.split()[2])) + vhs_iraf = vhs_iraf*u.km/u.s + + targets = SkyCoord(_get_test_input_radecs(), obstime=test_input_time, + location=test_input_loc) + vhs_astropy = targets.radial_velocity_correction('heliocentric') + assert_quantity_allclose(vhs_astropy, vhs_iraf, atol=150*u.m/u.s) + return vhs_astropy, vhs_iraf # for interactively examination + + +def generate_IRAF_input(writefn=None): + dt = test_input_time.utc.datetime + + coos = _get_test_input_radecs() + + lines = [] + for ra, dec in zip(coos.ra, coos.dec): + rastr = Angle(ra).to_string(u.hour, sep=':') + decstr = Angle(dec).to_string(u.deg, sep=':') + + msg = '{yr} {mo} {day} {uth}:{utmin} {ra} {dec}' + lines.append(msg.format(yr=dt.year, mo=dt.month, day=dt.day, + 
uth=dt.hour, utmin=dt.minute, + ra=rastr, dec=decstr)) + if writefn: + with open(writefn, 'w') as f: + for l in lines: + f.write(l) + else: + for l in lines: + print(l) + print('Run IRAF as:\nastutil\nrvcorrect f= observatory=Paranal') + + +def _get_test_input_radecs(): + ras = [] + decs = [] + + for dec in np.linspace(-85, 85, 15): + nra = int(np.round(10*np.cos(dec*u.deg)).value) + ras1 = np.linspace(-180, 180-1e-6, nra) + ras.extend(ras1) + decs.extend([dec]*len(ras1)) + + return SkyCoord(ra=ras, dec=decs, unit=u.deg) + + +def test_barycorr(): + # this is the result of calling _get_barycorr_bvcs + barycorr_bvcs = u.Quantity([ + -10335.93326096, -14198.47605491, -2237.60012494, -14198.47595363, + -17425.46512587, -17131.70901174, 2424.37095076, 2130.61519166, + -17425.46495779, -19872.50026998, -24442.37091097, -11017.08975893, + 6978.0622355, 11547.93333743, -1877.34772637, -19872.50004258, + -21430.08240017, -27669.14280689, -16917.08506807, 2729.57222968, + 16476.49569232, 13971.97171764, -2898.04250914, -21430.08212368, + -22028.51337105, -29301.92349394, -21481.13036199, -3147.44828909, + 14959.50065514, 22232.91155425, 14412.11903105, -3921.56359768, + -22028.51305781, -21641.01479409, -29373.0512649, -24205.90521765, + -8557.34138828, 10250.50350732, 23417.2299926, 24781.98057941, + 13706.17339044, -4627.70005932, -21641.01445812, -20284.92627505, + -28193.91696959, -22908.51624166, -6901.82132125, 12336.45758056, + 25804.51614607, 27200.50029664, 15871.21385688, -2882.24738355, + -20284.9259314, -18020.92947805, -25752.96564978, -20585.81957567, + -4937.25573801, 13870.58916957, 27037.31568441, 28402.06636994, + 17326.25977035, -1007.62209045, -18020.92914212, -14950.33284575, + -22223.74260839, -14402.94943965, 3930.73265119, 22037.68163353, + 29311.09265126, 21490.30070307, 3156.62229843, -14950.33253252, + -11210.53846867, -17449.59867676, -6697.54090389, 12949.11642965, + 26696.03999586, 24191.5164355, 7321.50355488, -11210.53819218, + -6968.89359681, -11538.76423011, 1886.51695238, 19881.66902396, + 24451.54039956, 11026.26000765, -6968.89336945, -2415.20201758, + -2121.44599781, 17434.63406085, 17140.87871753, -2415.2018495, + 2246.76923076, 14207.64513054, 2246.76933194, 6808.40787728], + u.m/u.s) + + # this tries the *other* way of calling radial_velocity_correction relative + # to the IRAF tests + targets = _get_test_input_radecs() + bvcs_astropy = targets.radial_velocity_correction(obstime=test_input_time, + location=test_input_loc, + kind='barycentric') + + assert_quantity_allclose(bvcs_astropy, barycorr_bvcs, atol=10*u.mm/u.s) + return bvcs_astropy, barycorr_bvcs # for interactively examination + + +def _get_barycorr_bvcs(coos, loc, injupyter=False): + """ + Gets the barycentric correction of the test data from the + http://astroutils.astronomy.ohio-state.edu/exofast/barycorr.html web site. + Requires the https://github.com/tronsgaard/barycorr python interface to that + site. + + Provided to reproduce the test data above, but not required to actually run + the tests. 
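+
+    A sketch of how the reference values in ``test_barycorr`` above can be
+    regenerated (requires network access and the ``barycorr`` package; the
+    arguments are the module-level test inputs defined earlier in this file)::
+
+        bvcs = _get_barycorr_bvcs(_get_test_input_radecs(), test_input_loc)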
+ """ + import barycorr + from ...utils.console import ProgressBar + + bvcs = [] + for ra, dec in ProgressBar(list(zip(coos.ra.deg, coos.dec.deg)), + ipython_widget=injupyter): + res = barycorr.bvc(test_input_time.utc.jd, ra, dec, + lat=loc.geodetic[1].deg, + lon=loc.geodetic[0].deg, + elevation=loc.geodetic[2].to(u.m).value) + bvcs.append(res) + return bvcs*u.m/u.s + + +def test_rvcorr_multiple_obstimes_onskycoord(): + loc = EarthLocation(-2309223 * u.m, -3695529 * u.m, -4641767 * u.m) + arrtime = Time('2005-03-21 00:00:00') + np.linspace(-1, 1, 10)*u.day + + sc = SkyCoord(1*u.deg, 2*u.deg, 100*u.kpc, obstime=arrtime, location=loc) + rvcbary_sc2 = sc.radial_velocity_correction(kind='barycentric') + assert len(rvcbary_sc2) == 10 + + # check the multiple-obstime and multi- mode + sc = SkyCoord(([1]*10)*u.deg, 2*u.deg, 100*u.kpc, + obstime=arrtime, location=loc) + rvcbary_sc3 = sc.radial_velocity_correction(kind='barycentric') + assert len(rvcbary_sc3) == 10 + + +def test_invalid_argument_combos(): + loc = EarthLocation(-2309223 * u.m, -3695529 * u.m, -4641767 * u.m) + time = Time('2005-03-21 00:00:00') + timel = Time('2005-03-21 00:00:00', location=loc) + + scwattrs = SkyCoord(1*u.deg, 2*u.deg, obstime=time, location=loc) + scwoattrs = SkyCoord(1*u.deg, 2*u.deg) + + scwattrs.radial_velocity_correction() + with pytest.raises(ValueError): + scwattrs.radial_velocity_correction(obstime=time, location=loc) + with pytest.raises(TypeError): + scwoattrs.radial_velocity_correction(obstime=time) + + scwoattrs.radial_velocity_correction(obstime=time, location=loc) + with pytest.raises(TypeError): + scwoattrs.radial_velocity_correction() + + with pytest.raises(ValueError): + scwattrs.radial_velocity_correction(timel) diff --git a/astropy/coordinates/tests/utils.py b/astropy/coordinates/tests/utils.py new file mode 100644 index 0000000..34890f1 --- /dev/null +++ b/astropy/coordinates/tests/utils.py @@ -0,0 +1,27 @@ +# -*- coding: utf-8 -*- +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import numpy as np + +from ... import units as u +from ...utils import NumpyRNGContext + + +def randomly_sample_sphere(ntosample, randomseed=12345): + """ + Generates a set of spherical coordinates uniformly distributed over the + sphere in a way that gives the same answer for the same seed. Also + generates a random distance vector on [0, 1] (no units) + + This simply returns (lon, lat, r) instead of a representation to avoid + failures due to the representation module. + """ + with NumpyRNGContext(randomseed): + lat = np.arcsin(np.random.rand(ntosample)*2-1) + lon = np.random.rand(ntosample)*np.pi*2 + r = np.random.rand(ntosample) + + return lon*u.rad, lat*u.rad, r diff --git a/astropy/coordinates/transformations.py b/astropy/coordinates/transformations.py new file mode 100644 index 0000000..d6cf5a8 --- /dev/null +++ b/astropy/coordinates/transformations.py @@ -0,0 +1,1327 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +""" +This module contains a general framework for defining graphs of transformations +between coordinates, suitable for either spatial coordinates or more generalized +coordinate systems. + +The fundamental idea is that each class is a node in the transformation graph, +and transitions from one node to another are defined as functions (or methods) +wrapped in transformation objects. 
+ +This module also includes more specific transformation classes for +celestial/spatial coordinate frames, generally focused around matrix-style +transformations that are typically how the algorithms are defined. +""" + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import heapq +import inspect +import subprocess +from warnings import warn + +from abc import ABCMeta, abstractmethod +from collections import defaultdict, OrderedDict + +import numpy as np + +from .. import units as u +from ..utils.compat import suppress +from ..utils.compat.funcsigs import signature +from ..utils.exceptions import AstropyWarning +from ..extern import six +from ..extern.six.moves import range + +from .representation import REPRESENTATION_CLASSES + +__all__ = ['TransformGraph', 'CoordinateTransform', 'FunctionTransform', + 'BaseAffineTransform', 'AffineTransform', + 'StaticMatrixTransform', 'DynamicMatrixTransform', + 'FunctionTransformWithFiniteDifference', 'CompositeTransform'] + + +class TransformGraph(object): + """ + A graph representing the paths between coordinate frames. + """ + + def __init__(self): + self._graph = defaultdict(dict) + self.invalidate_cache() # generates cache entries + + @property + def _cached_names(self): + if self._cached_names_dct is None: + self._cached_names_dct = dct = {} + for c in self.frame_set: + nm = getattr(c, 'name', None) + if nm is not None: + dct[nm] = c + + return self._cached_names_dct + + @property + def frame_set(self): + """ + A `set` of all the frame classes present in this `TransformGraph`. + """ + if self._cached_frame_set is None: + self._cached_frame_set = frm_set = set() + for a in self._graph: + frm_set.add(a) + for b in self._graph[a]: + frm_set.add(b) + + return self._cached_frame_set.copy() + + @property + def frame_attributes(self): + """ + A `dict` of all the attributes of all frame classes in this `TransformGraph`. + """ + if self._cached_frame_attributes is None: + result = {} + for frame_cls in self.frame_set: + result.update(frame_cls.frame_attributes) + self._cached_frame_attributes = result + + return self._cached_frame_attributes + + def invalidate_cache(self): + """ + Invalidates the cache that stores optimizations for traversing the + transform graph. This is called automatically when transforms + are added or removed, but will need to be called manually if + weights on transforms are modified inplace. + """ + self._cached_names_dct = None + self._cached_frame_set = None + self._cached_frame_attributes = None + self._shortestpaths = {} + self._composite_cache = {} + + def add_transform(self, fromsys, tosys, transform): + """ + Add a new coordinate transformation to the graph. + + Parameters + ---------- + fromsys : class + The coordinate frame class to start from. + tosys : class + The coordinate frame class to transform into. + transform : CoordinateTransform or similar callable + The transformation object. Typically a `CoordinateTransform` object, + although it may be some other callable that is called with the same + signature. + + Raises + ------ + TypeError + If ``fromsys`` or ``tosys`` are not classes or ``transform`` is + not callable. 
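+
+        A sketch of direct use (``Frame1``/``Frame2`` stand in for frame
+        classes; the `transform` decorator below is the more common entry
+        point)::
+
+            graph.add_transform(Frame1, Frame2,
+                                FunctionTransform(func, Frame1, Frame2))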
+ """ + + if not inspect.isclass(fromsys): + raise TypeError('fromsys must be a class') + if not inspect.isclass(tosys): + raise TypeError('tosys must be a class') + if not six.callable(transform): + raise TypeError('transform must be callable') + + self._graph[fromsys][tosys] = transform + self.invalidate_cache() + + def remove_transform(self, fromsys, tosys, transform): + """ + Removes a coordinate transform from the graph. + + Parameters + ---------- + fromsys : class or `None` + The coordinate frame *class* to start from. If `None`, + ``transform`` will be searched for and removed (``tosys`` must + also be `None`). + tosys : class or `None` + The coordinate frame *class* to transform into. If `None`, + ``transform`` will be searched for and removed (``fromsys`` must + also be `None`). + transform : callable or `None` + The transformation object to be removed or `None`. If `None` + and ``tosys`` and ``fromsys`` are supplied, there will be no + check to ensure the correct object is removed. + """ + if fromsys is None or tosys is None: + if not (tosys is None and fromsys is None): + raise ValueError('fromsys and tosys must both be None if either are') + if transform is None: + raise ValueError('cannot give all Nones to remove_transform') + + # search for the requested transform by brute force and remove it + for a in self._graph: + agraph = self._graph[a] + for b in agraph: + if b is transform: + del agraph[b] + break + else: + raise ValueError('Could not find transform {0} in the ' + 'graph'.format(transform)) + + else: + if transform is None: + self._graph[fromsys].pop(tosys, None) + else: + curr = self._graph[fromsys].get(tosys, None) + if curr is transform: + self._graph[fromsys].pop(tosys) + else: + raise ValueError('Current transform from {0} to {1} is not ' + '{2}'.format(fromsys, tosys, transform)) + self.invalidate_cache() + + def find_shortest_path(self, fromsys, tosys): + """ + Computes the shortest distance along the transform graph from + one system to another. + + Parameters + ---------- + fromsys : class + The coordinate frame class to start from. + tosys : class + The coordinate frame class to transform into. + + Returns + ------- + path : list of classes or `None` + The path from ``fromsys`` to ``tosys`` as an in-order sequence + of classes. This list includes *both* ``fromsys`` and + ``tosys``. Is `None` if there is no possible path. + distance : number + The total distance/priority from ``fromsys`` to ``tosys``. If + priorities are not set this is the number of transforms + needed. Is ``inf`` if there is no possible path. + """ + + inf = float('inf') + + # special-case the 0 or 1-path + if tosys is fromsys: + if tosys not in self._graph[fromsys]: + # Means there's no transform necessary to go from it to itself. + return [tosys], 0 + if tosys in self._graph[fromsys]: + # this will also catch the case where tosys is fromsys, but has + # a defined transform. 
+            t = self._graph[fromsys][tosys]
+            return [fromsys, tosys], float(t.priority if hasattr(t, 'priority') else 1)
+
+        # otherwise, need to construct the path:
+
+        if fromsys in self._shortestpaths:
+            # already have a cached result
+            fpaths = self._shortestpaths[fromsys]
+            if tosys in fpaths:
+                return fpaths[tosys]
+            else:
+                return None, inf
+
+        # use Dijkstra's algorithm to find shortest path in all other cases
+
+        nodes = []
+        # first make the list of nodes
+        for a in self._graph:
+            if a not in nodes:
+                nodes.append(a)
+            for b in self._graph[a]:
+                if b not in nodes:
+                    nodes.append(b)
+
+        if fromsys not in nodes or tosys not in nodes:
+            # fromsys or tosys are isolated or not registered, so there's
+            # certainly no way to get from one to the other
+            return None, inf
+
+        edgeweights = {}
+        # construct another graph that is a dict of dicts of priorities
+        # (used as edge weights in Dijkstra's algorithm)
+        for a in self._graph:
+            edgeweights[a] = aew = {}
+            agraph = self._graph[a]
+            for b in agraph:
+                aew[b] = float(agraph[b].priority if hasattr(agraph[b], 'priority') else 1)
+
+        # entries in q are [distance, count, nodeobj, pathlist]
+        # count is needed because in py 3.x, tie-breaking fails on the nodes.
+        # this way, insertion order is preserved if the weights are the same
+        q = [[inf, i, n, []] for i, n in enumerate(nodes) if n is not fromsys]
+        q.insert(0, [0, -1, fromsys, []])
+
+        # this dict will store the distance to node from ``fromsys`` and the path
+        result = {}
+
+        # definitely starts as a valid heap because of the insert line; from the
+        # node to itself is always the shortest distance
+        while len(q) > 0:
+            d, orderi, n, path = heapq.heappop(q)
+
+            if d == inf:
+                # everything left is unreachable from fromsys, just copy them to
+                # the results and jump out of the loop
+                result[n] = (None, d)
+                for d, orderi, n, path in q:
+                    result[n] = (None, d)
+                break
+            else:
+                result[n] = (path, d)
+                path.append(n)
+                if n not in edgeweights:
+                    # this is a system that can be transformed to, but not from.
+                    continue
+                for n2 in edgeweights[n]:
+                    if n2 not in result:  # n2 has not been visited yet
+                        # find where n2 is in the heap
+                        for i in range(len(q)):
+                            if q[i][2] == n2:
+                                break
+                        else:
+                            raise ValueError('n2 not in heap - this should be impossible!')
+
+                        newd = d + edgeweights[n][n2]
+                        if newd < q[i][0]:
+                            q[i][0] = newd
+                            q[i][3] = list(path)
+                            heapq.heapify(q)
+
+        # cache for later use
+        self._shortestpaths[fromsys] = result
+        return result[tosys]
+
+    def get_transform(self, fromsys, tosys):
+        """
+        Generates and returns the `CompositeTransform` for a transformation
+        between two coordinate systems.
+
+        Parameters
+        ----------
+        fromsys : class
+            The coordinate frame class to start from.
+        tosys : class
+            The coordinate frame class to transform into.
+
+        Returns
+        -------
+        trans : `CompositeTransform` or `None`
+            If there is a path from ``fromsys`` to ``tosys``, this is a
+            transform object for that path. If no path could be found, this is
+            `None`.
+
+        Notes
+        -----
+        This function always returns a `CompositeTransform`, because
+        `CompositeTransform` is slightly more adaptable in the way it can be
+        called than other transform classes. Specifically, it takes care of
+        intermediate steps of transformations in a way that is consistent with
+        1-hop transformations.
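+
+        A sketch of typical use (``Frame1``/``Frame2`` stand in for any two
+        registered frame classes)::
+
+            trans = graph.get_transform(Frame1, Frame2)
+            if trans is not None:
+                f2_coord = trans(f1_coord, Frame2())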
+ + """ + if not inspect.isclass(fromsys): + raise TypeError('fromsys is not a class') + if not inspect.isclass(tosys): + raise TypeError('tosys is not a class') + + path, distance = self.find_shortest_path(fromsys, tosys) + + if path is None: + return None + + transforms = [] + currsys = fromsys + for p in path[1:]: # first element is fromsys so we skip it + transforms.append(self._graph[currsys][p]) + currsys = p + + fttuple = (fromsys, tosys) + if fttuple not in self._composite_cache: + comptrans = CompositeTransform(transforms, fromsys, tosys, + register_graph=False) + self._composite_cache[fttuple] = comptrans + return self._composite_cache[fttuple] + + def lookup_name(self, name): + """ + Tries to locate the coordinate class with the provided alias. + + Parameters + ---------- + name : str + The alias to look up. + + Returns + ------- + coordcls + The coordinate class corresponding to the ``name`` or `None` if + no such class exists. + """ + + return self._cached_names.get(name, None) + + def get_names(self): + """ + Returns all available transform names. They will all be + valid arguments to `lookup_name`. + + Returns + ------- + nms : list + The aliases for coordinate systems. + """ + return list(six.iterkeys(self._cached_names)) + + def to_dot_graph(self, priorities=True, addnodes=[], savefn=None, + savelayout='plain', saveformat=None, color_edges=True): + """ + Converts this transform graph to the graphviz_ DOT format. + + Optionally saves it (requires `graphviz`_ be installed and on your path). + + .. _graphviz: http://www.graphviz.org/ + + Parameters + ---------- + priorities : bool + If `True`, show the priority values for each transform. Otherwise, + the will not be included in the graph. + addnodes : sequence of str + Additional coordinate systems to add (this can include systems + already in the transform graph, but they will only appear once). + savefn : `None` or str + The file name to save this graph to or `None` to not save + to a file. + savelayout : str + The graphviz program to use to layout the graph (see + graphviz_ for details) or 'plain' to just save the DOT graph + content. Ignored if ``savefn`` is `None`. + saveformat : str + The graphviz output format. (e.g. the ``-Txxx`` option for + the command line program - see graphviz docs for details). + Ignored if ``savefn`` is `None`. + color_edges : bool + Color the edges between two nodes (frames) based on the type of + transform. ``FunctionTransform``: red, ``StaticMatrixTransform``: + blue, ``DynamicMatrixTransform``: green. + + Returns + ------- + dotgraph : str + A string with the DOT format graph. 
+ """ + + nodes = [] + # find the node names + for a in self._graph: + if a not in nodes: + nodes.append(a) + for b in self._graph[a]: + if b not in nodes: + nodes.append(b) + for node in addnodes: + if node not in nodes: + nodes.append(node) + nodenames = [] + invclsaliases = dict([(v, k) for k, v in six.iteritems(self._cached_names)]) + for n in nodes: + if n in invclsaliases: + nodenames.append('{0} [shape=oval label="{0}\\n`{1}`"]'.format(n.__name__, invclsaliases[n])) + else: + nodenames.append(n.__name__ + '[ shape=oval ]') + + edgenames = [] + # Now the edges + for a in self._graph: + agraph = self._graph[a] + for b in agraph: + transform = agraph[b] + pri = transform.priority if hasattr(transform, 'priority') else 1 + color = trans_to_color[transform.__class__] if color_edges else 'black' + edgenames.append((a.__name__, b.__name__, pri, color)) + + # generate simple dot format graph + lines = ['digraph AstropyCoordinateTransformGraph {'] + lines.append('; '.join(nodenames) + ';') + for enm1, enm2, weights, color in edgenames: + labelstr_fmt = '[ {0} {1} ]' + + if priorities: + priority_part = 'label = "{0}"'.format(weights) + else: + priority_part = '' + + color_part = 'color = "{0}"'.format(color) + + labelstr = labelstr_fmt.format(priority_part, color_part) + lines.append('{0} -> {1}{2};'.format(enm1, enm2, labelstr)) + + lines.append('') + lines.append('overlap=false') + lines.append('}') + dotgraph = '\n'.join(lines) + + if savefn is not None: + if savelayout == 'plain': + with open(savefn, 'w') as f: + f.write(dotgraph) + else: + args = [savelayout] + if saveformat is not None: + args.append('-T' + saveformat) + proc = subprocess.Popen(args, stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + stdout, stderr = proc.communicate(dotgraph) + if proc.returncode != 0: + raise IOError('problem running graphviz: \n' + stderr) + + with open(savefn, 'w') as f: + f.write(stdout) + + return dotgraph + + def to_networkx_graph(self): + """ + Converts this transform graph into a networkx graph. + + .. note:: + You must have the `networkx `_ + package installed for this to work. + + Returns + ------- + nxgraph : `networkx.Graph `_ + This `TransformGraph` as a `networkx.Graph`_. + """ + import networkx as nx + + nxgraph = nx.Graph() + + # first make the nodes + for a in self._graph: + if a not in nxgraph: + nxgraph.add_node(a) + for b in self._graph[a]: + if b not in nxgraph: + nxgraph.add_node(b) + + # Now the edges + for a in self._graph: + agraph = self._graph[a] + for b in agraph: + transform = agraph[b] + pri = transform.priority if hasattr(transform, 'priority') else 1 + color = trans_to_color[transform.__class__] + nxgraph.add_edge(a, b, weight=pri, color=color) + + return nxgraph + + def transform(self, transcls, fromsys, tosys, priority=1, **kwargs): + """ + A function decorator for defining transformations. + + .. note:: + If decorating a static method of a class, ``@staticmethod`` + should be added *above* this decorator. + + Parameters + ---------- + transcls : class + The class of the transformation object to create. + fromsys : class + The coordinate frame class to start from. + tosys : class + The coordinate frame class to transform into. + priority : number + The priority if this transform when finding the shortest + coordinate transform path - large numbers are lower priorities. + + Additional keyword arguments are passed into the ``transcls`` + constructor. 
+
+        Returns
+        -------
+        deco : function
+            A function that can be called on another function as a decorator
+            (see example).
+
+        Notes
+        -----
+        This decorator assumes the first argument of the ``transcls``
+        initializer accepts a callable, and that the second and third
+        are ``fromsys`` and ``tosys``. If this is not true, you should just
+        initialize the class manually and use `add_transform` instead of
+        using this decorator.
+
+        Examples
+        --------
+
+        ::
+
+            graph = TransformGraph()
+
+            class Frame1(BaseCoordinateFrame):
+                ...
+
+            class Frame2(BaseCoordinateFrame):
+                ...
+
+            @graph.transform(FunctionTransform, Frame1, Frame2)
+            def f1_to_f2(f1_obj, f2_frame):
+                ... do something with f1_obj ...
+                return f2_obj
+
+
+        """
+        def deco(func):
+            # this doesn't do anything directly with the transform because
+            # ``register_graph=self`` stores it in the transform graph
+            # automatically
+            transcls(func, fromsys, tosys, priority=priority,
+                     register_graph=self, **kwargs)
+            return func
+        return deco
+
+
+# <-------------------Define the builtin transform classes-------------------->
+
+@six.add_metaclass(ABCMeta)
+class CoordinateTransform(object):
+    """
+    An object that transforms a coordinate from one system to another.
+    Subclasses must implement `__call__` with the provided signature.
+    They should also call this superclass's ``__init__`` in their
+    ``__init__``.
+
+    Parameters
+    ----------
+    fromsys : class
+        The coordinate frame class to start from.
+    tosys : class
+        The coordinate frame class to transform into.
+    priority : number
+        The priority of this transform when finding the shortest
+        coordinate transform path - large numbers are lower priorities.
+    register_graph : `TransformGraph` or `None`
+        A graph to register this transformation with on creation, or
+        `None` to leave it unregistered.
+    """
+
+    def __init__(self, fromsys, tosys, priority=1, register_graph=None):
+        if not inspect.isclass(fromsys):
+            raise TypeError('fromsys must be a class')
+        if not inspect.isclass(tosys):
+            raise TypeError('tosys must be a class')
+
+        self.fromsys = fromsys
+        self.tosys = tosys
+        self.priority = float(priority)
+
+        if register_graph:
+            # this will do the type-checking when it adds to the graph
+            self.register(register_graph)
+
+        self.overlapping_frame_attr_names = overlap = []
+        if (hasattr(fromsys, 'get_frame_attr_names') and
+                hasattr(tosys, 'get_frame_attr_names')):
+            # the if statement is there so that non-frame things might be usable
+            # if it makes sense
+            for from_nm in fromsys.get_frame_attr_names():
+                if from_nm in tosys.get_frame_attr_names():
+                    overlap.append(from_nm)
+
+    def register(self, graph):
+        """
+        Add this transformation to the requested transformation graph,
+        replacing anything already connecting these two coordinates.
+
+        Parameters
+        ----------
+        graph : a TransformGraph object
+            The graph to register this transformation with.
+        """
+        graph.add_transform(self.fromsys, self.tosys, self)
+
+    def unregister(self, graph):
+        """
+        Remove this transformation from the requested transformation
+        graph.
+
+        Parameters
+        ----------
+        graph : a TransformGraph object
+            The graph to unregister this transformation from.
+
+        Raises
+        ------
+        ValueError
+            If this is not currently in the transform graph.
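+
+        A sketch of typical use (assuming ``trans`` was previously registered
+        on ``graph``)::
+
+            trans.unregister(graph)  # ValueError if it was not registered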
+ """ + graph.remove_transform(self.fromsys, self.tosys, self) + + @abstractmethod + def __call__(self, fromcoord, toframe): + """ + Does the actual coordinate transformation from the ``fromsys`` class to + the ``tosys`` class. + + Parameters + ---------- + fromcoord : fromsys object + An object of class matching ``fromsys`` that is to be transformed. + toframe : object + An object that has the attributes necessary to fully specify the + frame. That is, it must have attributes with names that match the + keys of the dictionary that ``tosys.get_frame_attr_names()`` + returns. Typically this is of class ``tosys``, but it *might* be + some other class as long as it has the appropriate attributes. + + Returns + ------- + tocoord : tosys object + The new coordinate after the transform has been applied. + """ + + +class FunctionTransform(CoordinateTransform): + """ + A coordinate transformation defined by a function that accepts a + coordinate object and returns the transformed coordinate object. + + Parameters + ---------- + func : callable + The transformation function. Should have a call signature + ``func(formcoord, toframe)``. Note that, unlike + `CoordinateTransform.__call__`, ``toframe`` is assumed to be of type + ``tosys`` for this function. + fromsys : class + The coordinate frame class to start from. + tosys : class + The coordinate frame class to transform into. + priority : number + The priority if this transform when finding the shortest + coordinate transform path - large numbers are lower priorities. + register_graph : `TransformGraph` or `None` + A graph to register this transformation with on creation, or + `None` to leave it unregistered. + + Raises + ------ + TypeError + If ``func`` is not callable. + ValueError + If ``func`` cannot accept two arguments. + + + """ + + def __init__(self, func, fromsys, tosys, priority=1, register_graph=None): + if not six.callable(func): + raise TypeError('func must be callable') + + with suppress(TypeError): + sig = signature(func) + kinds = [x.kind for x in sig.parameters.values()] + if (len(x for x in kinds if x == sig.POSITIONAL_ONLY) != 2 + and sig.VAR_POSITIONAL not in kinds): + raise ValueError('provided function does not accept two arguments') + + self.func = func + + super(FunctionTransform, self).__init__(fromsys, tosys, + priority=priority, register_graph=register_graph) + + def __call__(self, fromcoord, toframe): + res = self.func(fromcoord, toframe) + if not isinstance(res, self.tosys): + raise TypeError('the transformation function yielded {0} but ' + 'should have been of type {1}'.format(res, self.tosys)) + if fromcoord.data.differentials and not res.data.differentials: + warn("Applied a FunctionTransform to a coordinate frame with " + "differentials, but the FunctionTransform does not handle " + "differentials, so they have been dropped.", AstropyWarning) + return res + + +class FunctionTransformWithFiniteDifference(FunctionTransform): + r""" + A coordinate transformation that works like a `FunctionTransform`, but + computes velocity shifts based on the finite-difference relative to one of + the frame attributes. Note that the transform function should *not* change + the differential at all in this case, as any differentials will be + overridden. + + When a differential is in the from coordinate, the finite difference + calculation has two components. 
+    The first part is simply the existing
+    differential, re-oriented (using finite-difference techniques) to
+    point in the direction the velocity vector has in the *new* frame. The
+    second component is the "induced" velocity. That is, the velocity
+    intrinsic to the frame itself, estimated by shifting the frame using the
+    ``finite_difference_frameattr_name`` frame attribute a small amount
+    (``finite_difference_dt``) in time and re-calculating the position.
+
+    Parameters
+    ----------
+    finite_difference_frameattr_name : str or None
+        The name of the frame attribute on the frames to use for the finite
+        difference. Both the to and the from frame will be checked for this
+        attribute, but only one needs to have it. If None, no velocity
+        component induced from the frame itself will be included - only the
+        re-orientation of any existing differential.
+    finite_difference_dt : `~astropy.units.Quantity` or callable
+        If a quantity, this is the size of the differential used to do the
+        finite difference. If a callable, should accept
+        ``(fromcoord, toframe)`` and return the ``dt`` value.
+    symmetric_finite_difference : bool
+        If True, the finite difference is computed as
+        :math:`\frac{x(t + \Delta t / 2) - x(t - \Delta t / 2)}{\Delta t}`, or
+        if False, :math:`\frac{x(t + \Delta t) - x(t)}{\Delta t}`. The former
+        (symmetric) case gives more stable finite difference behavior, while
+        the latter requires one fewer evaluation of the transform function
+        and therefore performs slightly better.
+
+    All other parameters are identical to the initializer for
+    `FunctionTransform`.
+
+    """
+
+    def __init__(self, func, fromsys, tosys, priority=1, register_graph=None,
+                 finite_difference_frameattr_name='obstime',
+                 finite_difference_dt=1*u.second,
+                 symmetric_finite_difference=True):
+        super(FunctionTransformWithFiniteDifference, self).__init__(func,
+            fromsys, tosys, priority, register_graph)
+        self.finite_difference_frameattr_name = finite_difference_frameattr_name
+        self.finite_difference_dt = finite_difference_dt
+        self.symmetric_finite_difference = symmetric_finite_difference
+
+    @property
+    def finite_difference_frameattr_name(self):
+        return self._finite_difference_frameattr_name
+
+    @finite_difference_frameattr_name.setter
+    def finite_difference_frameattr_name(self, value):
+        if value is None:
+            self._diff_attr_in_fromsys = self._diff_attr_in_tosys = False
+        else:
+            diff_attr_in_fromsys = value in self.fromsys.frame_attributes
+            diff_attr_in_tosys = value in self.tosys.frame_attributes
+            if diff_attr_in_fromsys or diff_attr_in_tosys:
+                self._diff_attr_in_fromsys = diff_attr_in_fromsys
+                self._diff_attr_in_tosys = diff_attr_in_tosys
+            else:
+                raise ValueError('Frame attribute name {} is not a frame '
+                                 'attribute of {} or {}'.format(value,
+                                                                self.fromsys,
+                                                                self.tosys))
+        # record the name in all cases (including None) so the getter works
+        self._finite_difference_frameattr_name = value
+
+    def __call__(self, fromcoord, toframe):
+        from .representation import (CartesianRepresentation,
+                                     CartesianDifferential)
+
+        supcall = self.func
+        if fromcoord.data.differentials:
+            # this is the finite difference case
+
+            if callable(self.finite_difference_dt):
+                dt = self.finite_difference_dt(fromcoord, toframe)
+            else:
+                dt = self.finite_difference_dt
+            halfdt = dt/2
+
+            from_diffless = fromcoord.realize_frame(fromcoord.data.without_differentials())
+            reprwithoutdiff = supcall(from_diffless, toframe)
+
+            # first we use the existing differential to compute an offset due to
+            # the already-existing velocity, but in the new frame
+            fromcoord_cart = fromcoord.cartesian
+            if self.symmetric_finite_difference:
+                fwdxyz = (fromcoord_cart.xyz +
+                          fromcoord_cart.differentials['s'].d_xyz*halfdt)
+                fwd = supcall(fromcoord.realize_frame(CartesianRepresentation(fwdxyz)), toframe)
+                backxyz = (fromcoord_cart.xyz -
+                           fromcoord_cart.differentials['s'].d_xyz*halfdt)
+                back = supcall(fromcoord.realize_frame(CartesianRepresentation(backxyz)), toframe)
+            else:
+                fwdxyz = (fromcoord_cart.xyz +
+                          fromcoord_cart.differentials['s'].d_xyz*dt)
+                fwd = supcall(fromcoord.realize_frame(CartesianRepresentation(fwdxyz)), toframe)
+                back = reprwithoutdiff
+            diffxyz = (fwd.cartesian - back.cartesian).xyz / dt
+
+            # now we compute the "induced" velocities due to any movement in
+            # the frame itself over time
+            attrname = self.finite_difference_frameattr_name
+            if attrname is not None:
+                if self.symmetric_finite_difference:
+                    if self._diff_attr_in_fromsys:
+                        kws = {attrname: getattr(from_diffless, attrname) + halfdt}
+                        from_diffless_fwd = from_diffless.replicate(**kws)
+                    else:
+                        from_diffless_fwd = from_diffless
+                    if self._diff_attr_in_tosys:
+                        kws = {attrname: getattr(toframe, attrname) + halfdt}
+                        fwd_frame = toframe.replicate_without_data(**kws)
+                    else:
+                        fwd_frame = toframe
+                    fwd = supcall(from_diffless_fwd, fwd_frame)
+
+                    if self._diff_attr_in_fromsys:
+                        kws = {attrname: getattr(from_diffless, attrname) - halfdt}
+                        from_diffless_back = from_diffless.replicate(**kws)
+                    else:
+                        from_diffless_back = from_diffless
+                    if self._diff_attr_in_tosys:
+                        kws = {attrname: getattr(toframe, attrname) - halfdt}
+                        back_frame = toframe.replicate_without_data(**kws)
+                    else:
+                        back_frame = toframe
+                    back = supcall(from_diffless_back, back_frame)
+                else:
+                    if self._diff_attr_in_fromsys:
+                        kws = {attrname: getattr(from_diffless, attrname) + dt}
+                        from_diffless_fwd = from_diffless.replicate(**kws)
+                    else:
+                        from_diffless_fwd = from_diffless
+                    if self._diff_attr_in_tosys:
+                        kws = {attrname: getattr(toframe, attrname) + dt}
+                        fwd_frame = toframe.replicate_without_data(**kws)
+                    else:
+                        fwd_frame = toframe
+                    fwd = supcall(from_diffless_fwd, fwd_frame)
+                    back = reprwithoutdiff
+
+                diffxyz += (fwd.cartesian - back.cartesian).xyz / dt
+
+            newdiff = CartesianDifferential(diffxyz)
+            reprwithdiff = reprwithoutdiff.data.to_cartesian().with_differentials(newdiff)
+            return reprwithoutdiff.realize_frame(reprwithdiff)
+        else:
+            return supcall(fromcoord, toframe)
+
+
+class BaseAffineTransform(CoordinateTransform):
+    """Base class for common functionality between the ``AffineTransform``-type
+    subclasses.
+
+    This base class is needed because ``AffineTransform`` and the matrix
+    transform classes share the ``_apply_transform()`` method, but have
+    different ``__call__()`` methods. ``StaticMatrixTransform`` passes in a
+    matrix stored as an instance attribute, and both of the matrix transforms
+    pass in ``None`` for the offset. Hence, user subclasses would likely want
+    to subclass this (rather than ``AffineTransform``) if they want to provide
+    alternative transformations using this machinery.
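+
+    A sketch of such a subclass (hypothetical; ``my_matrix_and_offset`` stands
+    in for whatever computes the matrix and offset)::
+
+        class MyAffineLikeTransform(BaseAffineTransform):
+            def __call__(self, fromcoord, toframe):
+                matrix, offset = my_matrix_and_offset(fromcoord, toframe)
+                newrep = self._apply_transform(fromcoord, matrix, offset)
+                return toframe.realize_frame(newrep)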
+ """ + + def _apply_transform(self, fromcoord, matrix, offset): + from .representation import (UnitSphericalRepresentation, + CartesianDifferential, + SphericalDifferential, + SphericalCosLatDifferential, + RadialDifferential) + + data = fromcoord.data + has_velocity = 's' in data.differentials + + # list of unit differentials + _unit_diffs = (SphericalDifferential._unit_differential, + SphericalCosLatDifferential._unit_differential) + unit_vel_diff = (has_velocity and + isinstance(data.differentials['s'], _unit_diffs)) + rad_vel_diff = (has_velocity and + isinstance(data.differentials['s'], RadialDifferential)) + + # Some initial checking to short-circuit doing any re-representation if + # we're going to fail anyways: + if isinstance(data, UnitSphericalRepresentation) and offset is not None: + raise TypeError("Position information stored on coordiante frame " + "is insufficient to do a full-space position " + "transformation (representation class: {0})" + .format(data.__class__)) + + elif (has_velocity and (unit_vel_diff or rad_vel_diff) and + offset is not None and 's' in offset.differentials): + # Coordinate has a velocity, but it is not a full-space velocity + # that we need to do a velocity offset + raise TypeError("Velocity information stored on coordinate frame " + "is insufficient to do a full-space velocity " + "transformation (differential class: {0})" + .format(data.differentials['s'].__class__)) + + elif len(data.differentials) > 1: + # We should never get here because the frame initializer shouldn't + # allow more differentials, but this just adds protection for + # subclasses that somehow skip the checks + raise ValueError("Representation passed to AffineTransform contains" + " multiple associated differentials. Only a single" + " differential with velocity units is presently" + " supported (differentials: {0})." + .format(str(data.differentials))) + + # If the representation is a UnitSphericalRepresentation, and this is + # just a MatrixTransform, we have to try to turn the differential into a + # Unit version of the differential (if no radial velocity) or a + # sphericaldifferential with zero proper motion (if only a radial + # velocity) so that the matrix operation works + if (has_velocity and isinstance(data, UnitSphericalRepresentation) and + not unit_vel_diff and not rad_vel_diff): + # retrieve just velocity differential + unit_diff = data.differentials['s'].represent_as( + data.differentials['s']._unit_differential, data) + data = data.with_differentials({'s': unit_diff}) # updates key + + # If it's a RadialDifferential, we flat-out ignore the differentials + # This is because, by this point (past the validation above), we can + # only possibly be doing a rotation-only transformation, and that + # won't change the radial differential. We later add it back in + elif rad_vel_diff: + data = data.without_differentials() + + # Convert the representation and differentials to cartesian without + # having them attached to a frame + rep = data.to_cartesian() + diffs = dict([(k, diff.represent_as(CartesianDifferential, data)) + for k, diff in data.differentials.items()]) + rep = rep.with_differentials(diffs) + + # Only do transform if matrix is specified. 
This is for speed in + # transformations that only specify an offset (e.g., LSR) + if matrix is not None: + # Note: this applies to both representation and differentials + rep = rep.transform(matrix) + + # TODO: if we decide to allow arithmetic between representations that + # contain differentials, this can be tidied up + if offset is not None: + newrep = (rep.without_differentials() + + offset.without_differentials()) + else: + newrep = rep.without_differentials() + + # We need a velocity (time derivative) and, for now, are strict: the + # representation can only contain a velocity differential and no others. + if has_velocity and not rad_vel_diff: + veldiff = rep.differentials['s'] # already in Cartesian form + + if offset is not None and 's' in offset.differentials: + veldiff = veldiff + offset.differentials['s'] + + newrep = newrep.with_differentials({'s': veldiff}) + + if isinstance(fromcoord.data, UnitSphericalRepresentation): + # Special-case this because otherwise the return object will think + # it has a valid distance with the default return (a + # CartesianRepresentation instance) + + if has_velocity and not unit_vel_diff and not rad_vel_diff: + # We have to first represent as the Unit types we converted to, + # then put the d_distance information back in to the + # differentials and re-represent as their original forms + newdiff = newrep.differentials['s'] + _unit_cls = fromcoord.data.differentials['s']._unit_differential + newdiff = newdiff.represent_as(_unit_cls, newrep) + + kwargs = dict([(comp, getattr(newdiff, comp)) + for comp in newdiff.components]) + kwargs['d_distance'] = fromcoord.data.differentials['s'].d_distance + diffs = {'s': fromcoord.data.differentials['s'].__class__( + copy=False, **kwargs)} + + elif has_velocity and unit_vel_diff: + newdiff = newrep.differentials['s'].represent_as( + fromcoord.data.differentials['s'].__class__, newrep) + diffs = {'s': newdiff} + + else: + diffs = newrep.differentials + + newrep = newrep.represent_as(fromcoord.data.__class__) # drops diffs + newrep = newrep.with_differentials(diffs) + + elif has_velocity and unit_vel_diff: + # Here, we're in the case where the representation is not + # UnitSpherical, but the differential *is* one of the UnitSpherical + # types. We have to convert back to that differential class or the + # resulting frame will think it has a valid radial_velocity. This + # can probably be cleaned up: we currently have to go through the + # dimensional version of the differential before representing as the + # unit differential so that the units work out (the distance length + # unit shouldn't appear in the resulting proper motions) + + diff_cls = fromcoord.data.differentials['s'].__class__ + newrep = newrep.represent_as(fromcoord.data.__class__, + diff_cls._dimensional_differential) + newrep = newrep.represent_as(fromcoord.data.__class__, diff_cls) + + # We pulled the radial differential off of the representation + # earlier, so now we need to put it back. But, in order to do that, we + # have to turn the representation into a repr that is compatible with + # having a RadialDifferential + if has_velocity and rad_vel_diff: + newrep = newrep.represent_as(fromcoord.data.__class__) + newrep = newrep.with_differentials( + {'s': fromcoord.data.differentials['s']}) + + return newrep + + +class AffineTransform(BaseAffineTransform): + """ + A coordinate transformation specified as a function that yields a 3 x 3 + cartesian transformation matrix and a tuple of displacement vectors. 
+
+    See `~astropy.coordinates.builtin_frames.galactocentric.Galactocentric` for
+    an example.
+
+    Parameters
+    ----------
+    transform_func : callable
+        A callable that has the signature ``transform_func(fromcoord, toframe)``
+        and returns: a (3, 3) matrix that operates on ``fromcoord`` in a
+        Cartesian representation, and a ``CartesianRepresentation`` with
+        (optionally) an attached velocity ``CartesianDifferential`` to represent
+        a translation and offset in velocity to apply after the matrix
+        operation.
+    fromsys : class
+        The coordinate frame class to start from.
+    tosys : class
+        The coordinate frame class to transform into.
+    priority : number
+        The priority of this transform when finding the shortest
+        coordinate transform path - large numbers are lower priorities.
+    register_graph : `TransformGraph` or `None`
+        A graph to register this transformation with on creation, or
+        `None` to leave it unregistered.
+
+    Raises
+    ------
+    TypeError
+        If ``transform_func`` is not callable
+
+    """
+
+    def __init__(self, transform_func, fromsys, tosys, priority=1,
+                 register_graph=None):
+
+        if not six.callable(transform_func):
+            raise TypeError('transform_func is not callable')
+        self.transform_func = transform_func
+
+        super(AffineTransform, self).__init__(fromsys, tosys, priority=priority,
+                                              register_graph=register_graph)
+
+    def __call__(self, fromcoord, toframe):
+
+        M, vec = self.transform_func(fromcoord, toframe)
+        newrep = self._apply_transform(fromcoord, M, vec)
+
+        return toframe.realize_frame(newrep)
+
+
+class StaticMatrixTransform(BaseAffineTransform):
+    """
+    A coordinate transformation defined as a 3 x 3 cartesian
+    transformation matrix.
+
+    This is distinct from DynamicMatrixTransform in that this kind of matrix is
+    independent of frame attributes. That is, it depends *only* on the class of
+    the frame.
+
+    Parameters
+    ----------
+    matrix : array-like or callable
+        A 3 x 3 matrix for transforming 3-vectors. In most cases will
+        be unitary (although this is not strictly required). If a callable,
+        will be called *with no arguments* to get the matrix.
+    fromsys : class
+        The coordinate frame class to start from.
+    tosys : class
+        The coordinate frame class to transform into.
+    priority : number
+        The priority of this transform when finding the shortest
+        coordinate transform path - large numbers are lower priorities.
+    register_graph : `TransformGraph` or `None`
+        A graph to register this transformation with on creation, or
+        `None` to leave it unregistered.
+
+    Raises
+    ------
+    ValueError
+        If the matrix is not 3 x 3
+
+    """
+
+    def __init__(self, matrix, fromsys, tosys, priority=1, register_graph=None):
+        if six.callable(matrix):
+            matrix = matrix()
+        self.matrix = np.array(matrix)
+
+        if self.matrix.shape != (3, 3):
+            raise ValueError('Provided matrix is not 3 x 3')
+
+        super(StaticMatrixTransform, self).__init__(fromsys, tosys,
+                                                    priority=priority,
+                                                    register_graph=register_graph)
+
+    def __call__(self, fromcoord, toframe):
+        newrep = self._apply_transform(fromcoord, self.matrix, None)
+        return toframe.realize_frame(newrep)
+
+
+class DynamicMatrixTransform(BaseAffineTransform):
+    """
+    A coordinate transformation specified as a function that yields a
+    3 x 3 cartesian transformation matrix.
+
+    This is similar to, but distinct from StaticMatrixTransform, in that the
+    matrix for this class might depend on frame attributes.
+
+    Parameters
+    ----------
+    matrix_func : callable
+        A callable that has the signature ``matrix_func(fromcoord, toframe)`` and
+        returns a 3 x 3 matrix that converts ``fromcoord`` in a cartesian
+        representation to the new coordinate system.
+    fromsys : class
+        The coordinate frame class to start from.
+    tosys : class
+        The coordinate frame class to transform into.
+    priority : number
+        The priority of this transform when finding the shortest
+        coordinate transform path - large numbers are lower priorities.
+    register_graph : `TransformGraph` or `None`
+        A graph to register this transformation with on creation, or
+        `None` to leave it unregistered.
+
+    Raises
+    ------
+    TypeError
+        If ``matrix_func`` is not callable
+
+    """
+
+    def __init__(self, matrix_func, fromsys, tosys, priority=1,
+                 register_graph=None):
+        if not six.callable(matrix_func):
+            raise TypeError('matrix_func is not callable')
+        self.matrix_func = matrix_func
+
+        super(DynamicMatrixTransform, self).__init__(fromsys, tosys,
+                                                     priority=priority,
+                                                     register_graph=register_graph)
+
+    def __call__(self, fromcoord, toframe):
+        M = self.matrix_func(fromcoord, toframe)
+        newrep = self._apply_transform(fromcoord, M, None)
+        return toframe.realize_frame(newrep)
+
+
+class CompositeTransform(CoordinateTransform):
+    """
+    A transformation constructed by combining together a series of single-step
+    transformations.
+
+    Note that the intermediate frame objects are constructed using any frame
+    attributes in ``toframe`` or ``fromframe`` that overlap with the intermediate
+    frame (``toframe`` favored over ``fromframe`` if there's a conflict). Any frame
+    attributes that are not present use the defaults.
+
+    Parameters
+    ----------
+    transforms : sequence of `CoordinateTransform` objects
+        The sequence of transformations to apply.
+    fromsys : class
+        The coordinate frame class to start from.
+    tosys : class
+        The coordinate frame class to transform into.
+    priority : number
+        The priority of this transform when finding the shortest
+        coordinate transform path - large numbers are lower priorities.
+    register_graph : `TransformGraph` or `None`
+        A graph to register this transformation with on creation, or
+        `None` to leave it unregistered.
+    collapse_static_mats : bool
+        If `True`, consecutive `StaticMatrixTransform` objects will be
+        collapsed into a single transformation to speed up the calculation.
+
+    """
+
+    def __init__(self, transforms, fromsys, tosys, priority=1,
+                 register_graph=None, collapse_static_mats=True):
+        super(CompositeTransform, self).__init__(fromsys, tosys,
+                                                 priority=priority,
+                                                 register_graph=register_graph)
+
+        if collapse_static_mats:
+            transforms = self._combine_statics(transforms)
+
+        self.transforms = tuple(transforms)
+
+    def _combine_statics(self, transforms):
+        """
+        Combines together sequences of `StaticMatrixTransform`s into a single
+        transform and returns it.
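+
+        For example, if matrix ``A`` takes frame ``F1`` to ``F2`` and matrix
+        ``B`` takes ``F2`` to ``F3`` (both acting on column vectors), the pair
+        collapses to the single matrix product ``B A``, which takes ``F1``
+        directly to ``F3``.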
+ """ + newtrans = [] + for currtrans in transforms: + lasttrans = newtrans[-1] if len(newtrans) > 0 else None + + if (isinstance(lasttrans, StaticMatrixTransform) and + isinstance(currtrans, StaticMatrixTransform)): + combinedmat = np.dot(lasttrans.matrix, currtrans.matrix) + newtrans[-1] = StaticMatrixTransform(combinedmat, + lasttrans.fromsys, + currtrans.tosys) + else: + newtrans.append(currtrans) + return newtrans + + def __call__(self, fromcoord, toframe): + curr_coord = fromcoord + for t in self.transforms: + # build an intermediate frame with attributes taken from either + # `fromframe`, or if not there, `toframe`, or if not there, use + # the defaults + # TODO: caching this information when creating the transform may + # speed things up a lot + frattrs = {} + for inter_frame_attr_nm in t.tosys.get_frame_attr_names(): + if hasattr(toframe, inter_frame_attr_nm): + attr = getattr(toframe, inter_frame_attr_nm) + frattrs[inter_frame_attr_nm] = attr + elif hasattr(fromcoord, inter_frame_attr_nm): + attr = getattr(fromcoord, inter_frame_attr_nm) + frattrs[inter_frame_attr_nm] = attr + + curr_toframe = t.tosys(**frattrs) + curr_coord = t(curr_coord, curr_toframe) + + # this is safe even in the case where self.transforms is empty, because + # coordinate objects are immutible, so copying is not needed + return curr_coord + + +# map class names to colorblind-safe colors +trans_to_color = OrderedDict() +trans_to_color[AffineTransform] = '#555555' # gray +trans_to_color[FunctionTransform] = '#783001' # dark red-ish/brown +trans_to_color[FunctionTransformWithFiniteDifference] = '#d95f02' # red-ish +trans_to_color[StaticMatrixTransform] = '#7570b3' # blue-ish +trans_to_color[DynamicMatrixTransform] = '#1b9e77' # green-ish diff --git a/astropy/cosmology/__init__.py b/astropy/cosmology/__init__.py new file mode 100644 index 0000000..63e432e --- /dev/null +++ b/astropy/cosmology/__init__.py @@ -0,0 +1,13 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" astropy.cosmology contains classes and functions for cosmological +distance measures and other cosmology-related calculations. + +See the `Astropy documentation +`_ for more +detailed usage examples and references. +""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +from .core import * +from .funcs import * diff --git a/astropy/cosmology/core.py b/astropy/cosmology/core.py new file mode 100644 index 0000000..f597828 --- /dev/null +++ b/astropy/cosmology/core.py @@ -0,0 +1,2904 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +from ..extern import six +from ..extern.six.moves import map + +import sys +from math import sqrt, pi, exp, log, floor +from abc import ABCMeta, abstractmethod + +import numpy as np + +from . import scalar_inv_efuncs + +from .. import constants as const +from .. import units as u +from ..utils import isiterable +from ..utils.compat.funcsigs import signature +from ..utils.state import ScienceState + +from . import parameters + +# Originally authored by Andrew Becker (becker@astro.washington.edu), +# and modified by Neil Crighton (neilcrighton@gmail.com) and Roban +# Kramer (robanhk@gmail.com). 
+
+# Many of these adapted from Hogg 1999, astro-ph/9905116
+# and Linder 2003, PRL 90, 91301
+
+__all__ = ["FLRW", "LambdaCDM", "FlatLambdaCDM", "wCDM", "FlatwCDM",
+           "Flatw0waCDM", "w0waCDM", "wpwaCDM", "w0wzCDM",
+           "default_cosmology"] + parameters.available
+
+__doctest_requires__ = {'*': ['scipy.integrate']}
+
+# Notes about speeding up integrals:
+# ---------------------------------
+# The supplied cosmology classes use a few tricks to speed
+# up distance and time integrals.  It is not necessary for
+# anyone subclassing FLRW to use these tricks -- but if they
+# do, such calculations may be a lot faster.
+# The first, more basic, idea is that, in many cases, it makes a big
+# difference to provide explicit formulae for inv_efunc rather than
+# simply setting up de_density_scale -- assuming there is a nice
+# expression.
+# As noted above, almost all of the provided classes do this, and
+# that template can pretty much be followed directly with the appropriate
+# formula changes.
+# The second, and more advanced, option is to also explicitly
+# provide a scalar only version of inv_efunc.  This results in a fairly
+# large speedup (>10x in most cases) in the distance and age integrals,
+# even if only done in python, because testing whether the inputs are
+# iterable or pure scalars turns out to be rather expensive. To take
+# advantage of this, the key thing is to explicitly set the
+# instance variables self._inv_efunc_scalar and self._inv_efunc_scalar_args
+# in the constructor for the subclass, where the latter are all the
+# arguments except z to _inv_efunc_scalar.
+#
+# The provided classes do use this optimization, and in fact go
+# even further and provide optimizations for no radiation, and for radiation
+# with massless neutrinos coded in cython.  Consult the subclasses for
+# details, and scalar_inv_efuncs for the implementations.
+#
+# However, the important point is that it is -not- necessary to do this.
+
+# Some conversion constants -- useful to compute them once here
+# and reuse in the initialization rather than have every object do them
+# Note that the call to cgs is actually extremely expensive,
+# so we actually skip using the units package directly, and
+# hardwire the conversion from mks to cgs. This assumes that constants
+# will always return mks by default -- if this is made faster for simple
+# cases like this, it should be changed back.
+# Note that the unit tests should catch it if this happens
+H0units_to_invs = (u.km / (u.s * u.Mpc)).to(1.0 / u.s)
+sec_to_Gyr = u.s.to(u.Gyr)
+# const in critical density in cgs units (g cm^-3)
+critdens_const = 3. / (8. * pi * const.G.value * 1000)
+arcsec_in_radians = pi / (3600. * 180)
+arcmin_in_radians = pi / (60. * 180)
+# Radiation parameter over c^2 in cgs (g cm^-3 K^-4)
+a_B_c2 = 4e-3 * const.sigma_sb.value / const.c.value ** 3
+# Boltzmann constant in eV / K
+kB_evK = const.k_B.to(u.eV / u.K)
+
+
+class CosmologyError(Exception):
+    pass
+
+
+class Cosmology(object):
+    """ Placeholder for when a more general Cosmology class is
+    implemented. """
+
+
+@six.add_metaclass(ABCMeta)
+class FLRW(Cosmology):
+    """ A class describing an isotropic and homogeneous
+    (Friedmann-Lemaitre-Robertson-Walker) cosmology.
+
+    This is an abstract base class -- you can't instantiate
+    examples of this class, but must work with one of its
+    subclasses such as `LambdaCDM` or `wCDM`.
+
+    Parameters
+    ----------
+
+    H0 : float or scalar `~astropy.units.Quantity`
+      Hubble constant at z = 0.
If a float, must be in [km/sec/Mpc] + + Om0 : float + Omega matter: density of non-relativistic matter in units of the + critical density at z=0. Note that this does not include + massive neutrinos. + + Ode0 : float + Omega dark energy: density of dark energy in units of the critical + density at z=0. + + Tcmb0 : float or scalar `~astropy.units.Quantity`, optional + Temperature of the CMB z=0. If a float, must be in [K]. + Default: 0 [K]. Setting this to zero will turn off both photons + and neutrinos (even massive ones). + + Neff : float, optional + Effective number of Neutrino species. Default 3.04. + + m_nu : `~astropy.units.Quantity`, optional + Mass of each neutrino species. If this is a scalar Quantity, then all + neutrino species are assumed to have that mass. Otherwise, the mass of + each species. The actual number of neutrino species (and hence the + number of elements of m_nu if it is not scalar) must be the floor of + Neff. Typically this means you should provide three neutrino masses + unless you are considering something like a sterile neutrino. + + Ob0 : float or None, optional + Omega baryons: density of baryonic matter in units of the critical + density at z=0. If this is set to None (the default), any + computation that requires its value will raise an exception. + + name : str, optional + Name for this cosmological object. + + Notes + ----- + Class instances are static -- you can't change the values + of the parameters. That is, all of the attributes above are + read only. + """ + + def __init__(self, H0, Om0, Ode0, Tcmb0=0, Neff=3.04, + m_nu=u.Quantity(0.0, u.eV), Ob0=None, name=None): + + # all densities are in units of the critical density + self._Om0 = float(Om0) + if self._Om0 < 0.0: + raise ValueError("Matter density can not be negative") + self._Ode0 = float(Ode0) + if Ob0 is not None: + self._Ob0 = float(Ob0) + if self._Ob0 < 0.0: + raise ValueError("Baryonic density can not be negative") + if self._Ob0 > self._Om0: + raise ValueError("Baryonic density can not be larger than " + "total matter density") + self._Odm0 = self._Om0 - self._Ob0 + else: + self._Ob0 = None + self._Odm0 = None + + self._Neff = float(Neff) + if self._Neff < 0.0: + raise ValueError("Effective number of neutrinos can " + "not be negative") + self.name = name + + # Tcmb may have units + self._Tcmb0 = u.Quantity(Tcmb0, unit=u.K, dtype=np.float) + if not self._Tcmb0.isscalar: + raise ValueError("Tcmb0 is a non-scalar quantity") + + # Hubble parameter at z=0, km/s/Mpc + self._H0 = u.Quantity(H0, unit=u.km / u.s / u.Mpc, dtype=np.float) + if not self._H0.isscalar: + raise ValueError("H0 is a non-scalar quantity") + + # 100 km/s/Mpc * h = H0 (so h is dimensionless) + self._h = self._H0.value / 100. + # Hubble distance + self._hubble_distance = (const.c / self._H0).to(u.Mpc) + # H0 in s^-1; don't use units for speed + H0_s = self._H0.value * H0units_to_invs + # Hubble time; again, avoiding units package for speed + self._hubble_time = u.Quantity(sec_to_Gyr / H0_s, u.Gyr) + + # critical density at z=0 (grams per cubic cm) + cd0value = critdens_const * H0_s ** 2 + self._critical_density0 = u.Quantity(cd0value, u.g / u.cm ** 3) + + # Load up neutrino masses. Note: in Py2.x, floor is floating + self._nneutrinos = int(floor(self._Neff)) + + # We are going to share Neff between the neutrinos equally. 
+ # In detail this is not correct, but it is a standard assumption + # because properly calculating it is a) complicated b) depends + # on the details of the massive neutrinos (e.g., their weak + # interactions, which could be unusual if one is considering sterile + # neutrinos) + self._massivenu = False + if self._nneutrinos > 0 and self._Tcmb0.value > 0: + self._neff_per_nu = self._Neff / self._nneutrinos + + # We can't use the u.Quantity constructor as we do above + # because it doesn't understand equivalencies + if not isinstance(m_nu, u.Quantity): + raise ValueError("m_nu must be a Quantity") + + m_nu = m_nu.to(u.eV, equivalencies=u.mass_energy()) + + # Now, figure out if we have massive neutrinos to deal with, + # and, if so, get the right number of masses + # It is worth the effort to keep track of massless ones separately + # (since they are quite easy to deal with, and a common use case + # is to set only one neutrino to have mass) + if m_nu.isscalar: + # Assume all neutrinos have the same mass + if m_nu.value == 0: + self._nmasslessnu = self._nneutrinos + self._nmassivenu = 0 + else: + self._massivenu = True + self._nmasslessnu = 0 + self._nmassivenu = self._nneutrinos + self._massivenu_mass = (m_nu.value * + np.ones(self._nneutrinos)) + else: + # Make sure we have the right number of masses + # -unless- they are massless, in which case we cheat a little + if m_nu.value.min() < 0: + raise ValueError("Invalid (negative) neutrino mass" + " encountered") + if m_nu.value.max() == 0: + self._nmasslessnu = self._nneutrinos + self._nmassivenu = 0 + else: + self._massivenu = True + if len(m_nu) != self._nneutrinos: + errstr = "Unexpected number of neutrino masses" + raise ValueError(errstr) + # Segregate out the massless ones + self._nmasslessnu = len(np.nonzero(m_nu.value == 0)[0]) + self._nmassivenu = self._nneutrinos - self._nmasslessnu + w = np.nonzero(m_nu.value > 0)[0] + self._massivenu_mass = m_nu[w] + + # Compute photon density, Tcmb, neutrino parameters + # Tcmb0=0 removes both photons and neutrinos, is handled + # as a special case for efficiency + if self._Tcmb0.value > 0: + # Compute photon density from Tcmb + self._Ogamma0 = a_B_c2 * self._Tcmb0.value ** 4 /\ + self._critical_density0.value + + # Compute Neutrino temperature + # The constant in front is (4/11)^1/3 -- see any + # cosmology book for an explanation -- for example, + # Weinberg 'Cosmology' p 154 eq (3.1.21) + self._Tnu0 = 0.7137658555036082 * self._Tcmb0 + + # Compute Neutrino Omega and total relativistic component + # for massive neutrinos. We also store a list version, + # since that is more efficient to do integrals with (perhaps + # surprisingly! But small python lists are more efficient + # than small numpy arrays). + if self._massivenu: + nu_y = self._massivenu_mass / (kB_evK * self._Tnu0) + self._nu_y = nu_y.value + self._nu_y_list = self._nu_y.tolist() + self._Onu0 = self._Ogamma0 * self.nu_relative_density(0) + else: + # This case is particularly simple, so do it directly + # The 0.2271... is 7/8 (4/11)^(4/3) -- the temperature + # bit ^4 (blackbody energy density) times 7/8 for + # FD vs. BE statistics. + self._Onu0 = 0.22710731766 * self._Neff * self._Ogamma0 + + else: + self._Ogamma0 = 0.0 + self._Tnu0 = u.Quantity(0.0, u.K) + self._Onu0 = 0.0 + + # Compute curvature density + self._Ok0 = 1.0 - self._Om0 - self._Ode0 - self._Ogamma0 - self._Onu0 + + # Subclasses should override this reference if they provide + # more efficient scalar versions of inv_efunc. 
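+        # A hypothetical subclass would do so in its own __init__, e.g.
+        # (sketch -- ``my_inv_efunc`` is a made-up scalar-only callable
+        # with signature ``my_inv_efunc(z, Om0, Ode0)`` that returns
+        # 1/E(z) for a single float z):
+        #
+        #     self._inv_efunc_scalar = my_inv_efunc
+        #     self._inv_efunc_scalar_args = (self._Om0, self._Ode0)
+        #
+        # The stock subclasses below follow exactly this pattern with
+        # their cython helpers from scalar_inv_efuncs.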
+ self._inv_efunc_scalar = self.inv_efunc + self._inv_efunc_scalar_args = () + + def _namelead(self): + """ Helper function for constructing __repr__""" + if self.name is None: + return "{0}(".format(self.__class__.__name__) + else: + return "{0}(name=\"{1}\", ".format(self.__class__.__name__, + self.name) + + def __repr__(self): + retstr = "{0}H0={1:.3g}, Om0={2:.3g}, Ode0={3:.3g}, "\ + "Tcmb0={4:.4g}, Neff={5:.3g}, m_nu={6}, "\ + "Ob0={7:s})" + return retstr.format(self._namelead(), self._H0, self._Om0, self._Ode0, + self._Tcmb0, self._Neff, self.m_nu, + _float_or_none(self._Ob0)) + + # Set up a set of properties for H0, Om0, Ode0, Ok0, etc. for user access. + # Note that we don't let these be set (so, obj.Om0 = value fails) + + @property + def H0(self): + """ Return the Hubble constant as an `~astropy.units.Quantity` at z=0""" + return self._H0 + + @property + def Om0(self): + """ Omega matter; matter density/critical density at z=0""" + return self._Om0 + + @property + def Ode0(self): + """ Omega dark energy; dark energy density/critical density at z=0""" + return self._Ode0 + + @property + def Ob0(self): + """ Omega baryon; baryonic matter density/critical density at z=0""" + return self._Ob0 + + @property + def Odm0(self): + """ Omega dark matter; dark matter density/critical density at z=0""" + return self._Odm0 + + @property + def Ok0(self): + """ Omega curvature; the effective curvature density/critical density + at z=0""" + return self._Ok0 + + @property + def Tcmb0(self): + """ Temperature of the CMB as `~astropy.units.Quantity` at z=0""" + return self._Tcmb0 + + @property + def Tnu0(self): + """ Temperature of the neutrino background as `~astropy.units.Quantity` at z=0""" + return self._Tnu0 + + @property + def Neff(self): + """ Number of effective neutrino species""" + return self._Neff + + @property + def has_massive_nu(self): + """ Does this cosmology have at least one massive neutrino species?""" + if self._Tnu0.value == 0: + return False + return self._massivenu + + @property + def m_nu(self): + """ Mass of neutrino species""" + if self._Tnu0.value == 0: + return None + if not self._massivenu: + # Only massless + return u.Quantity(np.zeros(self._nmasslessnu), u.eV, + dtype=np.float) + if self._nmasslessnu == 0: + # Only massive + return u.Quantity(self._massivenu_mass, u.eV, + dtype=np.float) + # A mix -- the most complicated case + numass = np.append(np.zeros(self._nmasslessnu), + self._massivenu_mass.value) + return u.Quantity(numass, u.eV, dtype=np.float) + + @property + def h(self): + """ Dimensionless Hubble constant: h = H_0 / 100 [km/sec/Mpc]""" + return self._h + + @property + def hubble_time(self): + """ Hubble time as `~astropy.units.Quantity`""" + return self._hubble_time + + @property + def hubble_distance(self): + """ Hubble distance as `~astropy.units.Quantity`""" + return self._hubble_distance + + @property + def critical_density0(self): + """ Critical density as `~astropy.units.Quantity` at z=0""" + return self._critical_density0 + + @property + def Ogamma0(self): + """ Omega gamma; the density/critical density of photons at z=0""" + return self._Ogamma0 + + @property + def Onu0(self): + """ Omega nu; the density/critical density of neutrinos at z=0""" + return self._Onu0 + + def clone(self, **kwargs): + """ Returns a copy of this object, potentially with some changes. + + Returns + ------- + newcos : Subclass of FLRW + A new instance of this class with the specified changes. 
+ + Notes + ----- + This assumes that the values of all constructor arguments + are available as properties, which is true of all the provided + subclasses but may not be true of user-provided ones. You can't + change the type of class, so this can't be used to change between + flat and non-flat. If no modifications are requested, then + a reference to this object is returned. + + Examples + -------- + To make a copy of the Planck13 cosmology with a different Omega_m + and a new name: + + >>> from astropy.cosmology import Planck13 + >>> newcos = Planck13.clone(name="Modified Planck 2013", Om0=0.35) + """ + + # Quick return check, taking advantage of the + # immutability of cosmological objects + if len(kwargs) == 0: + return self + + # Get constructor arguments + arglist = signature(self.__init__).parameters.keys() + + # Build the dictionary of values used to construct this + # object. This -assumes- every argument to __init__ has a + # property. This is true of all the classes we provide, but + # maybe a user won't do that. So at least try to have a useful + # error message. + argdict = {} + for arg in arglist: + try: + val = getattr(self, arg) + argdict[arg] = val + except AttributeError: + # We didn't find a property -- complain usefully + errstr = "Object did not have property corresponding "\ + "to constructor argument '{}'; perhaps it is a "\ + "user provided subclass that does not do so" + raise AttributeError(errstr.format(arg)) + + # Now substitute in new arguments + for newarg in kwargs: + if newarg not in argdict: + errstr = "User provided argument '{}' not found in "\ + "constructor for this object" + raise AttributeError(errstr.format(newarg)) + argdict[newarg] = kwargs[newarg] + + return self.__class__(**argdict) + + @abstractmethod + def w(self, z): + """ The dark energy equation of state. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + w : ndarray, or float if input scalar + The dark energy equation of state + + Notes + ----- + The dark energy equation of state is defined as + :math:`w(z) = P(z)/\\rho(z)`, where :math:`P(z)` is the + pressure at redshift z and :math:`\\rho(z)` is the density + at redshift z, both in units where c=1. + + This must be overridden by subclasses. + """ + raise NotImplementedError("w(z) is not implemented") + + def Om(self, z): + """ Return the density parameter for non-relativistic matter + at redshift ``z``. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + Om : ndarray, or float if input scalar + The density of non-relativistic matter relative to the critical + density at each redshift. + + Notes + ----- + This does not include neutrinos, even if non-relativistic + at the redshift of interest; see `Onu`. + """ + + if isiterable(z): + z = np.asarray(z) + return self._Om0 * (1. + z) ** 3 * self.inv_efunc(z) ** 2 + + def Ob(self, z): + """ Return the density parameter for baryonic matter at redshift ``z``. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + Ob : ndarray, or float if input scalar + The density of baryonic matter relative to the critical density at + each redshift. + + Raises + ------ + ValueError + If Ob0 is None. + """ + + if self._Ob0 is None: + raise ValueError("Baryon density not set for this cosmology") + if isiterable(z): + z = np.asarray(z) + return self._Ob0 * (1. + z) ** 3 * self.inv_efunc(z) ** 2 + + def Odm(self, z): + """ Return the density parameter for dark matter at redshift ``z``. 
+
+        Parameters
+        ----------
+        z : array-like
+          Input redshifts.
+
+        Returns
+        -------
+        Odm : ndarray, or float if input scalar
+          The density of non-relativistic dark matter relative to the
+          critical density at each redshift.
+
+        Raises
+        ------
+        ValueError
+          If Ob0 is None.
+
+        Notes
+        -----
+        This does not include neutrinos, even if non-relativistic
+        at the redshift of interest.
+        """
+
+        if self._Odm0 is None:
+            raise ValueError("Baryonic density not set for this cosmology, "
+                             "unclear meaning of dark matter density")
+        if isiterable(z):
+            z = np.asarray(z)
+        return self._Odm0 * (1. + z) ** 3 * self.inv_efunc(z) ** 2
+
+    def Ok(self, z):
+        """ Return the equivalent density parameter for curvature
+        at redshift ``z``.
+
+        Parameters
+        ----------
+        z : array-like
+          Input redshifts.
+
+        Returns
+        -------
+        Ok : ndarray, or float if input scalar
+          The equivalent density parameter for curvature at each redshift.
+        """
+
+        if isiterable(z):
+            z = np.asarray(z)
+            # Common enough case to be worth checking explicitly
+            if self._Ok0 == 0:
+                return np.zeros(np.asanyarray(z).shape, dtype=np.float)
+        else:
+            if self._Ok0 == 0:
+                return 0.0
+
+        return self._Ok0 * (1. + z) ** 2 * self.inv_efunc(z) ** 2
+
+    def Ode(self, z):
+        """ Return the density parameter for dark energy at redshift ``z``.
+
+        Parameters
+        ----------
+        z : array-like
+          Input redshifts.
+
+        Returns
+        -------
+        Ode : ndarray, or float if input scalar
+          The density of dark energy relative to the critical density at
+          each redshift.
+        """
+
+        if isiterable(z):
+            z = np.asarray(z)
+            # Common case worth checking
+            if self._Ode0 == 0:
+                return np.zeros(np.asanyarray(z).shape, dtype=np.float)
+        else:
+            if self._Ode0 == 0:
+                return 0.0
+
+        return self._Ode0 * self.de_density_scale(z) * self.inv_efunc(z) ** 2
+
+    def Ogamma(self, z):
+        """ Return the density parameter for photons at redshift ``z``.
+
+        Parameters
+        ----------
+        z : array-like
+          Input redshifts.
+
+        Returns
+        -------
+        Ogamma : ndarray, or float if input scalar
+          The energy density of photons relative to the critical
+          density at each redshift.
+        """
+
+        if isiterable(z):
+            z = np.asarray(z)
+        return self._Ogamma0 * (1. + z) ** 4 * self.inv_efunc(z) ** 2
+
+    def Onu(self, z):
+        """ Return the density parameter for neutrinos at redshift ``z``.
+
+        Parameters
+        ----------
+        z : array-like
+          Input redshifts.
+
+        Returns
+        -------
+        Onu : ndarray, or float if input scalar
+          The energy density of neutrinos relative to the critical
+          density at each redshift.  Note that this includes their
+          kinetic energy (if they have mass), so it is not equal to
+          the commonly used :math:`\\sum \\frac{m_{\\nu}}{94 eV}`,
+          which does not include kinetic energy.
+        """
+
+        if isiterable(z):
+            z = np.asarray(z)
+            if self._Onu0 == 0:
+                return np.zeros(np.asanyarray(z).shape, dtype=np.float)
+        else:
+            if self._Onu0 == 0:
+                return 0.0
+
+        return self.Ogamma(z) * self.nu_relative_density(z)
+
+    def Tcmb(self, z):
+        """ Return the CMB temperature at redshift ``z``.
+
+        Parameters
+        ----------
+        z : array-like
+          Input redshifts.
+
+        Returns
+        -------
+        Tcmb : `~astropy.units.Quantity`
+          The temperature of the CMB in K.
+        """
+
+        if isiterable(z):
+            z = np.asarray(z)
+        return self._Tcmb0 * (1. + z)
+
+    def Tnu(self, z):
+        """ Return the neutrino temperature at redshift ``z``.
+
+        Parameters
+        ----------
+        z : array-like
+          Input redshifts.
+
+        Returns
+        -------
+        Tnu : `~astropy.units.Quantity`
+          The temperature of the cosmic neutrino background in K.
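+
+        Examples
+        --------
+        A sketch (the cosmology below is an arbitrary example, and the
+        printed value is approximate; Tnu simply scales as 1 + z):
+
+        >>> from astropy.cosmology import FlatLambdaCDM
+        >>> cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725)
+        >>> cosmo.Tnu(1.0)  # doctest: +SKIP
+        <Quantity 3.890... K>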
+ """ + + if isiterable(z): + z = np.asarray(z) + return self._Tnu0 * (1. + z) + + def nu_relative_density(self, z): + """ Neutrino density function relative to the energy density in + photons. + + Parameters + ---------- + z : array like + Redshift + + Returns + ------- + f : ndarray, or float if z is scalar + The neutrino density scaling factor relative to the density + in photons at each redshift + + Notes + ----- + The density in neutrinos is given by + + .. math:: + + \\rho_{\\nu} \\left(a\\right) = 0.2271 \\, N_{eff} \\, + f\\left(m_{\\nu} a / T_{\\nu 0} \\right) \\, + \\rho_{\\gamma} \\left( a \\right) + + where + + .. math:: + + f \\left(y\\right) = \\frac{120}{7 \\pi^4} + \\int_0^{\\infty} \\, dx \\frac{x^2 \\sqrt{x^2 + y^2}} + {e^x + 1} + + assuming that all neutrino species have the same mass. + If they have different masses, a similar term is calculated + for each one. Note that f has the asymptotic behavior :math:`f(0) = 1`. + This method returns :math:`0.2271 f` using an + analytical fitting formula given in Komatsu et al. 2011, ApJS 192, 18. + """ + + # Note that there is also a scalar-z-only cython implementation of + # this in scalar_inv_efuncs.pyx, so if you find a problem in this + # you need to update there too. + + # See Komatsu et al. 2011, eq 26 and the surrounding discussion + # for an explanation of what we are doing here. + # However, this is modified to handle multiple neutrino masses + # by computing the above for each mass, then summing + prefac = 0.22710731766 # 7/8 (4/11)^4/3 -- see any cosmo book + + # The massive and massless contribution must be handled separately + # But check for common cases first + if not self._massivenu: + if np.isscalar(z): + return prefac * self._Neff + else: + return prefac * self._Neff *\ + np.ones(np.asanyarray(z).shape, dtype=np.float) + + # These are purely fitting constants -- see the Komatsu paper + p = 1.83 + invp = 0.54644808743 # 1.0 / p + k = 0.3173 + + z = np.asarray(z) + curr_nu_y = self._nu_y / (1. + np.expand_dims(z, axis=-1)) + rel_mass_per = (1.0 + (k * curr_nu_y) ** p) ** invp + rel_mass = rel_mass_per.sum(-1) + self._nmasslessnu + + return prefac * self._neff_per_nu * rel_mass + + def _w_integrand(self, ln1pz): + """ Internal convenience function for w(z) integral.""" + + # See Linder 2003, PRL 90, 91301 eq (5) + # Assumes scalar input, since this should only be called + # inside an integral + + z = exp(ln1pz) - 1.0 + return 1.0 + self.w(z) + + def de_density_scale(self, z): + r""" Evaluates the redshift dependence of the dark energy density. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + I : ndarray, or float if input scalar + The scaling of the energy density of dark energy with redshift. + + Notes + ----- + The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`, + and is given by + + .. math:: + + I = \exp \left( 3 \int_{a}^1 \frac{ da^{\prime} }{ a^{\prime} } + \left[ 1 + w\left( a^{\prime} \right) \right] \right) + + It will generally helpful for subclasses to overload this method if + the integral can be done analytically for the particular dark + energy equation of state that they implement. + """ + + # This allows for an arbitrary w(z) following eq (5) of + # Linder 2003, PRL 90, 91301. The code here evaluates + # the integral numerically. However, most popular + # forms of w(z) are designed to make this integral analytic, + # so it is probably a good idea for subclasses to overload this + # method if an analytic form is available. 
+ # + # The integral we actually use (the one given in Linder) + # is rewritten in terms of z, so looks slightly different than the + # one in the documentation string, but it's the same thing. + + from scipy.integrate import quad + + if isiterable(z): + z = np.asarray(z) + ival = np.array([quad(self._w_integrand, 0, log(1 + redshift))[0] + for redshift in z]) + return np.exp(3 * ival) + else: + ival = quad(self._w_integrand, 0, log(1 + z))[0] + return exp(3 * ival) + + def efunc(self, z): + """ Function used to calculate H(z), the Hubble parameter. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + E : ndarray, or float if input scalar + The redshift scaling of the Hubble constant. + + Notes + ----- + The return value, E, is defined such that :math:`H(z) = H_0 E`. + + It is not necessary to override this method, but if de_density_scale + takes a particularly simple form, it may be advantageous to. + """ + + if isiterable(z): + z = np.asarray(z) + + Om0, Ode0, Ok0 = self._Om0, self._Ode0, self._Ok0 + if self._massivenu: + Or = self._Ogamma0 * (1 + self.nu_relative_density(z)) + else: + Or = self._Ogamma0 + self._Onu0 + zp1 = 1.0 + z + + return np.sqrt(zp1 ** 2 * ((Or * zp1 + Om0) * zp1 + Ok0) + + Ode0 * self.de_density_scale(z)) + + def inv_efunc(self, z): + """Inverse of efunc. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + E : ndarray, or float if input scalar + The redshift scaling of the inverse Hubble constant. + """ + + # Avoid the function overhead by repeating code + if isiterable(z): + z = np.asarray(z) + Om0, Ode0, Ok0 = self._Om0, self._Ode0, self._Ok0 + if self._massivenu: + Or = self._Ogamma0 * (1 + self.nu_relative_density(z)) + else: + Or = self._Ogamma0 + self._Onu0 + zp1 = 1.0 + z + + return (zp1 ** 2 * ((Or * zp1 + Om0) * zp1 + Ok0) + + Ode0 * self.de_density_scale(z))**(-0.5) + + def _lookback_time_integrand_scalar(self, z): + """ Integrand of the lookback time. + + Parameters + ---------- + z : float + Input redshift. + + Returns + ------- + I : float + The integrand for the lookback time + + References + ---------- + Eqn 30 from Hogg 1999. + """ + + args = self._inv_efunc_scalar_args + return self._inv_efunc_scalar(z, *args) / (1.0 + z) + + def lookback_time_integrand(self, z): + """ Integrand of the lookback time. + + Parameters + ---------- + z : float or array-like + Input redshift. + + Returns + ------- + I : float or array + The integrand for the lookback time + + References + ---------- + Eqn 30 from Hogg 1999. + """ + + if isiterable(z): + zp1 = 1.0 + np.asarray(z) + else: + zp1 = 1. + z + + return self.inv_efunc(z) / zp1 + + def _abs_distance_integrand_scalar(self, z): + """ Integrand of the absorption distance. + + Parameters + ---------- + z : float + Input redshift. + + Returns + ------- + X : float + The integrand for the absorption distance + + References + ---------- + See Hogg 1999 section 11. + """ + + args = self._inv_efunc_scalar_args + return (1.0 + z) ** 2 * self._inv_efunc_scalar(z, *args) + + def abs_distance_integrand(self, z): + """ Integrand of the absorption distance. + + Parameters + ---------- + z : float or array + Input redshift. + + Returns + ------- + X : float or array + The integrand for the absorption distance + + References + ---------- + See Hogg 1999 section 11. + """ + + if isiterable(z): + zp1 = 1.0 + np.asarray(z) + else: + zp1 = 1. + z + return zp1 ** 2 * self.inv_efunc(z) + + def H(self, z): + """ Hubble parameter (km/s/Mpc) at redshift ``z``. 
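+
+        Computed simply as ``H0 * efunc(z)``.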
+ + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + H : `~astropy.units.Quantity` + Hubble parameter at each input redshift. + """ + + return self._H0 * self.efunc(z) + + def scale_factor(self, z): + """ Scale factor at redshift ``z``. + + The scale factor is defined as :math:`a = 1 / (1 + z)`. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + a : ndarray, or float if input scalar + Scale factor at each input redshift. + """ + + if isiterable(z): + z = np.asarray(z) + + return 1. / (1. + z) + + def lookback_time(self, z): + """ Lookback time in Gyr to redshift ``z``. + + The lookback time is the difference between the age of the + Universe now and the age at redshift ``z``. + + Parameters + ---------- + z : array-like + Input redshifts. Must be 1D or scalar + + Returns + ------- + t : `~astropy.units.Quantity` + Lookback time in Gyr to each input redshift. + + See Also + -------- + z_at_value : Find the redshift corresponding to a lookback time. + """ + + from scipy.integrate import quad + f = lambda red: quad(self._lookback_time_integrand_scalar, 0, red)[0] + return self._hubble_time * vectorize_if_needed(f, z) + + def lookback_distance(self, z): + """ + The lookback distance is the light travel time distance to a given + redshift. It is simply c * lookback_time. It may be used to calculate + the proper distance between two redshifts, e.g. for the mean free path + to ionizing radiation. + + Parameters + ---------- + z : array-like + Input redshifts. Must be 1D or scalar + + Returns + ------- + d : `~astropy.units.Quantity` + Lookback distance in Mpc + """ + return (self.lookback_time(z) * const.c).to(u.Mpc) + + def age(self, z): + """ Age of the universe in Gyr at redshift ``z``. + + Parameters + ---------- + z : array-like + Input redshifts. Must be 1D or scalar. + + Returns + ------- + t : `~astropy.units.Quantity` + The age of the universe in Gyr at each input redshift. + + See Also + -------- + z_at_value : Find the redshift corresponding to an age. + """ + + from scipy.integrate import quad + f = lambda red: quad(self._lookback_time_integrand_scalar, + red, np.inf)[0] + return self._hubble_time * vectorize_if_needed(f, z) + + def critical_density(self, z): + """ Critical density in grams per cubic cm at redshift ``z``. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + rho : `~astropy.units.Quantity` + Critical density in g/cm^3 at each input redshift. + """ + + return self._critical_density0 * (self.efunc(z)) ** 2 + + def comoving_distance(self, z): + """ Comoving line-of-sight distance in Mpc at a given + redshift. + + The comoving distance along the line-of-sight between two + objects remains constant with time for objects in the Hubble + flow. + + Parameters + ---------- + z : array-like + Input redshifts. Must be 1D or scalar. + + Returns + ------- + d : `~astropy.units.Quantity` + Comoving distance in Mpc to each input redshift. + """ + + return self._comoving_distance_z1z2(0, z) + + def _comoving_distance_z1z2(self, z1, z2): + """ Comoving line-of-sight distance in Mpc between objects at + redshifts z1 and z2. + + The comoving distance along the line-of-sight between two + objects remains constant with time for objects in the Hubble + flow. + + Parameters + ---------- + z1, z2 : array-like, shape (N,) + Input redshifts. Must be 1D or scalar. + + Returns + ------- + d : `~astropy.units.Quantity` + Comoving distance in Mpc between each input redshift. 
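+
+        Notes
+        -----
+        Evaluates :math:`d_C = d_H \\int_{z_1}^{z_2} dz' / E(z')`
+        (cf. Hogg 1999, eq. 15) using `scipy.integrate.quad`.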
+ """ + + from scipy.integrate import quad + f = lambda z1, z2: quad(self._inv_efunc_scalar, z1, z2, + args=self._inv_efunc_scalar_args)[0] + return self._hubble_distance * vectorize_if_needed(f, z1, z2) + + def comoving_transverse_distance(self, z): + """ Comoving transverse distance in Mpc at a given redshift. + + This value is the transverse comoving distance at redshift ``z`` + corresponding to an angular separation of 1 radian. This is + the same as the comoving distance if omega_k is zero (as in + the current concordance lambda CDM model). + + Parameters + ---------- + z : array-like + Input redshifts. Must be 1D or scalar. + + Returns + ------- + d : `~astropy.units.Quantity` + Comoving transverse distance in Mpc at each input redshift. + + Notes + ----- + This quantity also called the 'proper motion distance' in some + texts. + """ + + return self._comoving_transverse_distance_z1z2(0, z) + + def _comoving_transverse_distance_z1z2(self, z1, z2): + """Comoving transverse distance in Mpc between two redshifts. + + This value is the transverse comoving distance at redshift + ``z2`` as seen from redshift ``z1`` corresponding to an + angular separation of 1 radian. This is the same as the + comoving distance if omega_k is zero (as in the current + concordance lambda CDM model). + + Parameters + ---------- + z1, z2 : array-like, shape (N,) + Input redshifts. Must be 1D or scalar. + + Returns + ------- + d : `~astropy.units.Quantity` + Comoving transverse distance in Mpc between input redshift. + + Notes + ----- + This quantity is also called the 'proper motion distance' in + some texts. + + """ + + Ok0 = self._Ok0 + dc = self._comoving_distance_z1z2(z1, z2) + if Ok0 == 0: + return dc + sqrtOk0 = sqrt(abs(Ok0)) + dh = self._hubble_distance + if Ok0 > 0: + return dh / sqrtOk0 * np.sinh(sqrtOk0 * dc.value / dh.value) + else: + return dh / sqrtOk0 * np.sin(sqrtOk0 * dc.value / dh.value) + + def angular_diameter_distance(self, z): + """ Angular diameter distance in Mpc at a given redshift. + + This gives the proper (sometimes called 'physical') transverse + distance corresponding to an angle of 1 radian for an object + at redshift ``z``. + + Weinberg, 1972, pp 421-424; Weedman, 1986, pp 65-67; Peebles, + 1993, pp 325-327. + + Parameters + ---------- + z : array-like + Input redshifts. Must be 1D or scalar. + + Returns + ------- + d : `~astropy.units.Quantity` + Angular diameter distance in Mpc at each input redshift. + """ + + if isiterable(z): + z = np.asarray(z) + + return self.comoving_transverse_distance(z) / (1. + z) + + def luminosity_distance(self, z): + """ Luminosity distance in Mpc at redshift ``z``. + + This is the distance to use when converting between the + bolometric flux from an object at redshift ``z`` and its + bolometric luminosity. + + Parameters + ---------- + z : array-like + Input redshifts. Must be 1D or scalar. + + Returns + ------- + d : `~astropy.units.Quantity` + Luminosity distance in Mpc at each input redshift. + + See Also + -------- + z_at_value : Find the redshift corresponding to a luminosity distance. + + References + ---------- + Weinberg, 1972, pp 420-424; Weedman, 1986, pp 60-62. + """ + + if isiterable(z): + z = np.asarray(z) + + return (1. + z) * self.comoving_transverse_distance(z) + + def angular_diameter_distance_z1z2(self, z1, z2): + """ Angular diameter distance between objects at 2 redshifts. + Useful for gravitational lensing. + + Parameters + ---------- + z1, z2 : array-like, shape (N,) + Input redshifts. z2 must be large than z1. 
+ + Returns + ------- + d : `~astropy.units.Quantity`, shape (N,) or single if input scalar + The angular diameter distance between each input redshift + pair. + + """ + + z1 = np.asanyarray(z1) + z2 = np.asanyarray(z2) + return self._comoving_transverse_distance_z1z2(z1, z2) / (1. + z2) + + def absorption_distance(self, z): + """ Absorption distance at redshift ``z``. + + This is used to calculate the number of objects with some + cross section of absorption and number density intersecting a + sightline per unit redshift path. + + Parameters + ---------- + z : array-like + Input redshifts. Must be 1D or scalar. + + Returns + ------- + d : float or ndarray + Absorption distance (dimensionless) at each input redshift. + + References + ---------- + Hogg 1999 Section 11. (astro-ph/9905116) + Bahcall, John N. and Peebles, P.J.E. 1969, ApJ, 156L, 7B + """ + + from scipy.integrate import quad + f = lambda red: quad(self._abs_distance_integrand_scalar, 0, red)[0] + return vectorize_if_needed(f, z) + + def distmod(self, z): + """ Distance modulus at redshift ``z``. + + The distance modulus is defined as the (apparent magnitude - + absolute magnitude) for an object at redshift ``z``. + + Parameters + ---------- + z : array-like + Input redshifts. Must be 1D or scalar. + + Returns + ------- + distmod : `~astropy.units.Quantity` + Distance modulus at each input redshift, in magnitudes + + See Also + -------- + z_at_value : Find the redshift corresponding to a distance modulus. + """ + + # Remember that the luminosity distance is in Mpc + # Abs is necessary because in certain obscure closed cosmologies + # the distance modulus can be negative -- which is okay because + # it enters as the square. + val = 5. * np.log10(abs(self.luminosity_distance(z).value)) + 25.0 + return u.Quantity(val, u.mag) + + def comoving_volume(self, z): + """ Comoving volume in cubic Mpc at redshift ``z``. + + This is the volume of the universe encompassed by redshifts less + than ``z``. For the case of omega_k = 0 it is a sphere of radius + `comoving_distance` but it is less intuitive + if omega_k is not 0. + + Parameters + ---------- + z : array-like + Input redshifts. Must be 1D or scalar. + + Returns + ------- + V : `~astropy.units.Quantity` + Comoving volume in :math:`Mpc^3` at each input redshift. + """ + + Ok0 = self._Ok0 + if Ok0 == 0: + return 4. / 3. * pi * self.comoving_distance(z) ** 3 + + dh = self._hubble_distance.value # .value for speed + dm = self.comoving_transverse_distance(z).value + term1 = 4. * pi * dh ** 3 / (2. * Ok0) * u.Mpc ** 3 + term2 = dm / dh * np.sqrt(1 + Ok0 * (dm / dh) ** 2) + term3 = sqrt(abs(Ok0)) * dm / dh + + if Ok0 > 0: + return term1 * (term2 - 1. / sqrt(abs(Ok0)) * np.arcsinh(term3)) + else: + return term1 * (term2 - 1. / sqrt(abs(Ok0)) * np.arcsin(term3)) + + def differential_comoving_volume(self, z): + """Differential comoving volume at redshift z. + + Useful for calculating the effective comoving volume. + For example, allows for integration over a comoving volume + that has a sensitivity function that changes with redshift. + The total comoving volume is given by integrating + differential_comoving_volume to redshift z + and multiplying by a solid angle. + + Parameters + ---------- + z : array-like + Input redshifts. 
+ + Returns + ------- + dV : `~astropy.units.Quantity` + Differential comoving volume per redshift per steradian at + each input redshift.""" + dh = self._hubble_distance + da = self.angular_diameter_distance(z) + zp1 = 1.0 + z + return dh * ((zp1 * da) ** 2.0) / u.Quantity(self.efunc(z), + u.steradian) + + def kpc_comoving_per_arcmin(self, z): + """ Separation in transverse comoving kpc corresponding to an + arcminute at redshift ``z``. + + Parameters + ---------- + z : array-like + Input redshifts. Must be 1D or scalar. + + Returns + ------- + d : `~astropy.units.Quantity` + The distance in comoving kpc corresponding to an arcmin at each + input redshift. + """ + return (self.comoving_transverse_distance(z).to(u.kpc) * + arcmin_in_radians / u.arcmin) + + def kpc_proper_per_arcmin(self, z): + """ Separation in transverse proper kpc corresponding to an + arcminute at redshift ``z``. + + Parameters + ---------- + z : array-like + Input redshifts. Must be 1D or scalar. + + Returns + ------- + d : `~astropy.units.Quantity` + The distance in proper kpc corresponding to an arcmin at each + input redshift. + """ + return (self.angular_diameter_distance(z).to(u.kpc) * + arcmin_in_radians / u.arcmin) + + def arcsec_per_kpc_comoving(self, z): + """ Angular separation in arcsec corresponding to a comoving kpc + at redshift ``z``. + + Parameters + ---------- + z : array-like + Input redshifts. Must be 1D or scalar. + + Returns + ------- + theta : `~astropy.units.Quantity` + The angular separation in arcsec corresponding to a comoving kpc + at each input redshift. + """ + return u.arcsec / (self.comoving_transverse_distance(z).to(u.kpc) * + arcsec_in_radians) + + def arcsec_per_kpc_proper(self, z): + """ Angular separation in arcsec corresponding to a proper kpc at + redshift ``z``. + + Parameters + ---------- + z : array-like + Input redshifts. Must be 1D or scalar. + + Returns + ------- + theta : `~astropy.units.Quantity` + The angular separation in arcsec corresponding to a proper kpc + at each input redshift. + """ + return u.arcsec / (self.angular_diameter_distance(z).to(u.kpc) * + arcsec_in_radians) + + +class LambdaCDM(FLRW): + """FLRW cosmology with a cosmological constant and curvature. + + This has no additional attributes beyond those of FLRW. + + Parameters + ---------- + + H0 : float or `~astropy.units.Quantity` + Hubble constant at z = 0. If a float, must be in [km/sec/Mpc] + + Om0 : float + Omega matter: density of non-relativistic matter in units of the + critical density at z=0. + + Ode0 : float + Omega dark energy: density of the cosmological constant in units of + the critical density at z=0. + + Tcmb0 : float or scalar `~astropy.units.Quantity`, optional + Temperature of the CMB z=0. If a float, must be in [K]. + Default: 0 [K]. Setting this to zero will turn off both photons + and neutrinos (even massive ones). + + Neff : float, optional + Effective number of Neutrino species. Default 3.04. + + m_nu : `~astropy.units.Quantity`, optional + Mass of each neutrino species. If this is a scalar Quantity, then all + neutrino species are assumed to have that mass. Otherwise, the mass of + each species. The actual number of neutrino species (and hence the + number of elements of m_nu if it is not scalar) must be the floor of + Neff. Typically this means you should provide three neutrino masses + unless you are considering something like a sterile neutrino. + + Ob0 : float or None, optional + Omega baryons: density of baryonic matter in units of the critical + density at z=0. 
If this is set to None (the default), any + computation that requires its value will raise an exception. + + name : str, optional + Name for this cosmological object. + + Examples + -------- + >>> from astropy.cosmology import LambdaCDM + >>> cosmo = LambdaCDM(H0=70, Om0=0.3, Ode0=0.7) + + The comoving distance in Mpc at redshift z: + + >>> z = 0.5 + >>> dc = cosmo.comoving_distance(z) + """ + + def __init__(self, H0, Om0, Ode0, Tcmb0=0, Neff=3.04, + m_nu=u.Quantity(0.0, u.eV), Ob0=None, name=None): + + FLRW.__init__(self, H0, Om0, Ode0, Tcmb0, Neff, m_nu, name=name, + Ob0=Ob0) + + # Please see "Notes about speeding up integrals" for discussion + # about what is being done here. + if self._Tcmb0.value == 0: + self._inv_efunc_scalar = scalar_inv_efuncs.lcdm_inv_efunc_norel + self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0) + elif not self._massivenu: + self._inv_efunc_scalar = scalar_inv_efuncs.lcdm_inv_efunc_nomnu + self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0, + self._Ogamma0 + self._Onu0) + else: + self._inv_efunc_scalar = scalar_inv_efuncs.lcdm_inv_efunc + self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0, + self._Ogamma0, self._neff_per_nu, + self._nmasslessnu, + self._nu_y_list) + + def w(self, z): + """Returns dark energy equation of state at redshift ``z``. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + w : ndarray, or float if input scalar + The dark energy equation of state + + Notes + ------ + The dark energy equation of state is defined as + :math:`w(z) = P(z)/\\rho(z)`, where :math:`P(z)` is the + pressure at redshift z and :math:`\\rho(z)` is the density + at redshift z, both in units where c=1. Here this is + :math:`w(z) = -1`. + """ + + if np.isscalar(z): + return -1.0 + else: + return -1.0 * np.ones(np.asanyarray(z).shape, dtype=np.float) + + def de_density_scale(self, z): + """ Evaluates the redshift dependence of the dark energy density. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + I : ndarray, or float if input scalar + The scaling of the energy density of dark energy with redshift. + + Notes + ----- + The scaling factor, I, is defined by :math:`\\rho(z) = \\rho_0 I`, + and in this case is given by :math:`I = 1`. + """ + + if np.isscalar(z): + return 1. + else: + return np.ones(np.asanyarray(z).shape, dtype=np.float) + + def efunc(self, z): + """ Function used to calculate H(z), the Hubble parameter. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + E : ndarray, or float if input scalar + The redshift scaling of the Hubble constant. + + Notes + ----- + The return value, E, is defined such that :math:`H(z) = H_0 E`. + """ + + if isiterable(z): + z = np.asarray(z) + + # We override this because it takes a particularly simple + # form for a cosmological constant + Om0, Ode0, Ok0 = self._Om0, self._Ode0, self._Ok0 + if self._massivenu: + Or = self._Ogamma0 * (1. + self.nu_relative_density(z)) + else: + Or = self._Ogamma0 + self._Onu0 + zp1 = 1.0 + z + + return np.sqrt(zp1 ** 2 * ((Or * zp1 + Om0) * zp1 + Ok0) + Ode0) + + def inv_efunc(self, z): + r""" Function used to calculate :math:`\frac{1}{H_z}`. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + E : ndarray, or float if input scalar + The inverse redshift scaling of the Hubble constant. + + Notes + ----- + The return value, E, is defined such that :math:`H_z = H_0 / + E`. 
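+
+        For a cosmological constant the expression reduces to the
+        closed form
+
+        .. math::
+
+           E = \sqrt{(1+z)^2 \left[(\Omega_r (1+z) + \Omega_{m,0}) (1+z)
+               + \Omega_{k,0}\right] + \Omega_{\Lambda,0}},
+
+        which the code below evaluates directly; no numerical
+        integration of w(z) is needed.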
+ """ + + if isiterable(z): + z = np.asarray(z) + Om0, Ode0, Ok0 = self._Om0, self._Ode0, self._Ok0 + if self._massivenu: + Or = self._Ogamma0 * (1 + self.nu_relative_density(z)) + else: + Or = self._Ogamma0 + self._Onu0 + zp1 = 1.0 + z + + return (zp1 ** 2 * ((Or * zp1 + Om0) * zp1 + Ok0) + Ode0)**(-0.5) + + +class FlatLambdaCDM(LambdaCDM): + """FLRW cosmology with a cosmological constant and no curvature. + + This has no additional attributes beyond those of FLRW. + + Parameters + ---------- + H0 : float or `~astropy.units.Quantity` + Hubble constant at z = 0. If a float, must be in [km/sec/Mpc] + + Om0 : float + Omega matter: density of non-relativistic matter in units of the + critical density at z=0. + + Tcmb0 : float or scalar `~astropy.units.Quantity`, optional + Temperature of the CMB z=0. If a float, must be in [K]. + Default: 0 [K]. Setting this to zero will turn off both photons + and neutrinos (even massive ones). + + Neff : float, optional + Effective number of Neutrino species. Default 3.04. + + m_nu : `~astropy.units.Quantity`, optional + Mass of each neutrino species. If this is a scalar Quantity, then all + neutrino species are assumed to have that mass. Otherwise, the mass of + each species. The actual number of neutrino species (and hence the + number of elements of m_nu if it is not scalar) must be the floor of + Neff. Typically this means you should provide three neutrino masses + unless you are considering something like a sterile neutrino. + + Ob0 : float or None, optional + Omega baryons: density of baryonic matter in units of the critical + density at z=0. If this is set to None (the default), any + computation that requires its value will raise an exception. + + name : str, optional + Name for this cosmological object. + + Examples + -------- + >>> from astropy.cosmology import FlatLambdaCDM + >>> cosmo = FlatLambdaCDM(H0=70, Om0=0.3) + + The comoving distance in Mpc at redshift z: + + >>> z = 0.5 + >>> dc = cosmo.comoving_distance(z) + """ + + def __init__(self, H0, Om0, Tcmb0=0, Neff=3.04, + m_nu=u.Quantity(0.0, u.eV), Ob0=None, name=None): + + LambdaCDM.__init__(self, H0, Om0, 0.0, Tcmb0, Neff, m_nu, name=name, + Ob0=Ob0) + # Do some twiddling after the fact to get flatness + self._Ode0 = 1.0 - self._Om0 - self._Ogamma0 - self._Onu0 + self._Ok0 = 0.0 + + # Please see "Notes about speeding up integrals" for discussion + # about what is being done here. + if self._Tcmb0.value == 0: + self._inv_efunc_scalar = scalar_inv_efuncs.flcdm_inv_efunc_norel + self._inv_efunc_scalar_args = (self._Om0, self._Ode0) + elif not self._massivenu: + self._inv_efunc_scalar = scalar_inv_efuncs.flcdm_inv_efunc_nomnu + self._inv_efunc_scalar_args = (self._Om0, self._Ode0, + self._Ogamma0 + self._Onu0) + else: + self._inv_efunc_scalar = scalar_inv_efuncs.flcdm_inv_efunc + self._inv_efunc_scalar_args = (self._Om0, self._Ode0, + self._Ogamma0, self._neff_per_nu, + self._nmasslessnu, + self._nu_y_list) + + def efunc(self, z): + """ Function used to calculate H(z), the Hubble parameter. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + E : ndarray, or float if input scalar + The redshift scaling of the Hubble constant. + + Notes + ----- + The return value, E, is defined such that :math:`H(z) = H_0 E`. 
+ """ + + if isiterable(z): + z = np.asarray(z) + + # We override this because it takes a particularly simple + # form for a cosmological constant + Om0, Ode0 = self._Om0, self._Ode0 + if self._massivenu: + Or = self._Ogamma0 * (1 + self.nu_relative_density(z)) + else: + Or = self._Ogamma0 + self._Onu0 + zp1 = 1.0 + z + + return np.sqrt(zp1 ** 3 * (Or * zp1 + Om0) + Ode0) + + def inv_efunc(self, z): + r"""Function used to calculate :math:`\frac{1}{H_z}`. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + E : ndarray, or float if input scalar + The inverse redshift scaling of the Hubble constant. + + Notes + ----- + The return value, E, is defined such that :math:`H_z = H_0 / E`. + """ + + if isiterable(z): + z = np.asarray(z) + Om0, Ode0 = self._Om0, self._Ode0 + if self._massivenu: + Or = self._Ogamma0 * (1. + self.nu_relative_density(z)) + else: + Or = self._Ogamma0 + self._Onu0 + zp1 = 1.0 + z + return (zp1 ** 3 * (Or * zp1 + Om0) + Ode0)**(-0.5) + + def __repr__(self): + retstr = "{0}H0={1:.3g}, Om0={2:.3g}, Tcmb0={3:.4g}, "\ + "Neff={4:.3g}, m_nu={5}, Ob0={6:s})" + return retstr.format(self._namelead(), self._H0, self._Om0, + self._Tcmb0, self._Neff, self.m_nu, + _float_or_none(self._Ob0)) + + +class wCDM(FLRW): + """FLRW cosmology with a constant dark energy equation of state + and curvature. + + This has one additional attribute beyond those of FLRW. + + Parameters + ---------- + + H0 : float or `~astropy.units.Quantity` + Hubble constant at z = 0. If a float, must be in [km/sec/Mpc] + + Om0 : float + Omega matter: density of non-relativistic matter in units of the + critical density at z=0. + + Ode0 : float + Omega dark energy: density of dark energy in units of the critical + density at z=0. + + w0 : float, optional + Dark energy equation of state at all redshifts. This is + pressure/density for dark energy in units where c=1. A cosmological + constant has w0=-1.0. + + Tcmb0 : float or scalar `~astropy.units.Quantity`, optional + Temperature of the CMB z=0. If a float, must be in [K]. + Default: 0 [K]. Setting this to zero will turn off both photons + and neutrinos (even massive ones). + + Neff : float, optional + Effective number of Neutrino species. Default 3.04. + + m_nu : `~astropy.units.Quantity`, optional + Mass of each neutrino species. If this is a scalar Quantity, then all + neutrino species are assumed to have that mass. Otherwise, the mass of + each species. The actual number of neutrino species (and hence the + number of elements of m_nu if it is not scalar) must be the floor of + Neff. Typically this means you should provide three neutrino masses + unless you are considering something like a sterile neutrino. + + Ob0 : float or None, optional + Omega baryons: density of baryonic matter in units of the critical + density at z=0. If this is set to None (the default), any + computation that requires its value will raise an exception. + + name : str, optional + Name for this cosmological object. + + Examples + -------- + >>> from astropy.cosmology import wCDM + >>> cosmo = wCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9) + + The comoving distance in Mpc at redshift z: + + >>> z = 0.5 + >>> dc = cosmo.comoving_distance(z) + """ + + def __init__(self, H0, Om0, Ode0, w0=-1., Tcmb0=0, + Neff=3.04, m_nu=u.Quantity(0.0, u.eV), Ob0=None, name=None): + + FLRW.__init__(self, H0, Om0, Ode0, Tcmb0, Neff, m_nu, name=name, + Ob0=Ob0) + self._w0 = float(w0) + + # Please see "Notes about speeding up integrals" for discussion + # about what is being done here. 
+ if self._Tcmb0.value == 0: + self._inv_efunc_scalar = scalar_inv_efuncs.wcdm_inv_efunc_norel + self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0, + self._w0) + elif not self._massivenu: + self._inv_efunc_scalar = scalar_inv_efuncs.wcdm_inv_efunc_nomnu + self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0, + self._Ogamma0 + self._Onu0, + self._w0) + else: + self._inv_efunc_scalar = scalar_inv_efuncs.wcdm_inv_efunc + self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0, + self._Ogamma0, self._neff_per_nu, + self._nmasslessnu, + self._nu_y_list, self._w0) + + @property + def w0(self): + """ Dark energy equation of state""" + return self._w0 + + def w(self, z): + """Returns dark energy equation of state at redshift ``z``. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + w : ndarray, or float if input scalar + The dark energy equation of state + + Notes + ------ + The dark energy equation of state is defined as + :math:`w(z) = P(z)/\\rho(z)`, where :math:`P(z)` is the + pressure at redshift z and :math:`\\rho(z)` is the density + at redshift z, both in units where c=1. Here this is + :math:`w(z) = w_0`. + """ + + if np.isscalar(z): + return self._w0 + else: + return self._w0 * np.ones(np.asanyarray(z).shape, dtype=np.float) + + def de_density_scale(self, z): + """ Evaluates the redshift dependence of the dark energy density. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + I : ndarray, or float if input scalar + The scaling of the energy density of dark energy with redshift. + + Notes + ----- + The scaling factor, I, is defined by :math:`\\rho(z) = \\rho_0 I`, + and in this case is given by + :math:`I = \\left(1 + z\\right)^{3\\left(1 + w_0\\right)}` + """ + + if isiterable(z): + z = np.asarray(z) + return (1. + z) ** (3. * (1. + self._w0)) + + def efunc(self, z): + """ Function used to calculate H(z), the Hubble parameter. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + E : ndarray, or float if input scalar + The redshift scaling of the Hubble constant. + + Notes + ----- + The return value, E, is defined such that :math:`H(z) = H_0 E`. + """ + + if isiterable(z): + z = np.asarray(z) + Om0, Ode0, Ok0, w0 = self._Om0, self._Ode0, self._Ok0, self._w0 + if self._massivenu: + Or = self._Ogamma0 * (1. + self.nu_relative_density(z)) + else: + Or = self._Ogamma0 + self._Onu0 + zp1 = 1.0 + z + + return np.sqrt(zp1 ** 2 * ((Or * zp1 + Om0) * zp1 + Ok0) + + Ode0 * zp1 ** (3. * (1. + w0))) + + def inv_efunc(self, z): + r""" Function used to calculate :math:`\frac{1}{H_z}`. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + E : ndarray, or float if input scalar + The inverse redshift scaling of the Hubble constant. + + Notes + ----- + The return value, E, is defined such that :math:`H_z = H_0 / E`. + """ + + if isiterable(z): + z = np.asarray(z) + Om0, Ode0, Ok0, w0 = self._Om0, self._Ode0, self._Ok0, self._w0 + if self._massivenu: + Or = self._Ogamma0 * (1. + self.nu_relative_density(z)) + else: + Or = self._Ogamma0 + self._Onu0 + zp1 = 1.0 + z + + return (zp1 ** 2 * ((Or * zp1 + Om0) * zp1 + Ok0) + + Ode0 * zp1 ** (3. * (1. 
+ w0)))**(-0.5) + + def __repr__(self): + retstr = "{0}H0={1:.3g}, Om0={2:.3g}, Ode0={3:.3g}, w0={4:.3g}, "\ + "Tcmb0={5:.4g}, Neff={6:.3g}, m_nu={7}, Ob0={8:s})" + return retstr.format(self._namelead(), self._H0, self._Om0, + self._Ode0, self._w0, self._Tcmb0, self._Neff, + self.m_nu, _float_or_none(self._Ob0)) + + +class FlatwCDM(wCDM): + """FLRW cosmology with a constant dark energy equation of state + and no spatial curvature. + + This has one additional attribute beyond those of FLRW. + + Parameters + ---------- + + H0 : float or `~astropy.units.Quantity` + Hubble constant at z = 0. If a float, must be in [km/sec/Mpc] + + Om0 : float + Omega matter: density of non-relativistic matter in units of the + critical density at z=0. + + w0 : float, optional + Dark energy equation of state at all redshifts. This is + pressure/density for dark energy in units where c=1. A cosmological + constant has w0=-1.0. + + Tcmb0 : float or scalar `~astropy.units.Quantity`, optional + Temperature of the CMB z=0. If a float, must be in [K]. + Default: 0 [K]. Setting this to zero will turn off both photons + and neutrinos (even massive ones). + + Neff : float, optional + Effective number of Neutrino species. Default 3.04. + + m_nu : `~astropy.units.Quantity`, optional + Mass of each neutrino species. If this is a scalar Quantity, then all + neutrino species are assumed to have that mass. Otherwise, the mass of + each species. The actual number of neutrino species (and hence the + number of elements of m_nu if it is not scalar) must be the floor of + Neff. Typically this means you should provide three neutrino masses + unless you are considering something like a sterile neutrino. + + Ob0 : float or None, optional + Omega baryons: density of baryonic matter in units of the critical + density at z=0. If this is set to None (the default), any + computation that requires its value will raise an exception. + + name : str, optional + Name for this cosmological object. + + Examples + -------- + >>> from astropy.cosmology import FlatwCDM + >>> cosmo = FlatwCDM(H0=70, Om0=0.3, w0=-0.9) + + The comoving distance in Mpc at redshift z: + + >>> z = 0.5 + >>> dc = cosmo.comoving_distance(z) + """ + + def __init__(self, H0, Om0, w0=-1., Tcmb0=0, + Neff=3.04, m_nu=u.Quantity(0.0, u.eV), Ob0=None, name=None): + + wCDM.__init__(self, H0, Om0, 0.0, w0, Tcmb0, Neff, m_nu, + name=name, Ob0=Ob0) + # Do some twiddling after the fact to get flatness + self._Ode0 = 1.0 - self._Om0 - self._Ogamma0 - self._Onu0 + self._Ok0 = 0.0 + + # Please see "Notes about speeding up integrals" for discussion + # about what is being done here. + if self._Tcmb0.value == 0: + self._inv_efunc_scalar = scalar_inv_efuncs.fwcdm_inv_efunc_norel + self._inv_efunc_scalar_args = (self._Om0, self._Ode0, + self._w0) + elif not self._massivenu: + self._inv_efunc_scalar = scalar_inv_efuncs.fwcdm_inv_efunc_nomnu + self._inv_efunc_scalar_args = (self._Om0, self._Ode0, + self._Ogamma0 + self._Onu0, + self._w0) + else: + self._inv_efunc_scalar = scalar_inv_efuncs.fwcdm_inv_efunc + self._inv_efunc_scalar_args = (self._Om0, self._Ode0, + self._Ogamma0, self._neff_per_nu, + self._nmasslessnu, + self._nu_y_list, self._w0) + + def efunc(self, z): + """ Function used to calculate H(z), the Hubble parameter. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + E : ndarray, or float if input scalar + The redshift scaling of the Hubble constant. + + Notes + ----- + The return value, E, is defined such that :math:`H(z) = H_0 E`. 
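+
+        For constant w0 and zero curvature this takes the closed form
+        :math:`E = \\sqrt{(1+z)^3 (\\Omega_r (1+z) + \\Omega_{m,0})
+        + \\Omega_{de,0} (1+z)^{3 (1 + w_0)}}`, as evaluated
+        directly below.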
+ """ + + if isiterable(z): + z = np.asarray(z) + Om0, Ode0, w0 = self._Om0, self._Ode0, self._w0 + if self._massivenu: + Or = self._Ogamma0 * (1. + self.nu_relative_density(z)) + else: + Or = self._Ogamma0 + self._Onu0 + zp1 = 1. + z + + return np.sqrt(zp1 ** 3 * (Or * zp1 + Om0) + + Ode0 * zp1 ** (3. * (1 + w0))) + + def inv_efunc(self, z): + r""" Function used to calculate :math:`\frac{1}{H_z}`. + + Parameters + ---------- + z : array-like + Input redshifts. + + Returns + ------- + E : ndarray, or float if input scalar + The inverse redshift scaling of the Hubble constant. + + Notes + ----- + The return value, E, is defined such that :math:`H_z = H_0 / E`. + """ + + if isiterable(z): + z = np.asarray(z) + Om0, Ode0, w0 = self._Om0, self._Ode0, self._w0 + if self._massivenu: + Or = self._Ogamma0 * (1. + self.nu_relative_density(z)) + else: + Or = self._Ogamma0 + self._Onu0 + zp1 = 1. + z + + return (zp1 ** 3 * (Or * zp1 + Om0) + + Ode0 * zp1 ** (3. * (1. + w0)))**(-0.5) + + def __repr__(self): + retstr = "{0}H0={1:.3g}, Om0={2:.3g}, w0={3:.3g}, Tcmb0={4:.4g}, "\ + "Neff={5:.3g}, m_nu={6}, Ob0={7:s})" + return retstr.format(self._namelead(), self._H0, self._Om0, self._w0, + self._Tcmb0, self._Neff, self.m_nu, + _float_or_none(self._Ob0)) + + +class w0waCDM(FLRW): + """FLRW cosmology with a CPL dark energy equation of state and curvature. + + The equation for the dark energy equation of state uses the + CPL form as described in Chevallier & Polarski Int. J. Mod. Phys. + D10, 213 (2001) and Linder PRL 90, 91301 (2003): + :math:`w(z) = w_0 + w_a (1-a) = w_0 + w_a z / (1+z)`. + + Parameters + ---------- + H0 : float or `~astropy.units.Quantity` + Hubble constant at z = 0. If a float, must be in [km/sec/Mpc] + + Om0 : float + Omega matter: density of non-relativistic matter in units of the + critical density at z=0. + + Ode0 : float + Omega dark energy: density of dark energy in units of the critical + density at z=0. + + w0 : float, optional + Dark energy equation of state at z=0 (a=1). This is pressure/density + for dark energy in units where c=1. + + wa : float, optional + Negative derivative of the dark energy equation of state with respect + to the scale factor. A cosmological constant has w0=-1.0 and wa=0.0. + + Tcmb0 : float or scalar `~astropy.units.Quantity`, optional + Temperature of the CMB z=0. If a float, must be in [K]. + Default: 0 [K]. Setting this to zero will turn off both photons + and neutrinos (even massive ones). + + Neff : float, optional + Effective number of Neutrino species. Default 3.04. + + m_nu : `~astropy.units.Quantity`, optional + Mass of each neutrino species. If this is a scalar Quantity, then all + neutrino species are assumed to have that mass. Otherwise, the mass of + each species. The actual number of neutrino species (and hence the + number of elements of m_nu if it is not scalar) must be the floor of + Neff. Typically this means you should provide three neutrino masses + unless you are considering something like a sterile neutrino. + + Ob0 : float or None, optional + Omega baryons: density of baryonic matter in units of the critical + density at z=0. If this is set to None (the default), any + computation that requires its value will raise an exception. + + name : str, optional + Name for this cosmological object. 
+
+    Examples
+    --------
+    >>> from astropy.cosmology import w0waCDM
+    >>> cosmo = w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=0.2)
+
+    The comoving distance in Mpc at redshift z:
+
+    >>> z = 0.5
+    >>> dc = cosmo.comoving_distance(z)
+    """
+
+    def __init__(self, H0, Om0, Ode0, w0=-1., wa=0., Tcmb0=0,
+                 Neff=3.04, m_nu=u.Quantity(0.0, u.eV), Ob0=None, name=None):
+
+        FLRW.__init__(self, H0, Om0, Ode0, Tcmb0, Neff, m_nu, name=name,
+                      Ob0=Ob0)
+        self._w0 = float(w0)
+        self._wa = float(wa)
+
+        # Please see "Notes about speeding up integrals" for discussion
+        # about what is being done here.
+        if self._Tcmb0.value == 0:
+            self._inv_efunc_scalar = scalar_inv_efuncs.w0wacdm_inv_efunc_norel
+            self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
+                                           self._w0, self._wa)
+        elif not self._massivenu:
+            self._inv_efunc_scalar = scalar_inv_efuncs.w0wacdm_inv_efunc_nomnu
+            self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
+                                           self._Ogamma0 + self._Onu0,
+                                           self._w0, self._wa)
+        else:
+            self._inv_efunc_scalar = scalar_inv_efuncs.w0wacdm_inv_efunc
+            self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
+                                           self._Ogamma0, self._neff_per_nu,
+                                           self._nmasslessnu,
+                                           self._nu_y_list, self._w0,
+                                           self._wa)
+
+    @property
+    def w0(self):
+        """ Dark energy equation of state at z=0"""
+        return self._w0
+
+    @property
+    def wa(self):
+        """ Negative derivative of dark energy equation of state w.r.t. a"""
+        return self._wa
+
+    def w(self, z):
+        """Returns dark energy equation of state at redshift ``z``.
+
+        Parameters
+        ----------
+        z : array-like
+          Input redshifts.
+
+        Returns
+        -------
+        w : ndarray, or float if input scalar
+          The dark energy equation of state
+
+        Notes
+        ------
+        The dark energy equation of state is defined as
+        :math:`w(z) = P(z)/\\rho(z)`, where :math:`P(z)` is the
+        pressure at redshift z and :math:`\\rho(z)` is the density
+        at redshift z, both in units where c=1.  Here this is
+        :math:`w(z) = w_0 + w_a (1 - a) = w_0 + w_a \\frac{z}{1+z}`.
+        """
+
+        if isiterable(z):
+            z = np.asarray(z)
+
+        return self._w0 + self._wa * z / (1.0 + z)
+
+    def de_density_scale(self, z):
+        r""" Evaluates the redshift dependence of the dark energy density.
+
+        Parameters
+        ----------
+        z : array-like
+          Input redshifts.
+
+        Returns
+        -------
+        I : ndarray, or float if input scalar
+          The scaling of the energy density of dark energy with redshift.
+
+        Notes
+        -----
+        The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`,
+        and in this case is given by
+
+        .. math::
+
+          I = \left(1 + z\right)^{3 \left(1 + w_0 + w_a\right)}
+          \exp \left(-3 w_a \frac{z}{1+z}\right)
+
+        """
+        if isiterable(z):
+            z = np.asarray(z)
+        zp1 = 1.0 + z
+        return zp1 ** (3 * (1 + self._w0 + self._wa)) * \
+            np.exp(-3 * self._wa * z / zp1)
+
+    def __repr__(self):
+        retstr = "{0}H0={1:.3g}, Om0={2:.3g}, "\
+                 "Ode0={3:.3g}, w0={4:.3g}, wa={5:.3g}, Tcmb0={6:.4g}, "\
+                 "Neff={7:.3g}, m_nu={8}, Ob0={9:s})"
+        return retstr.format(self._namelead(), self._H0, self._Om0,
+                             self._Ode0, self._w0, self._wa,
+                             self._Tcmb0, self._Neff, self.m_nu,
+                             _float_or_none(self._Ob0))
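+
+# Illustrative sketch (not part of the upstream API): the CPL form above
+# runs from w0 at z=0 toward w0 + wa in the far past (a -> 0), e.g.:
+#
+#     >>> from astropy.cosmology import w0waCDM
+#     >>> cpl = w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=0.2)
+#     >>> cpl.w(0)            # equals w0
+#     -0.9
+#     >>> cpl.w(1)            # w0 + wa * z/(1+z) = -0.9 + 0.2 * 0.5
+#     -0.8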
+
+
+class Flatw0waCDM(w0waCDM):
+    """FLRW cosmology with a CPL dark energy equation of state and no
+    curvature.
+
+    The equation for the dark energy equation of state uses the
+    CPL form as described in Chevallier & Polarski Int. J. Mod. Phys.
+    D10, 213 (2001) and Linder PRL 90, 91301 (2003):
+    :math:`w(z) = w_0 + w_a (1-a) = w_0 + w_a z / (1+z)`.
+
+    Parameters
+    ----------
+
+    H0 : float or `~astropy.units.Quantity`
+        Hubble constant at z = 0.  If a float, must be in [km/sec/Mpc]
+
+    Om0 : float
+        Omega matter: density of non-relativistic matter in units of the
+        critical density at z=0.
+
+    w0 : float, optional
+        Dark energy equation of state at z=0 (a=1).  This is
+        pressure/density for dark energy in units where c=1.
+
+    wa : float, optional
+        Negative derivative of the dark energy equation of state with respect
+        to the scale factor.  A cosmological constant has w0=-1.0 and wa=0.0.
+
+    Tcmb0 : float or scalar `~astropy.units.Quantity`, optional
+        Temperature of the CMB z=0. If a float, must be in [K].
+        Default: 0 [K]. Setting this to zero will turn off both photons
+        and neutrinos (even massive ones).
+
+    Neff : float, optional
+        Effective number of Neutrino species. Default 3.04.
+
+    m_nu : `~astropy.units.Quantity`, optional
+        Mass of each neutrino species. If this is a scalar Quantity, then all
+        neutrino species are assumed to have that mass. Otherwise, the mass of
+        each species. The actual number of neutrino species (and hence the
+        number of elements of m_nu if it is not scalar) must be the floor of
+        Neff. Typically this means you should provide three neutrino masses
+        unless you are considering something like a sterile neutrino.
+
+    Ob0 : float or None, optional
+        Omega baryons: density of baryonic matter in units of the critical
+        density at z=0.  If this is set to None (the default), any
+        computation that requires its value will raise an exception.
+
+    name : str, optional
+        Name for this cosmological object.
+
+    Examples
+    --------
+    >>> from astropy.cosmology import Flatw0waCDM
+    >>> cosmo = Flatw0waCDM(H0=70, Om0=0.3, w0=-0.9, wa=0.2)
+
+    The comoving distance in Mpc at redshift z:
+
+    >>> z = 0.5
+    >>> dc = cosmo.comoving_distance(z)
+    """
+
+    def __init__(self, H0, Om0, w0=-1., wa=0., Tcmb0=0,
+                 Neff=3.04, m_nu=u.Quantity(0.0, u.eV), Ob0=None, name=None):
+
+        w0waCDM.__init__(self, H0, Om0, 0.0, w0=w0, wa=wa, Tcmb0=Tcmb0,
+                         Neff=Neff, m_nu=m_nu, name=name, Ob0=Ob0)
+        # Do some twiddling after the fact to get flatness
+        self._Ode0 = 1.0 - self._Om0 - self._Ogamma0 - self._Onu0
+        self._Ok0 = 0.0
+
+        # Please see "Notes about speeding up integrals" for discussion
+        # about what is being done here.
+        if self._Tcmb0.value == 0:
+            self._inv_efunc_scalar = scalar_inv_efuncs.fw0wacdm_inv_efunc_norel
+            self._inv_efunc_scalar_args = (self._Om0, self._Ode0,
+                                           self._w0, self._wa)
+        elif not self._massivenu:
+            self._inv_efunc_scalar = scalar_inv_efuncs.fw0wacdm_inv_efunc_nomnu
+            self._inv_efunc_scalar_args = (self._Om0, self._Ode0,
+                                           self._Ogamma0 + self._Onu0,
+                                           self._w0, self._wa)
+        else:
+            self._inv_efunc_scalar = scalar_inv_efuncs.fw0wacdm_inv_efunc
+            self._inv_efunc_scalar_args = (self._Om0, self._Ode0,
+                                           self._Ogamma0, self._neff_per_nu,
+                                           self._nmasslessnu,
+                                           self._nu_y_list, self._w0,
+                                           self._wa)
+
+    def __repr__(self):
+        retstr = "{0}H0={1:.3g}, Om0={2:.3g}, "\
+                 "w0={3:.3g}, wa={4:.3g}, Tcmb0={5:.4g}, Neff={6:.3g}, "\
+                 "m_nu={7}, Ob0={8:s})"
+        return retstr.format(self._namelead(), self._H0, self._Om0, self._w0,
+                             self._wa, self._Tcmb0, self._Neff, self.m_nu,
+                             _float_or_none(self._Ob0))
+
+
+class wpwaCDM(FLRW):
+    """FLRW cosmology with a CPL dark energy equation of state, a pivot
+    redshift, and curvature.
+
+    The equation for the dark energy equation of state uses the
+    CPL form as described in Chevallier & Polarski Int. J. Mod. Phys.
+    D10, 213 (2001) and Linder PRL 90, 91301 (2003), but modified
+    to have a pivot redshift as in the findings of the Dark Energy
+    Task Force (Albrecht et al.
arXiv:0901.0721 (2009)): + :math:`w(a) = w_p + w_a (a_p - a) = w_p + w_a( 1/(1+zp) - 1/(1+z) )`. + + Parameters + ---------- + + H0 : float or `~astropy.units.Quantity` + Hubble constant at z = 0. If a float, must be in [km/sec/Mpc] + + Om0 : float + Omega matter: density of non-relativistic matter in units of the + critical density at z=0. + + Ode0 : float + Omega dark energy: density of dark energy in units of the critical + density at z=0. + + wp : float, optional + Dark energy equation of state at the pivot redshift zp. This is + pressure/density for dark energy in units where c=1. + + wa : float, optional + Negative derivative of the dark energy equation of state with respect + to the scale factor. A cosmological constant has wp=-1.0 and wa=0.0. + + zp : float, optional + Pivot redshift -- the redshift where w(z) = wp + + Tcmb0 : float or scalar `~astropy.units.Quantity`, optional + Temperature of the CMB z=0. If a float, must be in [K]. + Default: 0 [K]. Setting this to zero will turn off both photons + and neutrinos (even massive ones). + + Neff : float, optional + Effective number of Neutrino species. Default 3.04. + + m_nu : `~astropy.units.Quantity`, optional + Mass of each neutrino species. If this is a scalar Quantity, then all + neutrino species are assumed to have that mass. Otherwise, the mass of + each species. The actual number of neutrino species (and hence the + number of elements of m_nu if it is not scalar) must be the floor of + Neff. Typically this means you should provide three neutrino masses + unless you are considering something like a sterile neutrino. + + Ob0 : float or None, optional + Omega baryons: density of baryonic matter in units of the critical + density at z=0. If this is set to None (the default), any + computation that requires its value will raise an exception. + + name : str, optional + Name for this cosmological object. + + Examples + -------- + >>> from astropy.cosmology import wpwaCDM + >>> cosmo = wpwaCDM(H0=70, Om0=0.3, Ode0=0.7, wp=-0.9, wa=0.2, zp=0.4) + + The comoving distance in Mpc at redshift z: + + >>> z = 0.5 + >>> dc = cosmo.comoving_distance(z) + """ + + def __init__(self, H0, Om0, Ode0, wp=-1., wa=0., zp=0, + Tcmb0=0, Neff=3.04, m_nu=u.Quantity(0.0, u.eV), + Ob0=None, name=None): + + FLRW.__init__(self, H0, Om0, Ode0, Tcmb0, Neff, m_nu, name=name, + Ob0=Ob0) + self._wp = float(wp) + self._wa = float(wa) + self._zp = float(zp) + + # Please see "Notes about speeding up integrals" for discussion + # about what is being done here. + apiv = 1.0 / (1.0 + self._zp) + if self._Tcmb0.value == 0: + self._inv_efunc_scalar = scalar_inv_efuncs.wpwacdm_inv_efunc_norel + self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0, + self._wp, apiv, self._wa) + elif not self._massivenu: + self._inv_efunc_scalar = scalar_inv_efuncs.wpwacdm_inv_efunc_nomnu + self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0, + self._Ogamma0 + self._Onu0, + self._wp, apiv, self._wa) + else: + self._inv_efunc_scalar = scalar_inv_efuncs.wpwacdm_inv_efunc + self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0, + self._Ogamma0, self._neff_per_nu, + self._nmasslessnu, + self._nu_y_list, self._wp, + apiv, self._wa) + + @property + def wp(self): + """ Dark energy equation of state at the pivot redshift zp""" + return self._wp + + @property + def wa(self): + """ Negative derivative of dark energy equation of state w.r.t. 
+        a"""
+        return self._wa
+
+    @property
+    def zp(self):
+        """ The pivot redshift, where w(z) = wp"""
+        return self._zp
+
+    def w(self, z):
+        """Returns dark energy equation of state at redshift ``z``.
+
+        Parameters
+        ----------
+        z : array-like
+          Input redshifts.
+
+        Returns
+        -------
+        w : ndarray, or float if input scalar
+          The dark energy equation of state
+
+        Notes
+        ------
+        The dark energy equation of state is defined as
+        :math:`w(z) = P(z)/\\rho(z)`, where :math:`P(z)` is the
+        pressure at redshift z and :math:`\\rho(z)` is the density
+        at redshift z, both in units where c=1.  Here this is
+        :math:`w(z) = w_p + w_a (a_p - a)` where :math:`a = 1/(1+z)`
+        and :math:`a_p = 1/(1 + z_p)`.
+        """
+
+        if isiterable(z):
+            z = np.asarray(z)
+
+        apiv = 1.0 / (1.0 + self._zp)
+        return self._wp + self._wa * (apiv - 1.0 / (1. + z))
+
+    def de_density_scale(self, z):
+        r""" Evaluates the redshift dependence of the dark energy density.
+
+        Parameters
+        ----------
+        z : array-like
+          Input redshifts.
+
+        Returns
+        -------
+        I : ndarray, or float if input scalar
+          The scaling of the energy density of dark energy with redshift.
+
+        Notes
+        -----
+        The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`,
+        and in this case is given by
+
+        .. math::
+
+          a_p = \frac{1}{1 + z_p}
+
+          I = \left(1 + z\right)^{3 \left(1 + w_p + a_p w_a\right)}
+          \exp \left(-3 w_a \frac{z}{1+z}\right)
+        """
+
+        if isiterable(z):
+            z = np.asarray(z)
+        zp1 = 1. + z
+        apiv = 1. / (1. + self._zp)
+        return zp1 ** (3. * (1. + self._wp + apiv * self._wa)) * \
+            np.exp(-3. * self._wa * z / zp1)
+
+    def __repr__(self):
+        retstr = "{0}H0={1:.3g}, Om0={2:.3g}, Ode0={3:.3g}, wp={4:.3g}, "\
+                 "wa={5:.3g}, zp={6:.3g}, Tcmb0={7:.4g}, Neff={8:.3g}, "\
+                 "m_nu={9}, Ob0={10:s})"
+        return retstr.format(self._namelead(), self._H0, self._Om0,
+                             self._Ode0, self._wp, self._wa, self._zp,
+                             self._Tcmb0, self._Neff, self.m_nu,
+                             _float_or_none(self._Ob0))
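+
+# Illustrative sketch of the pivot form above (not upstream code): by
+# construction w(zp) == wp, since a equals a_p exactly at the pivot
+# redshift:
+#
+#     >>> from astropy.cosmology import wpwaCDM
+#     >>> cosmo = wpwaCDM(H0=70, Om0=0.3, Ode0=0.7, wp=-0.9, wa=0.2, zp=0.4)
+#     >>> cosmo.w(0.4)        # recovers wp at z = zp
+#     -0.9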
+
+
+class w0wzCDM(FLRW):
+    """FLRW cosmology with a variable dark energy equation of state
+    and curvature.
+
+    The equation for the dark energy equation of state uses the
+    simple form: :math:`w(z) = w_0 + w_z z`.
+
+    This form is not recommended for z > 1.
+
+    Parameters
+    ----------
+
+    H0 : float or `~astropy.units.Quantity`
+        Hubble constant at z = 0.  If a float, must be in [km/sec/Mpc]
+
+    Om0 : float
+        Omega matter: density of non-relativistic matter in units of the
+        critical density at z=0.
+
+    Ode0 : float
+        Omega dark energy: density of dark energy in units of the critical
+        density at z=0.
+
+    w0 : float, optional
+        Dark energy equation of state at z=0.  This is pressure/density for
+        dark energy in units where c=1.
+
+    wz : float, optional
+        Derivative of the dark energy equation of state with respect to z.
+        A cosmological constant has w0=-1.0 and wz=0.0.
+
+    Tcmb0 : float or scalar `~astropy.units.Quantity`, optional
+        Temperature of the CMB z=0. If a float, must be in [K].
+        Default: 0 [K]. Setting this to zero will turn off both photons
+        and neutrinos (even massive ones).
+
+    Neff : float, optional
+        Effective number of Neutrino species. Default 3.04.
+
+    m_nu : `~astropy.units.Quantity`, optional
+        Mass of each neutrino species. If this is a scalar Quantity, then all
+        neutrino species are assumed to have that mass. Otherwise, the mass of
+        each species. The actual number of neutrino species (and hence the
+        number of elements of m_nu if it is not scalar) must be the floor of
+        Neff. Typically this means you should provide three neutrino masses
+        unless you are considering something like a sterile neutrino.
+
+    Ob0 : float or None, optional
+        Omega baryons: density of baryonic matter in units of the critical
+        density at z=0.  If this is set to None (the default), any
+        computation that requires its value will raise an exception.
+
+    name : str, optional
+        Name for this cosmological object.
+
+    Examples
+    --------
+    >>> from astropy.cosmology import w0wzCDM
+    >>> cosmo = w0wzCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wz=0.2)
+
+    The comoving distance in Mpc at redshift z:
+
+    >>> z = 0.5
+    >>> dc = cosmo.comoving_distance(z)
+    """
+
+    def __init__(self, H0, Om0, Ode0, w0=-1., wz=0., Tcmb0=0,
+                 Neff=3.04, m_nu=u.Quantity(0.0, u.eV), Ob0=None,
+                 name=None):
+
+        FLRW.__init__(self, H0, Om0, Ode0, Tcmb0, Neff, m_nu, name=name,
+                      Ob0=Ob0)
+        self._w0 = float(w0)
+        self._wz = float(wz)
+
+        # Please see "Notes about speeding up integrals" for discussion
+        # about what is being done here.
+        if self._Tcmb0.value == 0:
+            self._inv_efunc_scalar = scalar_inv_efuncs.w0wzcdm_inv_efunc_norel
+            self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
+                                           self._w0, self._wz)
+        elif not self._massivenu:
+            self._inv_efunc_scalar = scalar_inv_efuncs.w0wzcdm_inv_efunc_nomnu
+            self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
+                                           self._Ogamma0 + self._Onu0,
+                                           self._w0, self._wz)
+        else:
+            self._inv_efunc_scalar = scalar_inv_efuncs.w0wzcdm_inv_efunc
+            self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
+                                           self._Ogamma0, self._neff_per_nu,
+                                           self._nmasslessnu,
+                                           self._nu_y_list, self._w0,
+                                           self._wz)
+
+    @property
+    def w0(self):
+        """ Dark energy equation of state at z=0"""
+        return self._w0
+
+    @property
+    def wz(self):
+        """ Derivative of the dark energy equation of state w.r.t. z"""
+        return self._wz
+
+    def w(self, z):
+        """Returns dark energy equation of state at redshift ``z``.
+
+        Parameters
+        ----------
+        z : array-like
+          Input redshifts.
+
+        Returns
+        -------
+        w : ndarray, or float if input scalar
+          The dark energy equation of state
+
+        Notes
+        ------
+        The dark energy equation of state is defined as
+        :math:`w(z) = P(z)/\\rho(z)`, where :math:`P(z)` is the
+        pressure at redshift z and :math:`\\rho(z)` is the density
+        at redshift z, both in units where c=1.  Here this is given by
+        :math:`w(z) = w_0 + w_z z`.
+        """
+
+        if isiterable(z):
+            z = np.asarray(z)
+
+        return self._w0 + self._wz * z
+
+    def de_density_scale(self, z):
+        r""" Evaluates the redshift dependence of the dark energy density.
+
+        Parameters
+        ----------
+        z : array-like
+          Input redshifts.
+
+        Returns
+        -------
+        I : ndarray, or float if input scalar
+          The scaling of the energy density of dark energy with redshift.
+
+        Notes
+        -----
+        The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`,
+        and in this case is given by
+
+        .. math::
+
+          I = \left(1 + z\right)^{3 \left(1 + w_0 - w_z\right)}
+          \exp \left(3 w_z z\right)
+        """
+
+        if isiterable(z):
+            z = np.asarray(z)
+        zp1 = 1. + z
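+        # The scaling follows from
+        #     rho_de(z) / rho_de0 = exp(3 * Int_0^z [1 + w(z')] dz' / (1 + z'))
+        # which, for w(z') = w0 + wz * z', integrates to
+        #     (1 + w0 - wz) * ln(1 + z) + wz * z,
+        # i.e. the (1+z) power law times exp(3 * wz * z) returned below.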
+        return zp1 ** (3. * (1. + self._w0 - self._wz)) *\
+            np.exp(3. * self._wz * z)
+
+    def __repr__(self):
+        retstr = "{0}H0={1:.3g}, Om0={2:.3g}, "\
+                 "Ode0={3:.3g}, w0={4:.3g}, wz={5:.3g}, Tcmb0={6:.4g}, "\
+                 "Neff={7:.3g}, m_nu={8}, Ob0={9:s})"
+        return retstr.format(self._namelead(), self._H0, self._Om0,
+                             self._Ode0, self._w0, self._wz, self._Tcmb0,
+                             self._Neff, self.m_nu, _float_or_none(self._Ob0))
+
+
+def _float_or_none(x, digits=3):
+    """ Helper function to format a variable that can be a float or None"""
+    if x is None:
+        return str(x)
+    return "{0:.{digits}g}".format(x, digits=digits)
+
+
+def vectorize_if_needed(func, *x):
+    """ Helper function to vectorize functions on array inputs"""
+    if any(map(isiterable, x)):
+        return np.vectorize(func)(*x)
+    else:
+        return func(*x)
+
+
+# Pre-defined cosmologies. This loops over the parameter sets in the
+# parameters module and creates a LambdaCDM or FlatLambdaCDM instance
+# with the same name as the parameter set in the current module's namespace.
+# Note this assumes all the cosmologies in parameters are LambdaCDM,
+# which is true at least as of this writing.
+
+for key in parameters.available:
+    par = getattr(parameters, key)
+    if par['flat']:
+        cosmo = FlatLambdaCDM(par['H0'], par['Om0'], Tcmb0=par['Tcmb0'],
+                              Neff=par['Neff'],
+                              m_nu=u.Quantity(par['m_nu'], u.eV),
+                              name=key,
+                              Ob0=par['Ob0'])
+        docstr = "{} instance of FlatLambdaCDM cosmology\n\n(from {})"
+        cosmo.__doc__ = docstr.format(key, par['reference'])
+    else:
+        cosmo = LambdaCDM(par['H0'], par['Om0'], par['Ode0'],
+                          Tcmb0=par['Tcmb0'], Neff=par['Neff'],
+                          m_nu=u.Quantity(par['m_nu'], u.eV), name=key,
+                          Ob0=par['Ob0'])
+        docstr = "{} instance of LambdaCDM cosmology\n\n(from {})"
+        cosmo.__doc__ = docstr.format(key, par['reference'])
+    setattr(sys.modules[__name__], key, cosmo)
+
+# don't leave these variables floating around in the namespace
+del key, par, cosmo
+
+#########################################################################
+# The science state below contains the current cosmology.
+#########################################################################
+
+
+class default_cosmology(ScienceState):
+    """
+    The default cosmology to use.  To change it::
+
+        >>> from astropy.cosmology import default_cosmology, WMAP7
+        >>> with default_cosmology.set(WMAP7):
+        ...     # WMAP7 cosmology in effect
+        ...     pass
+
+    Or, you may use a string::
+
+        >>> with default_cosmology.set('WMAP7'):
+        ...     # WMAP7 cosmology in effect
+        ...     pass
+    """
+    _value = 'WMAP9'
+
+    @staticmethod
+    def get_cosmology_from_string(arg):
+        """ Return a cosmology instance from a string.
+        """
+        if arg == 'no_default':
+            cosmo = None
+        else:
+            try:
+                cosmo = getattr(sys.modules[__name__], arg)
+            except AttributeError:
+                s = "Unknown cosmology '{}'. Valid cosmologies:\n{}".format(
+                    arg, parameters.available)
+                raise ValueError(s)
+        return cosmo
+
+    @classmethod
+    def validate(cls, value):
+        if value is None:
+            value = 'Planck15'
+        if isinstance(value, six.string_types):
+            return cls.get_cosmology_from_string(value)
+        elif isinstance(value, Cosmology):
+            return value
+        else:
+            raise TypeError("default_cosmology must be a string or Cosmology instance.")
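+
+# Illustrative usage of the science state above (a sketch; the pre-defined
+# WMAP9 instance is created by the loop over parameters.available):
+#
+#     >>> from astropy.cosmology import default_cosmology
+#     >>> cosmo = default_cosmology.get()      # the current default (WMAP9)
+#     >>> with default_cosmology.set('Planck15'):
+#     ...     cosmo = default_cosmology.get()  # Planck15 inside the block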
diff --git a/astropy/cosmology/funcs.py b/astropy/cosmology/funcs.py
new file mode 100644
index 0000000..899ad39
--- /dev/null
+++ b/astropy/cosmology/funcs.py
@@ -0,0 +1,146 @@
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+"""
+Convenience functions for `astropy.cosmology`.
+"""
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
+
+import warnings
+import numpy as np
+
+from .core import CosmologyError
+from ..units import Quantity
+
+__all__ = ['z_at_value']
+
+__doctest_requires__ = {'*': ['scipy.integrate']}
+
+
+def z_at_value(func, fval, zmin=1e-8, zmax=1000, ztol=1e-8, maxfun=500):
+    """ Find the redshift ``z`` at which ``func(z) = fval``.
+
+    This finds the redshift at which one of the cosmology functions or
+    methods (for example Planck13.distmod) is equal to a known value.
+
+    .. warning::
+      Make sure you understand the behaviour of the function that you
+      are trying to invert!  Depending on the cosmology, there may not
+      be a unique solution.  For example, in the standard Lambda CDM
+      cosmology, there are two redshifts which give an angular
+      diameter distance of 1500 Mpc, z ~ 0.7 and z ~ 3.8.  To force
+      ``z_at_value`` to find the solution you are interested in, use the
+      ``zmin`` and ``zmax`` keywords to limit the search range (see the
+      example below).
+
+    Parameters
+    ----------
+    func : function or method
+       A function that takes a redshift as input.
+    fval : astropy.Quantity instance
+       The value of ``func(z)``.
+    zmin : float, optional
+       The lower search limit for ``z``.  Beware of divergences
+       in some cosmological functions, such as distance moduli,
+       at z=0 (default 1e-8).
+    zmax : float, optional
+       The upper search limit for ``z`` (default 1000).
+    ztol : float, optional
+       The relative error in ``z`` acceptable for convergence.
+    maxfun : int, optional
+       The maximum number of function evaluations allowed in the
+       optimization routine (default 500).
+
+    Returns
+    -------
+    z : float
+      The redshift ``z`` satisfying ``zmin < z < zmax`` and ``func(z) =
+      fval`` within ``ztol``.
+
+    Notes
+    -----
+    This works for any arbitrary input cosmology, but is inefficient
+    if you want to invert a large number of values for the same
+    cosmology.  In this case, it is faster to instead generate an array
+    of values at many closely-spaced redshifts that cover the relevant
+    redshift range, and then use interpolation to find the redshift at
+    each value you're interested in.  For example, to efficiently find
+    the redshifts corresponding to 10^6 values of the distance modulus
+    in a Planck13 cosmology, you could do the following:
+
+    >>> import numpy as np
+    >>> import astropy.units as u
+    >>> from astropy.cosmology import Planck13, z_at_value
+
+    Generate 10^6 distance moduli between 24 and 43 for which we
+    want to find the corresponding redshifts:
+
+    >>> Dvals = (24 + np.random.rand(10**6) * 20) * u.mag
+
+    Make a grid of distance moduli covering the redshift range we
+    need using 50 equally log-spaced values between zmin and
+    zmax.  We use log spacing to adequately sample the steep part of
+    the curve at low distance moduli:
+
+    >>> zmin = z_at_value(Planck13.distmod, Dvals.min())
+    >>> zmax = z_at_value(Planck13.distmod, Dvals.max())
+    >>> zgrid = np.logspace(np.log10(zmin), np.log10(zmax), 50)
+    >>> Dgrid = Planck13.distmod(zgrid)
+
+    Finally interpolate to find the redshift at each distance modulus
+    (the grid of moduli is the x-coordinate argument of ``np.interp``):
+
+    >>> zvals = np.interp(Dvals.value, Dgrid.value, zgrid)
+
+    Examples
+    --------
+    >>> import astropy.units as u
+    >>> from astropy.cosmology import Planck13, z_at_value
+
+    The age and lookback time are monotonic with redshift, and so a
+    unique solution can be found:
+
+    >>> z_at_value(Planck13.age, 2 * u.Gyr)
+    3.19812268...
+
+    The angular diameter is not monotonic however, and there are two
+    redshifts that give a value of 1500 Mpc.  Use the zmin and zmax
+    keywords to find the one you're interested in:
+
+    >>> z_at_value(Planck13.angular_diameter_distance, 1500 * u.Mpc, zmax=1.5)
+    0.6812769577...
+    >>> z_at_value(Planck13.angular_diameter_distance, 1500 * u.Mpc, zmin=2.5)
+    3.7914913242...
+
+    Also note that the luminosity distance and distance modulus (two
+    other commonly inverted quantities) are monotonic in flat and open
+    universes, but not in closed universes.
+    """
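+    # Implementation sketch: check that fval is bracketed by func at the
+    # interval ends (a warning rather than an error, since non-monotonic
+    # functions can still have a solution inside), then minimise
+    # |func(z) - fval| with scipy's bounded search (fminbound) and flag
+    # best-fit values pinned at either boundary, which usually mean the
+    # bracket should be widened.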
+    from scipy.optimize import fminbound
+
+    fval_zmin = func(zmin)
+    fval_zmax = func(zmax)
+    if np.sign(fval - fval_zmin) != np.sign(fval_zmax - fval):
+        warnings.warn("""\
+fval is not bracketed by func(zmin) and func(zmax). This means either
+there is no solution, or that there is more than one solution between
+zmin and zmax satisfying fval = func(z).""")
+
+    if isinstance(fval_zmin, Quantity):
+        val = fval.to_value(fval_zmin.unit)
+        f = lambda z: abs(func(z).value - val)
+    else:
+        f = lambda z: abs(func(z) - fval)
+
+    zbest, resval, ierr, ncall = fminbound(f, zmin, zmax, maxfun=maxfun,
+                                           full_output=1, xtol=ztol)
+
+    if ierr != 0:
+        warnings.warn('Maximum number of function calls ({}) reached'.format(
+            ncall))
+
+    if np.allclose(zbest, zmax):
+        raise CosmologyError("Best guess z is very close to the upper z "
+                             "limit.\nTry re-running with a different zmax.")
+    elif np.allclose(zbest, zmin):
+        raise CosmologyError("Best guess z is very close to the lower z "
+                             "limit.\nTry re-running with a different zmin.")
+
+    return zbest
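+
+# Illustrative round trip (a sketch, not part of the public API): because
+# age(z) is monotonic, z_at_value(func, func(z0)) should recover z0 to
+# within ztol, e.g.:
+#
+#     >>> from astropy.cosmology import Planck13, z_at_value
+#     >>> t = Planck13.age(2.0)               # a Quantity in Gyr
+#     >>> round(z_at_value(Planck13.age, t), 6)
+#     2.0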
diff --git a/astropy/cosmology/parameters.py b/astropy/cosmology/parameters.py
new file mode 100644
index 0000000..d86831f
--- /dev/null
+++ b/astropy/cosmology/parameters.py
@@ -0,0 +1,148 @@
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+""" This module contains dictionaries with sets of parameters for a
+given cosmology.
+
+Each cosmology has the following parameters defined:
+
+    ==========  =====================================
+    Oc0         Omega cold dark matter at z=0
+    Ob0         Omega baryon at z=0
+    Om0         Omega matter at z=0
+    flat        Is this assumed flat?  If not, Ode0 must be specified
+    Ode0        Omega dark energy at z=0 if flat is False
+    H0          Hubble parameter at z=0 in km/s/Mpc
+    n           Density perturbation spectral index
+    Tcmb0       Current temperature of the CMB
+    Neff        Effective number of neutrino species
+    sigma8      Density perturbation amplitude
+    tau         Ionisation optical depth
+    z_reion     Redshift of hydrogen reionisation
+    t0          Age of the universe in Gyr
+    reference   Reference for the parameters
+    ==========  =====================================
+
+The list of cosmologies available is given by the list
+`available`. Current cosmologies available:
+
+Planck 2015 (Planck15) parameters from Planck Collaboration 2016, A&A, 594,
+A13 (Paper XIII), Table 4 (TT, TE, EE + lowP + lensing + ext)
+
+Planck 2013 (Planck13) parameters from Planck Collaboration 2014, A&A, 571,
+A16 (Paper XVI), Table 5 (Planck + WP + highL + BAO)
+
+WMAP 9 year (WMAP9) parameters from Hinshaw et al. 2013, ApJS, 208, 19,
+doi: 10.1088/0067-0049/208/2/19. Table 4 (WMAP9 + eCMB + BAO + H0)
+
+WMAP 7 year (WMAP7) parameters from Komatsu et al. 2011, ApJS, 192, 18,
+doi: 10.1088/0067-0049/192/2/18. Table 1 (WMAP + BAO + H0 ML).
+
+WMAP 5 year (WMAP5) parameters from Komatsu et al. 2009, ApJS, 180, 330,
+doi: 10.1088/0067-0049/180/2/330. Table 1 (WMAP + BAO + SN ML).
+
+"""
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
+
+# Note: if you add a new cosmology, please also update the table
+# in the 'Built-in Cosmologies' section of astropy/docs/cosmology/index.rst
+# in addition to the list above.  You also need to add them to the
+# 'available' list at the bottom of this file.
+
+# Planck 2015 paper XIII Table 4 final column (best fit)
+Planck15 = dict(
+    Oc0=0.2589,
+    Ob0=0.04860,
+    Om0=0.3075,
+    H0=67.74,
+    n=0.9667,
+    sigma8=0.8159,
+    tau=0.066,
+    z_reion=8.8,
+    t0=13.799,
+    Tcmb0=2.7255,
+    Neff=3.046,
+    flat=True,
+    m_nu=[0., 0., 0.06],
+    reference=("Planck Collaboration 2016, A&A, 594, A13 (Paper XIII),"
+               " Table 4 (TT, TE, EE + lowP + lensing + ext)")
+)
+
+# Planck 2013 paper XVI Table 5 penultimate column (best fit)
+Planck13 = dict(
+    Oc0=0.25886,
+    Ob0=0.048252,
+    Om0=0.30712,
+    H0=67.77,
+    n=0.9611,
+    sigma8=0.8288,
+    tau=0.0952,
+    z_reion=11.52,
+    t0=13.7965,
+    Tcmb0=2.7255,
+    Neff=3.046,
+    flat=True,
+    m_nu=[0., 0., 0.06],
+    reference=("Planck Collaboration 2014, A&A, 571, A16 (Paper XVI),"
+               " Table 5 (Planck + WP + highL + BAO)")
+)
+
+
+WMAP9 = dict(
+    Oc0=0.2402,
+    Ob0=0.04628,
+    Om0=0.2865,
+    H0=69.32,
+    n=0.9608,
+    sigma8=0.820,
+    tau=0.081,
+    z_reion=10.1,
+    t0=13.772,
+    Tcmb0=2.725,
+    Neff=3.04,
+    m_nu=0.0,
+    flat=True,
+    reference=("Hinshaw et al. 2013, ApJS, 208, 19, "
+               "doi: 10.1088/0067-0049/208/2/19. "
+               "Table 4 (WMAP9 + eCMB + BAO + H0, last column)")
+)
+
+WMAP7 = dict(
+    Oc0=0.226,
+    Ob0=0.0455,
+    Om0=0.272,
+    H0=70.4,
+    n=0.967,
+    sigma8=0.810,
+    tau=0.085,
+    z_reion=10.3,
+    t0=13.76,
+    Tcmb0=2.725,
+    Neff=3.04,
+    m_nu=0.0,
+    flat=True,
+    reference=("Komatsu et al. 2011, ApJS, 192, 18, "
+               "doi: 10.1088/0067-0049/192/2/18. "
+               "Table 1 (WMAP + BAO + H0 ML).")
+)
+
+WMAP5 = dict(
+    Oc0=0.231,
+    Ob0=0.0459,
+    Om0=0.277,
+    H0=70.2,
+    n=0.962,
+    sigma8=0.817,
+    tau=0.088,
+    z_reion=11.3,
+    t0=13.72,
+    Tcmb0=2.725,
+    Neff=3.04,
+    m_nu=0.0,
+    flat=True,
+    reference=("Komatsu et al. 2009, ApJS, 180, 330, "
+               "doi: 10.1088/0067-0049/180/2/330. "
+               "Table 1 (WMAP + BAO + SN ML).")
)
+
+# If new parameters are added, this list must be updated
+available = ['Planck15', 'Planck13', 'WMAP9', 'WMAP7', 'WMAP5']
diff --git a/astropy/cosmology/scalar_inv_efuncs.c b/astropy/cosmology/scalar_inv_efuncs.c
new file mode 100644
index 0000000..2bf25e0
--- /dev/null
+++ b/astropy/cosmology/scalar_inv_efuncs.c
@@ -0,0 +1,8546 @@
+/* Generated by Cython 0.27.3 */
+
+#define PY_SSIZE_T_CLEAN
+#include "Python.h"
+#ifndef Py_PYTHON_H
+    #error Python headers needed to compile C extensions, please install development version of Python.
+#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000)
+    #error Cython requires Python 2.6+ or Python 3.3+.
+#else +#define CYTHON_ABI "0_27_3" +#define CYTHON_FUTURE_DIVISION 0 +#include +#ifndef offsetof + #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) +#endif +#if !defined(WIN32) && !defined(MS_WINDOWS) + #ifndef __stdcall + #define __stdcall + #endif + #ifndef __cdecl + #define __cdecl + #endif + #ifndef __fastcall + #define __fastcall + #endif +#endif +#ifndef DL_IMPORT + #define DL_IMPORT(t) t +#endif +#ifndef DL_EXPORT + #define DL_EXPORT(t) t +#endif +#define __PYX_COMMA , +#ifndef HAVE_LONG_LONG + #if PY_VERSION_HEX >= 0x02070000 + #define HAVE_LONG_LONG + #endif +#endif +#ifndef PY_LONG_LONG + #define PY_LONG_LONG LONG_LONG +#endif +#ifndef Py_HUGE_VAL + #define Py_HUGE_VAL HUGE_VAL +#endif +#ifdef PYPY_VERSION + #define CYTHON_COMPILING_IN_PYPY 1 + #define CYTHON_COMPILING_IN_PYSTON 0 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #undef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 0 + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #if PY_VERSION_HEX < 0x03050000 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #undef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 0 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #undef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 1 + #undef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 0 + #undef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 0 + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 +#elif defined(PYSTON_VERSION) + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_PYSTON 1 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #ifndef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 +#else + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_PYSTON 0 + #define CYTHON_COMPILING_IN_CPYTHON 1 + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif + #if PY_VERSION_HEX < 0x02070000 + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) + #define 
CYTHON_USE_PYTYPE_LOOKUP 1 + #endif + #if PY_MAJOR_VERSION < 3 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #if PY_VERSION_HEX < 0x02070000 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #elif !defined(CYTHON_USE_PYLONG_INTERNALS) + #define CYTHON_USE_PYLONG_INTERNALS 1 + #endif + #ifndef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 1 + #endif + #ifndef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif + #if PY_VERSION_HEX < 0x030300F0 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #elif !defined(CYTHON_USE_UNICODE_WRITER) + #define CYTHON_USE_UNICODE_WRITER 1 + #endif + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #ifndef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 1 + #endif + #ifndef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 1 + #endif + #ifndef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT (0 && PY_VERSION_HEX >= 0x03050000) + #endif + #ifndef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) + #endif +#endif +#if !defined(CYTHON_FAST_PYCCALL) +#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) +#endif +#if CYTHON_USE_PYLONG_INTERNALS + #include "longintrepr.h" + #undef SHIFT + #undef BASE + #undef MASK +#endif +#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) + #define Py_OptimizeFlag 0 +#endif +#define __PYX_BUILD_PY_SSIZE_T "n" +#define CYTHON_FORMAT_SSIZE_T "z" +#if PY_MAJOR_VERSION < 3 + #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) + #define __Pyx_DefaultClassType PyClass_Type +#else + #define __Pyx_BUILTIN_MODULE_NAME "builtins" + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) + #define __Pyx_DefaultClassType PyType_Type +#endif +#ifndef Py_TPFLAGS_CHECKTYPES + #define Py_TPFLAGS_CHECKTYPES 0 +#endif +#ifndef Py_TPFLAGS_HAVE_INDEX + #define Py_TPFLAGS_HAVE_INDEX 0 +#endif +#ifndef Py_TPFLAGS_HAVE_NEWBUFFER + #define Py_TPFLAGS_HAVE_NEWBUFFER 0 +#endif +#ifndef Py_TPFLAGS_HAVE_FINALIZE + #define Py_TPFLAGS_HAVE_FINALIZE 0 +#endif +#if PY_VERSION_HEX < 0x030700A0 || !defined(METH_FASTCALL) + #ifndef METH_FASTCALL + #define METH_FASTCALL 0x80 + #endif + typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject **args, Py_ssize_t nargs); + typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject **args, + Py_ssize_t nargs, PyObject *kwnames); +#else + #define __Pyx_PyCFunctionFast _PyCFunctionFast + #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords +#endif +#if CYTHON_FAST_PYCCALL +#define __Pyx_PyFastCFunction_Check(func)\ + ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS))))) +#else +#define __Pyx_PyFastCFunction_Check(func) 0 +#endif +#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 + 
#define __Pyx_PyThreadState_Current PyThreadState_GET() +#elif PY_VERSION_HEX >= 0x03060000 + #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() +#elif PY_VERSION_HEX >= 0x03000000 + #define __Pyx_PyThreadState_Current PyThreadState_GET() +#else + #define __Pyx_PyThreadState_Current _PyThreadState_Current +#endif +#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) +#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n)) +#else +#define __Pyx_PyDict_NewPresized(n) PyDict_New() +#endif +#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION + #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) +#else + #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) +#endif +#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) + #define CYTHON_PEP393_ENABLED 1 + #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ + 0 : _PyUnicode_Ready((PyObject *)(op))) + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) + #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) + #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) + #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) +#else + #define CYTHON_PEP393_ENABLED 0 + #define PyUnicode_1BYTE_KIND 1 + #define PyUnicode_2BYTE_KIND 2 + #define PyUnicode_4BYTE_KIND 4 + #define __Pyx_PyUnicode_READY(op) (0) + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 
65535 : 1114111) + #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) + #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) + #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) +#endif +#if CYTHON_COMPILING_IN_PYPY + #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) + #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) +#else + #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) + #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ + PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) + #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) + #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) + #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) + #define PyObject_Malloc(s) PyMem_Malloc(s) + #define PyObject_Free(p) PyMem_Free(p) + #define PyObject_Realloc(p) PyMem_Realloc(p) +#endif +#if CYTHON_COMPILING_IN_PYSTON + #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) + #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) +#else + #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) + #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) +#endif +#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) +#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) +#else + #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) +#endif +#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) + #define PyObject_ASCII(o) PyObject_Repr(o) +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyBaseString_Type PyUnicode_Type + #define PyStringObject PyUnicodeObject + #define PyString_Type PyUnicode_Type + #define PyString_Check PyUnicode_Check + #define PyString_CheckExact PyUnicode_CheckExact +#endif +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) + #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) +#else + #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) + #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) +#endif +#ifndef PySet_CheckExact + #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) +#endif +#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) +#if PY_MAJOR_VERSION >= 3 + #define PyIntObject PyLongObject + #define PyInt_Type PyLong_Type + #define PyInt_Check(op) PyLong_Check(op) + #define PyInt_CheckExact(op) PyLong_CheckExact(op) + #define PyInt_FromString PyLong_FromString + #define PyInt_FromUnicode PyLong_FromUnicode + #define PyInt_FromLong PyLong_FromLong + #define PyInt_FromSize_t PyLong_FromSize_t + #define PyInt_FromSsize_t PyLong_FromSsize_t + #define PyInt_AsLong PyLong_AsLong + #define PyInt_AS_LONG PyLong_AS_LONG + #define PyInt_AsSsize_t PyLong_AsSsize_t + #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask + #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask + #define PyNumber_Int PyNumber_Long +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyBoolObject PyLongObject +#endif +#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY + #ifndef PyUnicode_InternFromString + #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) + #endif +#endif +#if PY_VERSION_HEX < 0x030200A4 + typedef long Py_hash_t; + #define __Pyx_PyInt_FromHash_t PyInt_FromLong + #define __Pyx_PyInt_AsHash_t PyInt_AsLong +#else + #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t + #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t +#endif +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyMethod_New(func, self, klass) ((self) ? 
PyMethod_New(func, self) : PyInstanceMethod_New(func)) +#else + #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) +#endif +#ifndef __has_attribute + #define __has_attribute(x) 0 +#endif +#ifndef __has_cpp_attribute + #define __has_cpp_attribute(x) 0 +#endif +#if CYTHON_USE_ASYNC_SLOTS + #if PY_VERSION_HEX >= 0x030500B1 + #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods + #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) + #else + #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) + #endif +#else + #define __Pyx_PyType_AsAsync(obj) NULL +#endif +#ifndef __Pyx_PyAsyncMethodsStruct + typedef struct { + unaryfunc am_await; + unaryfunc am_aiter; + unaryfunc am_anext; + } __Pyx_PyAsyncMethodsStruct; +#endif +#ifndef CYTHON_RESTRICT + #if defined(__GNUC__) + #define CYTHON_RESTRICT __restrict__ + #elif defined(_MSC_VER) && _MSC_VER >= 1400 + #define CYTHON_RESTRICT __restrict + #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define CYTHON_RESTRICT restrict + #else + #define CYTHON_RESTRICT + #endif +#endif +#ifndef CYTHON_UNUSED +# if defined(__GNUC__) +# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) +# define CYTHON_UNUSED __attribute__ ((__unused__)) +# else +# define CYTHON_UNUSED +# endif +# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) +# define CYTHON_UNUSED __attribute__ ((__unused__)) +# else +# define CYTHON_UNUSED +# endif +#endif +#ifndef CYTHON_MAYBE_UNUSED_VAR +# if defined(__cplusplus) + template void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } +# else +# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) +# endif +#endif +#ifndef CYTHON_NCP_UNUSED +# if CYTHON_COMPILING_IN_CPYTHON +# define CYTHON_NCP_UNUSED +# else +# define CYTHON_NCP_UNUSED CYTHON_UNUSED +# endif +#endif +#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) +#ifdef _MSC_VER + #ifndef _MSC_STDINT_H_ + #if _MSC_VER < 1300 + typedef unsigned char uint8_t; + typedef unsigned int uint32_t; + #else + typedef unsigned __int8 uint8_t; + typedef unsigned __int32 uint32_t; + #endif + #endif +#else + #include +#endif +#ifndef CYTHON_FALLTHROUGH + #if defined(__cplusplus) && __cplusplus >= 201103L + #if __has_cpp_attribute(fallthrough) + #define CYTHON_FALLTHROUGH [[fallthrough]] + #elif __has_cpp_attribute(clang::fallthrough) + #define CYTHON_FALLTHROUGH [[clang::fallthrough]] + #elif __has_cpp_attribute(gnu::fallthrough) + #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] + #endif + #endif + #ifndef CYTHON_FALLTHROUGH + #if __has_attribute(fallthrough) + #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) + #else + #define CYTHON_FALLTHROUGH + #endif + #endif + #if defined(__clang__ ) && defined(__apple_build_version__) + #if __apple_build_version__ < 7000000 + #undef CYTHON_FALLTHROUGH + #define CYTHON_FALLTHROUGH + #endif + #endif +#endif + +#ifndef CYTHON_INLINE + #if defined(__clang__) + #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) + #elif defined(__GNUC__) + #define CYTHON_INLINE __inline__ + #elif defined(_MSC_VER) + #define CYTHON_INLINE __inline + #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define CYTHON_INLINE inline + #else + #define CYTHON_INLINE + #endif +#endif + +#if defined(WIN32) || defined(MS_WINDOWS) + #define _USE_MATH_DEFINES +#endif +#include +#ifdef NAN +#define __PYX_NAN() ((float) NAN) +#else +static CYTHON_INLINE float __PYX_NAN() { + float value; + 
memset(&value, 0xFF, sizeof(value)); + return value; +} +#endif +#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) +#define __Pyx_truncl trunc +#else +#define __Pyx_truncl truncl +#endif + + +#define __PYX_ERR(f_index, lineno, Ln_error) \ +{ \ + __pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \ +} + +#ifndef __PYX_EXTERN_C + #ifdef __cplusplus + #define __PYX_EXTERN_C extern "C" + #else + #define __PYX_EXTERN_C extern + #endif +#endif + +#define __PYX_HAVE__astropy__cosmology__scalar_inv_efuncs +#define __PYX_HAVE_API__astropy__cosmology__scalar_inv_efuncs +#include +#ifdef _OPENMP +#include +#endif /* _OPENMP */ + +#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) +#define CYTHON_WITHOUT_ASSERTIONS +#endif + +typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; + const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; + +#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 +#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0 +#define __PYX_DEFAULT_STRING_ENCODING "" +#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString +#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize +#define __Pyx_uchar_cast(c) ((unsigned char)c) +#define __Pyx_long_cast(x) ((long)x) +#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ + (sizeof(type) < sizeof(Py_ssize_t)) ||\ + (sizeof(type) > sizeof(Py_ssize_t) &&\ + likely(v < (type)PY_SSIZE_T_MAX ||\ + v == (type)PY_SSIZE_T_MAX) &&\ + (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ + v == (type)PY_SSIZE_T_MIN))) ||\ + (sizeof(type) == sizeof(Py_ssize_t) &&\ + (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ + v == (type)PY_SSIZE_T_MAX))) ) +#if defined (__cplusplus) && __cplusplus >= 201103L + #include + #define __Pyx_sst_abs(value) std::abs(value) +#elif SIZEOF_INT >= SIZEOF_SIZE_T + #define __Pyx_sst_abs(value) abs(value) +#elif SIZEOF_LONG >= SIZEOF_SIZE_T + #define __Pyx_sst_abs(value) labs(value) +#elif defined (_MSC_VER) + #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) +#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define __Pyx_sst_abs(value) llabs(value) +#elif defined (__GNUC__) + #define __Pyx_sst_abs(value) __builtin_llabs(value) +#else + #define __Pyx_sst_abs(value) ((value<0) ? 
-value : value) +#endif +static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); +static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); +#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) +#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) +#define __Pyx_PyBytes_FromString PyBytes_FromString +#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); +#if PY_MAJOR_VERSION < 3 + #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString + #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize +#else + #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString + #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize +#endif +#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) +#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) +#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) +#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) +#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) +static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { + const Py_UNICODE *u_end = u; + while (*u_end++) ; + return (size_t)(u_end - u - 1); +} +#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) +#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode +#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode +#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) +#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) +#define __Pyx_PyBool_FromLong(b) ((b) ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False)) +static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); +#define __Pyx_PySequence_Tuple(obj)\ + (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) +static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); +static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); +#if CYTHON_ASSUME_SAFE_MACROS +#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) +#else +#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) +#endif +#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) +#if PY_MAJOR_VERSION >= 3 +#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? 
__Pyx_NewRef(x) : PyNumber_Long(x)) +#else +#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) +#endif +#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) +#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII +static int __Pyx_sys_getdefaultencoding_not_ascii; +static int __Pyx_init_sys_getdefaultencoding_params(void) { + PyObject* sys; + PyObject* default_encoding = NULL; + PyObject* ascii_chars_u = NULL; + PyObject* ascii_chars_b = NULL; + const char* default_encoding_c; + sys = PyImport_ImportModule("sys"); + if (!sys) goto bad; + default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); + Py_DECREF(sys); + if (!default_encoding) goto bad; + default_encoding_c = PyBytes_AsString(default_encoding); + if (!default_encoding_c) goto bad; + if (strcmp(default_encoding_c, "ascii") == 0) { + __Pyx_sys_getdefaultencoding_not_ascii = 0; + } else { + char ascii_chars[128]; + int c; + for (c = 0; c < 128; c++) { + ascii_chars[c] = c; + } + __Pyx_sys_getdefaultencoding_not_ascii = 1; + ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); + if (!ascii_chars_u) goto bad; + ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); + if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { + PyErr_Format( + PyExc_ValueError, + "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", + default_encoding_c); + goto bad; + } + Py_DECREF(ascii_chars_u); + Py_DECREF(ascii_chars_b); + } + Py_DECREF(default_encoding); + return 0; +bad: + Py_XDECREF(default_encoding); + Py_XDECREF(ascii_chars_u); + Py_XDECREF(ascii_chars_b); + return -1; +} +#endif +#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 +#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) +#else +#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) +#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT +static char* __PYX_DEFAULT_STRING_ENCODING; +static int __Pyx_init_sys_getdefaultencoding_params(void) { + PyObject* sys; + PyObject* default_encoding = NULL; + char* default_encoding_c; + sys = PyImport_ImportModule("sys"); + if (!sys) goto bad; + default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); + Py_DECREF(sys); + if (!default_encoding) goto bad; + default_encoding_c = PyBytes_AsString(default_encoding); + if (!default_encoding_c) goto bad; + __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c)); + if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; + strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); + Py_DECREF(default_encoding); + return 0; +bad: + Py_XDECREF(default_encoding); + return -1; +} +#endif +#endif + + +/* Test for GCC > 2.95 */ +#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) + #define likely(x) __builtin_expect(!!(x), 1) + #define unlikely(x) __builtin_expect(!!(x), 0) +#else /* !__GNUC__ or GCC < 2.95 */ + #define likely(x) (x) + #define unlikely(x) (x) +#endif /* __GNUC__ */ +static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } + +static PyObject *__pyx_m = NULL; +static PyObject *__pyx_d; +static PyObject *__pyx_b; +static PyObject *__pyx_cython_runtime; +static PyObject *__pyx_empty_tuple; +static PyObject 
*__pyx_empty_bytes; +static PyObject *__pyx_empty_unicode; +static int __pyx_lineno; +static int __pyx_clineno = 0; +static const char * __pyx_cfilenm= __FILE__; +static const char *__pyx_filename; + + +static const char *__pyx_f[] = { + "astropy/cosmology/scalar_inv_efuncs.pyx", +}; + +/*--- Type declarations ---*/ + +/* --- Runtime support code (head) --- */ +/* Refnanny.proto */ +#ifndef CYTHON_REFNANNY + #define CYTHON_REFNANNY 0 +#endif +#if CYTHON_REFNANNY + typedef struct { + void (*INCREF)(void*, PyObject*, int); + void (*DECREF)(void*, PyObject*, int); + void (*GOTREF)(void*, PyObject*, int); + void (*GIVEREF)(void*, PyObject*, int); + void* (*SetupContext)(const char*, int, const char*); + void (*FinishContext)(void**); + } __Pyx_RefNannyAPIStruct; + static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; + static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); + #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; +#ifdef WITH_THREAD + #define __Pyx_RefNannySetupContext(name, acquire_gil)\ + if (acquire_gil) {\ + PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ + PyGILState_Release(__pyx_gilstate_save);\ + } else {\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ + } +#else + #define __Pyx_RefNannySetupContext(name, acquire_gil)\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) +#endif + #define __Pyx_RefNannyFinishContext()\ + __Pyx_RefNanny->FinishContext(&__pyx_refnanny) + #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) + #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) + #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) + #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) +#else + #define __Pyx_RefNannyDeclarations + #define __Pyx_RefNannySetupContext(name, acquire_gil) + #define __Pyx_RefNannyFinishContext() + #define __Pyx_INCREF(r) Py_INCREF(r) + #define __Pyx_DECREF(r) Py_DECREF(r) + #define __Pyx_GOTREF(r) + #define __Pyx_GIVEREF(r) + #define __Pyx_XINCREF(r) Py_XINCREF(r) + #define __Pyx_XDECREF(r) Py_XDECREF(r) + #define __Pyx_XGOTREF(r) + #define __Pyx_XGIVEREF(r) +#endif +#define __Pyx_XDECREF_SET(r, v) do {\ + PyObject *tmp = (PyObject *) r;\ + r = v; __Pyx_XDECREF(tmp);\ + } while (0) +#define __Pyx_DECREF_SET(r, v) do {\ + PyObject *tmp = (PyObject *) r;\ + r = v; __Pyx_DECREF(tmp);\ + } while (0) +#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) +#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) + +/* PyObjectGetAttrStr.proto */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { + PyTypeObject* tp = Py_TYPE(obj); + if (likely(tp->tp_getattro)) + return tp->tp_getattro(obj, attr_name); +#if PY_MAJOR_VERSION < 3 + if (likely(tp->tp_getattr)) + return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); +#endif + return 
PyObject_GetAttr(obj, attr_name); +} +#else +#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) +#endif + +/* GetBuiltinName.proto */ +static PyObject *__Pyx_GetBuiltinName(PyObject *name); + +/* RaiseArgTupleInvalid.proto */ +static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, + Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); + +/* RaiseDoubleKeywords.proto */ +static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); + +/* ParseKeywords.proto */ +static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ + PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ + const char* function_name); + +/* ArgTypeTest.proto */ +#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\ + ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 1 :\ + __Pyx__ArgTypeTest(obj, type, name, exact)) +static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); + +/* PyFloatBinop.proto */ +#if !CYTHON_COMPILING_IN_PYPY +static PyObject* __Pyx_PyFloat_AddCObj(PyObject *op1, PyObject *op2, double floatval, int inplace); +#else +#define __Pyx_PyFloat_AddCObj(op1, op2, floatval, inplace)\ + (inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2)) +#endif + +/* GetItemInt.proto */ +#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ + (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ + __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ + (is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ + __Pyx_GetItemInt_Generic(o, to_py_func(i)))) +#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ + (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ + __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ + (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, + int wraparound, int boundscheck); +#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ + (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ + __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ + (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, + int wraparound, int boundscheck); +static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, + int is_list, int wraparound, int boundscheck); + +/* PyThreadStateGet.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; +#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; +#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type +#else +#define __Pyx_PyThreadState_declare +#define __Pyx_PyThreadState_assign +#define __Pyx_PyErr_Occurred() PyErr_Occurred() +#endif + +/* PyErrFetchRestore.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) +#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) +#define 
__Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) +#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) +#else +#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) +#endif +#else +#define __Pyx_PyErr_Clear() PyErr_Clear() +#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) +#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) +#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) +#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) +#endif + +/* CLineInTraceback.proto */ +#ifdef CYTHON_CLINE_IN_TRACEBACK +#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0) +#else +static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); +#endif + +/* CodeObjectCache.proto */ +typedef struct { + PyCodeObject* code_object; + int code_line; +} __Pyx_CodeObjectCacheEntry; +struct __Pyx_CodeObjectCache { + int count; + int max_count; + __Pyx_CodeObjectCacheEntry* entries; +}; +static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; +static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); +static PyCodeObject *__pyx_find_code_object(int code_line); +static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); + +/* AddTraceback.proto */ +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename); + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_unsigned_int(unsigned int value); + +/* CIntFromPy.proto */ +static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); + +/* CIntFromPy.proto */ +static CYTHON_INLINE unsigned int __Pyx_PyInt_As_unsigned_int(PyObject *); + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); + +/* CIntFromPy.proto */ +static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); + +/* FastTypeChecks.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) +static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); +#else +#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) +#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) +#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) +#endif + +/* CheckBinaryVersion.proto */ +static int __Pyx_check_binary_version(void); + +/* 
InitStrings.proto */ +static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); + + +/* Module declarations from 'cython' */ + +/* Module declarations from 'libc.math' */ + +/* Module declarations from 'astropy.cosmology.scalar_inv_efuncs' */ +static PyObject *__pyx_f_7astropy_9cosmology_17scalar_inv_efuncs_nufunc(double, double, int, PyObject *); /*proto*/ +#define __Pyx_MODULE_NAME "astropy.cosmology.scalar_inv_efuncs" +extern int __pyx_module_is_main_astropy__cosmology__scalar_inv_efuncs; +int __pyx_module_is_main_astropy__cosmology__scalar_inv_efuncs = 0; + +/* Implementation of 'astropy.cosmology.scalar_inv_efuncs' */ +static PyObject *__pyx_builtin_range; +static const char __pyx_k_z[] = "z"; +static const char __pyx_k_w0[] = "w0"; +static const char __pyx_k_wa[] = "wa"; +static const char __pyx_k_wp[] = "wp"; +static const char __pyx_k_wz[] = "wz"; +static const char __pyx_k_Ok0[] = "Ok0"; +static const char __pyx_k_Om0[] = "Om0"; +static const char __pyx_k_Or0[] = "Or0"; +static const char __pyx_k_opz[] = "opz"; +static const char __pyx_k_Ode0[] = "Ode0"; +static const char __pyx_k_apiv[] = "apiv"; +static const char __pyx_k_main[] = "__main__"; +static const char __pyx_k_nu_y[] = "nu_y"; +static const char __pyx_k_test[] = "__test__"; +static const char __pyx_k_range[] = "range"; +static const char __pyx_k_Odescl[] = "Odescl"; +static const char __pyx_k_Ogamma0[] = "Ogamma0"; +static const char __pyx_k_NeffPerNu[] = "NeffPerNu"; +static const char __pyx_k_nmasslessnu[] = "nmasslessnu"; +static const char __pyx_k_lcdm_inv_efunc[] = "lcdm_inv_efunc"; +static const char __pyx_k_wcdm_inv_efunc[] = "wcdm_inv_efunc"; +static const char __pyx_k_flcdm_inv_efunc[] = "flcdm_inv_efunc"; +static const char __pyx_k_fwcdm_inv_efunc[] = "fwcdm_inv_efunc"; +static const char __pyx_k_w0wacdm_inv_efunc[] = "w0wacdm_inv_efunc"; +static const char __pyx_k_w0wzcdm_inv_efunc[] = "w0wzcdm_inv_efunc"; +static const char __pyx_k_wpwacdm_inv_efunc[] = "wpwacdm_inv_efunc"; +static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; +static const char __pyx_k_fw0wacdm_inv_efunc[] = "fw0wacdm_inv_efunc"; +static const char __pyx_k_lcdm_inv_efunc_nomnu[] = "lcdm_inv_efunc_nomnu"; +static const char __pyx_k_lcdm_inv_efunc_norel[] = "lcdm_inv_efunc_norel"; +static const char __pyx_k_wcdm_inv_efunc_nomnu[] = "wcdm_inv_efunc_nomnu"; +static const char __pyx_k_wcdm_inv_efunc_norel[] = "wcdm_inv_efunc_norel"; +static const char __pyx_k_flcdm_inv_efunc_nomnu[] = "flcdm_inv_efunc_nomnu"; +static const char __pyx_k_flcdm_inv_efunc_norel[] = "flcdm_inv_efunc_norel"; +static const char __pyx_k_fwcdm_inv_efunc_nomnu[] = "fwcdm_inv_efunc_nomnu"; +static const char __pyx_k_fwcdm_inv_efunc_norel[] = "fwcdm_inv_efunc_norel"; +static const char __pyx_k_w0wacdm_inv_efunc_nomnu[] = "w0wacdm_inv_efunc_nomnu"; +static const char __pyx_k_w0wacdm_inv_efunc_norel[] = "w0wacdm_inv_efunc_norel"; +static const char __pyx_k_w0wzcdm_inv_efunc_nomnu[] = "w0wzcdm_inv_efunc_nomnu"; +static const char __pyx_k_w0wzcdm_inv_efunc_norel[] = "w0wzcdm_inv_efunc_norel"; +static const char __pyx_k_wpwacdm_inv_efunc_nomnu[] = "wpwacdm_inv_efunc_nomnu"; +static const char __pyx_k_wpwacdm_inv_efunc_norel[] = "wpwacdm_inv_efunc_norel"; +static const char __pyx_k_fw0wacdm_inv_efunc_nomnu[] = "fw0wacdm_inv_efunc_nomnu"; +static const char __pyx_k_fw0wacdm_inv_efunc_norel[] = "fw0wacdm_inv_efunc_norel"; +static const char __pyx_k_Cython_inverse_efuncs_for_cosmo[] = " Cython inverse efuncs for cosmology integrals"; +static const char 
__pyx_k_astropy_cosmology_scalar_inv_efu[] = "astropy/cosmology/scalar_inv_efuncs.pyx"; +static const char __pyx_k_astropy_cosmology_scalar_inv_efu_2[] = "astropy.cosmology.scalar_inv_efuncs"; +static PyObject *__pyx_n_s_NeffPerNu; +static PyObject *__pyx_n_s_Ode0; +static PyObject *__pyx_n_s_Odescl; +static PyObject *__pyx_n_s_Ogamma0; +static PyObject *__pyx_n_s_Ok0; +static PyObject *__pyx_n_s_Om0; +static PyObject *__pyx_n_s_Or0; +static PyObject *__pyx_n_s_apiv; +static PyObject *__pyx_kp_s_astropy_cosmology_scalar_inv_efu; +static PyObject *__pyx_n_s_astropy_cosmology_scalar_inv_efu_2; +static PyObject *__pyx_n_s_cline_in_traceback; +static PyObject *__pyx_n_s_flcdm_inv_efunc; +static PyObject *__pyx_n_s_flcdm_inv_efunc_nomnu; +static PyObject *__pyx_n_s_flcdm_inv_efunc_norel; +static PyObject *__pyx_n_s_fw0wacdm_inv_efunc; +static PyObject *__pyx_n_s_fw0wacdm_inv_efunc_nomnu; +static PyObject *__pyx_n_s_fw0wacdm_inv_efunc_norel; +static PyObject *__pyx_n_s_fwcdm_inv_efunc; +static PyObject *__pyx_n_s_fwcdm_inv_efunc_nomnu; +static PyObject *__pyx_n_s_fwcdm_inv_efunc_norel; +static PyObject *__pyx_n_s_lcdm_inv_efunc; +static PyObject *__pyx_n_s_lcdm_inv_efunc_nomnu; +static PyObject *__pyx_n_s_lcdm_inv_efunc_norel; +static PyObject *__pyx_n_s_main; +static PyObject *__pyx_n_s_nmasslessnu; +static PyObject *__pyx_n_s_nu_y; +static PyObject *__pyx_n_s_opz; +static PyObject *__pyx_n_s_range; +static PyObject *__pyx_n_s_test; +static PyObject *__pyx_n_s_w0; +static PyObject *__pyx_n_s_w0wacdm_inv_efunc; +static PyObject *__pyx_n_s_w0wacdm_inv_efunc_nomnu; +static PyObject *__pyx_n_s_w0wacdm_inv_efunc_norel; +static PyObject *__pyx_n_s_w0wzcdm_inv_efunc; +static PyObject *__pyx_n_s_w0wzcdm_inv_efunc_nomnu; +static PyObject *__pyx_n_s_w0wzcdm_inv_efunc_norel; +static PyObject *__pyx_n_s_wa; +static PyObject *__pyx_n_s_wcdm_inv_efunc; +static PyObject *__pyx_n_s_wcdm_inv_efunc_nomnu; +static PyObject *__pyx_n_s_wcdm_inv_efunc_norel; +static PyObject *__pyx_n_s_wp; +static PyObject *__pyx_n_s_wpwacdm_inv_efunc; +static PyObject *__pyx_n_s_wpwacdm_inv_efunc_nomnu; +static PyObject *__pyx_n_s_wpwacdm_inv_efunc_norel; +static PyObject *__pyx_n_s_wz; +static PyObject *__pyx_n_s_z; +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_lcdm_inv_efunc_norel(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, double __pyx_v_Ode0, double __pyx_v_Ok0); /* proto */ +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_2lcdm_inv_efunc_nomnu(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, double __pyx_v_Ode0, double __pyx_v_Ok0, double __pyx_v_Or0); /* proto */ +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_4lcdm_inv_efunc(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, double __pyx_v_Ode0, double __pyx_v_Ok0, double __pyx_v_Ogamma0, double __pyx_v_NeffPerNu, int __pyx_v_nmasslessnu, PyObject *__pyx_v_nu_y); /* proto */ +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_6flcdm_inv_efunc_norel(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, double __pyx_v_Ode0); /* proto */ +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_8flcdm_inv_efunc_nomnu(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, double __pyx_v_Ode0, double __pyx_v_Or0); /* proto */ +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_10flcdm_inv_efunc(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, 
double __pyx_v_Ode0, double __pyx_v_Ogamma0, double __pyx_v_NeffPerNu, int __pyx_v_nmasslessnu, PyObject *__pyx_v_nu_y); /* proto */ +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_12wcdm_inv_efunc_norel(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, double __pyx_v_Ode0, double __pyx_v_Ok0, double __pyx_v_w0); /* proto */ +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_14wcdm_inv_efunc_nomnu(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, double __pyx_v_Ode0, double __pyx_v_Ok0, double __pyx_v_Or0, double __pyx_v_w0); /* proto */ +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_16wcdm_inv_efunc(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, double __pyx_v_Ode0, double __pyx_v_Ok0, double __pyx_v_Ogamma0, double __pyx_v_NeffPerNu, int __pyx_v_nmasslessnu, PyObject *__pyx_v_nu_y, double __pyx_v_w0); /* proto */ +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_18fwcdm_inv_efunc_norel(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, double __pyx_v_Ode0, double __pyx_v_w0); /* proto */ +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_20fwcdm_inv_efunc_nomnu(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, double __pyx_v_Ode0, double __pyx_v_Or0, double __pyx_v_w0); /* proto */ +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_22fwcdm_inv_efunc(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, double __pyx_v_Ode0, double __pyx_v_Ogamma0, double __pyx_v_NeffPerNu, int __pyx_v_nmasslessnu, PyObject *__pyx_v_nu_y, double __pyx_v_w0); /* proto */ +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_24w0wacdm_inv_efunc_norel(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, double __pyx_v_Ode0, double __pyx_v_Ok0, double __pyx_v_w0, double __pyx_v_wa); /* proto */ +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_26w0wacdm_inv_efunc_nomnu(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, double __pyx_v_Ode0, double __pyx_v_Ok0, double __pyx_v_Or0, double __pyx_v_w0, double __pyx_v_wa); /* proto */ +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_28w0wacdm_inv_efunc(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, double __pyx_v_Ode0, double __pyx_v_Ok0, double __pyx_v_Ogamma0, double __pyx_v_NeffPerNu, int __pyx_v_nmasslessnu, PyObject *__pyx_v_nu_y, double __pyx_v_w0, double __pyx_v_wa); /* proto */ +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_30fw0wacdm_inv_efunc_norel(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, double __pyx_v_Ode0, double __pyx_v_w0, double __pyx_v_wa); /* proto */ +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_32fw0wacdm_inv_efunc_nomnu(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, double __pyx_v_Ode0, double __pyx_v_Or0, double __pyx_v_w0, double __pyx_v_wa); /* proto */ +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_34fw0wacdm_inv_efunc(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, double __pyx_v_Ode0, double __pyx_v_Ogamma0, double __pyx_v_NeffPerNu, int __pyx_v_nmasslessnu, PyObject *__pyx_v_nu_y, double __pyx_v_w0, double __pyx_v_wa); /* proto */ +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_36wpwacdm_inv_efunc_norel(CYTHON_UNUSED 
PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, double __pyx_v_Ode0, double __pyx_v_Ok0, double __pyx_v_wp, double __pyx_v_apiv, double __pyx_v_wa); /* proto */ +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_38wpwacdm_inv_efunc_nomnu(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, double __pyx_v_Ode0, double __pyx_v_Ok0, double __pyx_v_Or0, double __pyx_v_wp, double __pyx_v_apiv, double __pyx_v_wa); /* proto */ +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_40wpwacdm_inv_efunc(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, double __pyx_v_Ode0, double __pyx_v_Ok0, double __pyx_v_Ogamma0, double __pyx_v_NeffPerNu, int __pyx_v_nmasslessnu, PyObject *__pyx_v_nu_y, double __pyx_v_wp, double __pyx_v_apiv, double __pyx_v_wa); /* proto */ +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_42w0wzcdm_inv_efunc_norel(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, double __pyx_v_Ode0, double __pyx_v_Ok0, double __pyx_v_w0, double __pyx_v_wz); /* proto */ +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_44w0wzcdm_inv_efunc_nomnu(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, double __pyx_v_Ode0, double __pyx_v_Ok0, double __pyx_v_Or0, double __pyx_v_w0, double __pyx_v_wz); /* proto */ +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_46w0wzcdm_inv_efunc(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, double __pyx_v_Ode0, double __pyx_v_Ok0, double __pyx_v_Ogamma0, double __pyx_v_NeffPerNu, int __pyx_v_nmasslessnu, PyObject *__pyx_v_nu_y, double __pyx_v_w0, double __pyx_v_wz); /* proto */ +static PyObject *__pyx_float_1_0; +static PyObject *__pyx_tuple_; +static PyObject *__pyx_tuple__3; +static PyObject *__pyx_tuple__5; +static PyObject *__pyx_tuple__7; +static PyObject *__pyx_tuple__9; +static PyObject *__pyx_tuple__11; +static PyObject *__pyx_tuple__13; +static PyObject *__pyx_tuple__15; +static PyObject *__pyx_tuple__17; +static PyObject *__pyx_tuple__19; +static PyObject *__pyx_tuple__21; +static PyObject *__pyx_tuple__23; +static PyObject *__pyx_tuple__25; +static PyObject *__pyx_tuple__27; +static PyObject *__pyx_tuple__29; +static PyObject *__pyx_tuple__31; +static PyObject *__pyx_tuple__33; +static PyObject *__pyx_tuple__35; +static PyObject *__pyx_tuple__37; +static PyObject *__pyx_tuple__39; +static PyObject *__pyx_tuple__41; +static PyObject *__pyx_tuple__43; +static PyObject *__pyx_tuple__45; +static PyObject *__pyx_tuple__47; +static PyObject *__pyx_codeobj__2; +static PyObject *__pyx_codeobj__4; +static PyObject *__pyx_codeobj__6; +static PyObject *__pyx_codeobj__8; +static PyObject *__pyx_codeobj__10; +static PyObject *__pyx_codeobj__12; +static PyObject *__pyx_codeobj__14; +static PyObject *__pyx_codeobj__16; +static PyObject *__pyx_codeobj__18; +static PyObject *__pyx_codeobj__20; +static PyObject *__pyx_codeobj__22; +static PyObject *__pyx_codeobj__24; +static PyObject *__pyx_codeobj__26; +static PyObject *__pyx_codeobj__28; +static PyObject *__pyx_codeobj__30; +static PyObject *__pyx_codeobj__32; +static PyObject *__pyx_codeobj__34; +static PyObject *__pyx_codeobj__36; +static PyObject *__pyx_codeobj__38; +static PyObject *__pyx_codeobj__40; +static PyObject *__pyx_codeobj__42; +static PyObject *__pyx_codeobj__44; +static PyObject *__pyx_codeobj__46; +static PyObject *__pyx_codeobj__48; + +/* "astropy/cosmology/scalar_inv_efuncs.pyx":22 + * ######### 
LambdaCDM + * # No relativistic species + * def lcdm_inv_efunc_norel(double z, double Om0, double Ode0, double Ok0): # <<<<<<<<<<<<<< + * cdef double opz = 1.0 + z + * return pow(opz**2 * (opz * Om0 + Ok0) + Ode0, -0.5) + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_1lcdm_inv_efunc_norel(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyMethodDef __pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_1lcdm_inv_efunc_norel = {"lcdm_inv_efunc_norel", (PyCFunction)__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_1lcdm_inv_efunc_norel, METH_VARARGS|METH_KEYWORDS, 0}; +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_1lcdm_inv_efunc_norel(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + double __pyx_v_z; + double __pyx_v_Om0; + double __pyx_v_Ode0; + double __pyx_v_Ok0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("lcdm_inv_efunc_norel (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_z,&__pyx_n_s_Om0,&__pyx_n_s_Ode0,&__pyx_n_s_Ok0,0}; + PyObject* values[4] = {0,0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Om0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("lcdm_inv_efunc_norel", 1, 4, 4, 1); __PYX_ERR(0, 22, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ode0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("lcdm_inv_efunc_norel", 1, 4, 4, 2); __PYX_ERR(0, 22, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 3: + if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ok0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("lcdm_inv_efunc_norel", 1, 4, 4, 3); __PYX_ERR(0, 22, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "lcdm_inv_efunc_norel") < 0)) __PYX_ERR(0, 22, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 4) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + } + __pyx_v_z = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_z == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 22, __pyx_L3_error) + __pyx_v_Om0 = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_Om0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 22, __pyx_L3_error) + __pyx_v_Ode0 = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_Ode0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 22, __pyx_L3_error) + __pyx_v_Ok0 = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_Ok0 == 
(double)-1) && PyErr_Occurred())) __PYX_ERR(0, 22, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("lcdm_inv_efunc_norel", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 22, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.lcdm_inv_efunc_norel", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_lcdm_inv_efunc_norel(__pyx_self, __pyx_v_z, __pyx_v_Om0, __pyx_v_Ode0, __pyx_v_Ok0); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_lcdm_inv_efunc_norel(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, double __pyx_v_Ode0, double __pyx_v_Ok0) { + double __pyx_v_opz; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("lcdm_inv_efunc_norel", 0); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":23 + * # No relativistic species + * def lcdm_inv_efunc_norel(double z, double Om0, double Ode0, double Ok0): + * cdef double opz = 1.0 + z # <<<<<<<<<<<<<< + * return pow(opz**2 * (opz * Om0 + Ok0) + Ode0, -0.5) + * + */ + __pyx_v_opz = (1.0 + __pyx_v_z); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":24 + * def lcdm_inv_efunc_norel(double z, double Om0, double Ode0, double Ok0): + * cdef double opz = 1.0 + z + * return pow(opz**2 * (opz * Om0 + Ok0) + Ode0, -0.5) # <<<<<<<<<<<<<< + * + * # Massless neutrinos + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyFloat_FromDouble(pow(((pow(__pyx_v_opz, 2.0) * ((__pyx_v_opz * __pyx_v_Om0) + __pyx_v_Ok0)) + __pyx_v_Ode0), -0.5)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":22 + * ######### LambdaCDM + * # No relativistic species + * def lcdm_inv_efunc_norel(double z, double Om0, double Ode0, double Ok0): # <<<<<<<<<<<<<< + * cdef double opz = 1.0 + z + * return pow(opz**2 * (opz * Om0 + Ok0) + Ode0, -0.5) + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.lcdm_inv_efunc_norel", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "astropy/cosmology/scalar_inv_efuncs.pyx":27 + * + * # Massless neutrinos + * def lcdm_inv_efunc_nomnu(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double Or0): + * cdef double opz = 1.0 + z + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_3lcdm_inv_efunc_nomnu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyMethodDef __pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_3lcdm_inv_efunc_nomnu = {"lcdm_inv_efunc_nomnu", (PyCFunction)__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_3lcdm_inv_efunc_nomnu, METH_VARARGS|METH_KEYWORDS, 0}; +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_3lcdm_inv_efunc_nomnu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + double __pyx_v_z; + double __pyx_v_Om0; + double __pyx_v_Ode0; + double __pyx_v_Ok0; + double __pyx_v_Or0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + 
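/* lcdm_inv_efunc_nomnu handles LambdaCDM with massless neutrinos, whose radiation density enters as the plain Or0 term. This wrapper only unpacks the five Python arguments (positional or keyword) into C doubles; the C body then returns 1/E(z) = pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 + Ode0, -0.5) with opz = 1 + z, as quoted from the .pyx source above. */ +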
__Pyx_RefNannySetupContext("lcdm_inv_efunc_nomnu (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_z,&__pyx_n_s_Om0,&__pyx_n_s_Ode0,&__pyx_n_s_Ok0,&__pyx_n_s_Or0,0}; + PyObject* values[5] = {0,0,0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + CYTHON_FALLTHROUGH; + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Om0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("lcdm_inv_efunc_nomnu", 1, 5, 5, 1); __PYX_ERR(0, 27, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ode0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("lcdm_inv_efunc_nomnu", 1, 5, 5, 2); __PYX_ERR(0, 27, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 3: + if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ok0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("lcdm_inv_efunc_nomnu", 1, 5, 5, 3); __PYX_ERR(0, 27, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 4: + if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Or0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("lcdm_inv_efunc_nomnu", 1, 5, 5, 4); __PYX_ERR(0, 27, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "lcdm_inv_efunc_nomnu") < 0)) __PYX_ERR(0, 27, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 5) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + } + __pyx_v_z = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_z == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 27, __pyx_L3_error) + __pyx_v_Om0 = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_Om0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 27, __pyx_L3_error) + __pyx_v_Ode0 = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_Ode0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 27, __pyx_L3_error) + __pyx_v_Ok0 = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_Ok0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 27, __pyx_L3_error) + __pyx_v_Or0 = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_Or0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 28, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("lcdm_inv_efunc_nomnu", 1, 5, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 27, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.lcdm_inv_efunc_nomnu", __pyx_clineno, __pyx_lineno, __pyx_filename); + 
__Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_2lcdm_inv_efunc_nomnu(__pyx_self, __pyx_v_z, __pyx_v_Om0, __pyx_v_Ode0, __pyx_v_Ok0, __pyx_v_Or0); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_2lcdm_inv_efunc_nomnu(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, double __pyx_v_Ode0, double __pyx_v_Ok0, double __pyx_v_Or0) { + double __pyx_v_opz; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("lcdm_inv_efunc_nomnu", 0); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":29 + * def lcdm_inv_efunc_nomnu(double z, double Om0, double Ode0, double Ok0, + * double Or0): + * cdef double opz = 1.0 + z # <<<<<<<<<<<<<< + * return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 + Ode0, -0.5) + * + */ + __pyx_v_opz = (1.0 + __pyx_v_z); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":30 + * double Or0): + * cdef double opz = 1.0 + z + * return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 + Ode0, -0.5) # <<<<<<<<<<<<<< + * + * # With massive neutrinos + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyFloat_FromDouble(pow(((((((__pyx_v_opz * __pyx_v_Or0) + __pyx_v_Om0) * __pyx_v_opz) + __pyx_v_Ok0) * pow(__pyx_v_opz, 2.0)) + __pyx_v_Ode0), -0.5)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 30, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":27 + * + * # Massless neutrinos + * def lcdm_inv_efunc_nomnu(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double Or0): + * cdef double opz = 1.0 + z + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.lcdm_inv_efunc_nomnu", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "astropy/cosmology/scalar_inv_efuncs.pyx":33 + * + * # With massive neutrinos + * def lcdm_inv_efunc(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double Ogamma0, double NeffPerNu, int nmasslessnu, list nu_y): + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_5lcdm_inv_efunc(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyMethodDef __pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_5lcdm_inv_efunc = {"lcdm_inv_efunc", (PyCFunction)__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_5lcdm_inv_efunc, METH_VARARGS|METH_KEYWORDS, 0}; +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_5lcdm_inv_efunc(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + double __pyx_v_z; + double __pyx_v_Om0; + double __pyx_v_Ode0; + double __pyx_v_Ok0; + double __pyx_v_Ogamma0; + double __pyx_v_NeffPerNu; + int __pyx_v_nmasslessnu; + PyObject *__pyx_v_nu_y = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("lcdm_inv_efunc (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_z,&__pyx_n_s_Om0,&__pyx_n_s_Ode0,&__pyx_n_s_Ok0,&__pyx_n_s_Ogamma0,&__pyx_n_s_NeffPerNu,&__pyx_n_s_nmasslessnu,&__pyx_n_s_nu_y,0}; + PyObject* values[8] = {0,0,0,0,0,0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args 
= PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); + CYTHON_FALLTHROUGH; + case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); + CYTHON_FALLTHROUGH; + case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); + CYTHON_FALLTHROUGH; + case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + CYTHON_FALLTHROUGH; + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Om0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("lcdm_inv_efunc", 1, 8, 8, 1); __PYX_ERR(0, 33, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ode0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("lcdm_inv_efunc", 1, 8, 8, 2); __PYX_ERR(0, 33, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 3: + if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ok0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("lcdm_inv_efunc", 1, 8, 8, 3); __PYX_ERR(0, 33, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 4: + if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ogamma0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("lcdm_inv_efunc", 1, 8, 8, 4); __PYX_ERR(0, 33, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 5: + if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_NeffPerNu)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("lcdm_inv_efunc", 1, 8, 8, 5); __PYX_ERR(0, 33, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 6: + if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_nmasslessnu)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("lcdm_inv_efunc", 1, 8, 8, 6); __PYX_ERR(0, 33, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 7: + if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_nu_y)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("lcdm_inv_efunc", 1, 8, 8, 7); __PYX_ERR(0, 33, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "lcdm_inv_efunc") < 0)) __PYX_ERR(0, 33, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 8) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + values[5] = PyTuple_GET_ITEM(__pyx_args, 5); + values[6] = PyTuple_GET_ITEM(__pyx_args, 6); + values[7] = PyTuple_GET_ITEM(__pyx_args, 7); + } + __pyx_v_z = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_z == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 33, __pyx_L3_error) + __pyx_v_Om0 = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_Om0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 33, __pyx_L3_error) + __pyx_v_Ode0 = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_Ode0 == 
(double)-1) && PyErr_Occurred())) __PYX_ERR(0, 33, __pyx_L3_error) + __pyx_v_Ok0 = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_Ok0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 33, __pyx_L3_error) + __pyx_v_Ogamma0 = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_Ogamma0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 34, __pyx_L3_error) + __pyx_v_NeffPerNu = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_NeffPerNu == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 34, __pyx_L3_error) + __pyx_v_nmasslessnu = __Pyx_PyInt_As_int(values[6]); if (unlikely((__pyx_v_nmasslessnu == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 34, __pyx_L3_error) + __pyx_v_nu_y = ((PyObject*)values[7]); + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("lcdm_inv_efunc", 1, 8, 8, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 33, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.lcdm_inv_efunc", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_nu_y), (&PyList_Type), 1, "nu_y", 1))) __PYX_ERR(0, 34, __pyx_L1_error) + __pyx_r = __pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_4lcdm_inv_efunc(__pyx_self, __pyx_v_z, __pyx_v_Om0, __pyx_v_Ode0, __pyx_v_Ok0, __pyx_v_Ogamma0, __pyx_v_NeffPerNu, __pyx_v_nmasslessnu, __pyx_v_nu_y); + + /* function exit code */ + goto __pyx_L0; + __pyx_L1_error:; + __pyx_r = NULL; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_4lcdm_inv_efunc(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, double __pyx_v_Ode0, double __pyx_v_Ok0, double __pyx_v_Ogamma0, double __pyx_v_NeffPerNu, int __pyx_v_nmasslessnu, PyObject *__pyx_v_nu_y) { + double __pyx_v_opz; + double __pyx_v_Or0; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + double __pyx_t_4; + __Pyx_RefNannySetupContext("lcdm_inv_efunc", 0); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":36 + * double Ogamma0, double NeffPerNu, int nmasslessnu, list nu_y): + * + * cdef double opz = 1.0 + z # <<<<<<<<<<<<<< + * cdef double Or0 = Ogamma0 * (1.0 + nufunc(opz, NeffPerNu, nmasslessnu, nu_y)) + * return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 + Ode0, -0.5) + */ + __pyx_v_opz = (1.0 + __pyx_v_z); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":37 + * + * cdef double opz = 1.0 + z + * cdef double Or0 = Ogamma0 * (1.0 + nufunc(opz, NeffPerNu, nmasslessnu, nu_y)) # <<<<<<<<<<<<<< + * return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 + Ode0, -0.5) + * + */ + __pyx_t_1 = PyFloat_FromDouble(__pyx_v_Ogamma0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 37, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __pyx_f_7astropy_9cosmology_17scalar_inv_efuncs_nufunc(__pyx_v_opz, __pyx_v_NeffPerNu, __pyx_v_nmasslessnu, __pyx_v_nu_y); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 37, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = __Pyx_PyFloat_AddCObj(__pyx_float_1_0, __pyx_t_2, 1.0, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 37, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = PyNumber_Multiply(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 37, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); 
__pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_4 = __pyx_PyFloat_AsDouble(__pyx_t_2); if (unlikely((__pyx_t_4 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 37, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_v_Or0 = __pyx_t_4; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":38 + * cdef double opz = 1.0 + z + * cdef double Or0 = Ogamma0 * (1.0 + nufunc(opz, NeffPerNu, nmasslessnu, nu_y)) + * return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 + Ode0, -0.5) # <<<<<<<<<<<<<< + * + * ######## FlatLambdaCDM + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = PyFloat_FromDouble(pow(((((((__pyx_v_opz * __pyx_v_Or0) + __pyx_v_Om0) * __pyx_v_opz) + __pyx_v_Ok0) * pow(__pyx_v_opz, 2.0)) + __pyx_v_Ode0), -0.5)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 38, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":33 + * + * # With massive neutrinos + * def lcdm_inv_efunc(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double Ogamma0, double NeffPerNu, int nmasslessnu, list nu_y): + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.lcdm_inv_efunc", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "astropy/cosmology/scalar_inv_efuncs.pyx":42 + * ######## FlatLambdaCDM + * # No relativistic species + * def flcdm_inv_efunc_norel(double z, double Om0, double Ode0): # <<<<<<<<<<<<<< + * return pow((1. + z)**3 * Om0 + Ode0, -0.5) + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_7flcdm_inv_efunc_norel(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyMethodDef __pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_7flcdm_inv_efunc_norel = {"flcdm_inv_efunc_norel", (PyCFunction)__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_7flcdm_inv_efunc_norel, METH_VARARGS|METH_KEYWORDS, 0}; +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_7flcdm_inv_efunc_norel(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + double __pyx_v_z; + double __pyx_v_Om0; + double __pyx_v_Ode0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("flcdm_inv_efunc_norel (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_z,&__pyx_n_s_Om0,&__pyx_n_s_Ode0,0}; + PyObject* values[3] = {0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Om0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("flcdm_inv_efunc_norel", 1, 3, 3, 1); __PYX_ERR(0, 42, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if 
(likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ode0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("flcdm_inv_efunc_norel", 1, 3, 3, 2); __PYX_ERR(0, 42, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "flcdm_inv_efunc_norel") < 0)) __PYX_ERR(0, 42, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + } + __pyx_v_z = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_z == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 42, __pyx_L3_error) + __pyx_v_Om0 = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_Om0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 42, __pyx_L3_error) + __pyx_v_Ode0 = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_Ode0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 42, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("flcdm_inv_efunc_norel", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 42, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.flcdm_inv_efunc_norel", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_6flcdm_inv_efunc_norel(__pyx_self, __pyx_v_z, __pyx_v_Om0, __pyx_v_Ode0); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_6flcdm_inv_efunc_norel(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, double __pyx_v_Ode0) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("flcdm_inv_efunc_norel", 0); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":43 + * # No relativistic species + * def flcdm_inv_efunc_norel(double z, double Om0, double Ode0): + * return pow((1. + z)**3 * Om0 + Ode0, -0.5) # <<<<<<<<<<<<<< + * + * # Massless neutrinos + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyFloat_FromDouble(pow(((pow((1. + __pyx_v_z), 3.0) * __pyx_v_Om0) + __pyx_v_Ode0), -0.5)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 43, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":42 + * ######## FlatLambdaCDM + * # No relativistic species + * def flcdm_inv_efunc_norel(double z, double Om0, double Ode0): # <<<<<<<<<<<<<< + * return pow((1. 
+ z)**3 * Om0 + Ode0, -0.5) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.flcdm_inv_efunc_norel", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "astropy/cosmology/scalar_inv_efuncs.pyx":46 + * + * # Massless neutrinos + * def flcdm_inv_efunc_nomnu(double z, double Om0, double Ode0, double Or0): # <<<<<<<<<<<<<< + * cdef double opz = 1.0 + z + * return pow(opz**3 * (opz * Or0 + Om0) + Ode0, -0.5) + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_9flcdm_inv_efunc_nomnu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyMethodDef __pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_9flcdm_inv_efunc_nomnu = {"flcdm_inv_efunc_nomnu", (PyCFunction)__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_9flcdm_inv_efunc_nomnu, METH_VARARGS|METH_KEYWORDS, 0}; +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_9flcdm_inv_efunc_nomnu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + double __pyx_v_z; + double __pyx_v_Om0; + double __pyx_v_Ode0; + double __pyx_v_Or0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("flcdm_inv_efunc_nomnu (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_z,&__pyx_n_s_Om0,&__pyx_n_s_Ode0,&__pyx_n_s_Or0,0}; + PyObject* values[4] = {0,0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Om0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("flcdm_inv_efunc_nomnu", 1, 4, 4, 1); __PYX_ERR(0, 46, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ode0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("flcdm_inv_efunc_nomnu", 1, 4, 4, 2); __PYX_ERR(0, 46, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 3: + if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Or0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("flcdm_inv_efunc_nomnu", 1, 4, 4, 3); __PYX_ERR(0, 46, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "flcdm_inv_efunc_nomnu") < 0)) __PYX_ERR(0, 46, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 4) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + } + __pyx_v_z = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_z == (double)-1) && PyErr_Occurred())) 
__PYX_ERR(0, 46, __pyx_L3_error) + __pyx_v_Om0 = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_Om0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 46, __pyx_L3_error) + __pyx_v_Ode0 = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_Ode0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 46, __pyx_L3_error) + __pyx_v_Or0 = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_Or0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 46, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("flcdm_inv_efunc_nomnu", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 46, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.flcdm_inv_efunc_nomnu", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_8flcdm_inv_efunc_nomnu(__pyx_self, __pyx_v_z, __pyx_v_Om0, __pyx_v_Ode0, __pyx_v_Or0); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_8flcdm_inv_efunc_nomnu(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, double __pyx_v_Ode0, double __pyx_v_Or0) { + double __pyx_v_opz; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("flcdm_inv_efunc_nomnu", 0); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":47 + * # Massless neutrinos + * def flcdm_inv_efunc_nomnu(double z, double Om0, double Ode0, double Or0): + * cdef double opz = 1.0 + z # <<<<<<<<<<<<<< + * return pow(opz**3 * (opz * Or0 + Om0) + Ode0, -0.5) + * + */ + __pyx_v_opz = (1.0 + __pyx_v_z); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":48 + * def flcdm_inv_efunc_nomnu(double z, double Om0, double Ode0, double Or0): + * cdef double opz = 1.0 + z + * return pow(opz**3 * (opz * Or0 + Om0) + Ode0, -0.5) # <<<<<<<<<<<<<< + * + * # With massive neutrinos + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyFloat_FromDouble(pow(((pow(__pyx_v_opz, 3.0) * ((__pyx_v_opz * __pyx_v_Or0) + __pyx_v_Om0)) + __pyx_v_Ode0), -0.5)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 48, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":46 + * + * # Massless neutrinos + * def flcdm_inv_efunc_nomnu(double z, double Om0, double Ode0, double Or0): # <<<<<<<<<<<<<< + * cdef double opz = 1.0 + z + * return pow(opz**3 * (opz * Or0 + Om0) + Ode0, -0.5) + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.flcdm_inv_efunc_nomnu", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "astropy/cosmology/scalar_inv_efuncs.pyx":51 + * + * # With massive neutrinos + * def flcdm_inv_efunc(double z, double Om0, double Ode0, double Ogamma0, # <<<<<<<<<<<<<< + * double NeffPerNu, int nmasslessnu, list nu_y): + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_11flcdm_inv_efunc(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyMethodDef __pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_11flcdm_inv_efunc = {"flcdm_inv_efunc", 
(PyCFunction)__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_11flcdm_inv_efunc, METH_VARARGS|METH_KEYWORDS, 0}; +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_11flcdm_inv_efunc(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + double __pyx_v_z; + double __pyx_v_Om0; + double __pyx_v_Ode0; + double __pyx_v_Ogamma0; + double __pyx_v_NeffPerNu; + int __pyx_v_nmasslessnu; + PyObject *__pyx_v_nu_y = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("flcdm_inv_efunc (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_z,&__pyx_n_s_Om0,&__pyx_n_s_Ode0,&__pyx_n_s_Ogamma0,&__pyx_n_s_NeffPerNu,&__pyx_n_s_nmasslessnu,&__pyx_n_s_nu_y,0}; + PyObject* values[7] = {0,0,0,0,0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); + CYTHON_FALLTHROUGH; + case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); + CYTHON_FALLTHROUGH; + case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + CYTHON_FALLTHROUGH; + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Om0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("flcdm_inv_efunc", 1, 7, 7, 1); __PYX_ERR(0, 51, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ode0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("flcdm_inv_efunc", 1, 7, 7, 2); __PYX_ERR(0, 51, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 3: + if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ogamma0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("flcdm_inv_efunc", 1, 7, 7, 3); __PYX_ERR(0, 51, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 4: + if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_NeffPerNu)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("flcdm_inv_efunc", 1, 7, 7, 4); __PYX_ERR(0, 51, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 5: + if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_nmasslessnu)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("flcdm_inv_efunc", 1, 7, 7, 5); __PYX_ERR(0, 51, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 6: + if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_nu_y)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("flcdm_inv_efunc", 1, 7, 7, 6); __PYX_ERR(0, 51, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "flcdm_inv_efunc") < 0)) __PYX_ERR(0, 51, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 7) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + 
values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + values[5] = PyTuple_GET_ITEM(__pyx_args, 5); + values[6] = PyTuple_GET_ITEM(__pyx_args, 6); + } + __pyx_v_z = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_z == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 51, __pyx_L3_error) + __pyx_v_Om0 = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_Om0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 51, __pyx_L3_error) + __pyx_v_Ode0 = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_Ode0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 51, __pyx_L3_error) + __pyx_v_Ogamma0 = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_Ogamma0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 51, __pyx_L3_error) + __pyx_v_NeffPerNu = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_NeffPerNu == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 52, __pyx_L3_error) + __pyx_v_nmasslessnu = __Pyx_PyInt_As_int(values[5]); if (unlikely((__pyx_v_nmasslessnu == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 52, __pyx_L3_error) + __pyx_v_nu_y = ((PyObject*)values[6]); + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("flcdm_inv_efunc", 1, 7, 7, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 51, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.flcdm_inv_efunc", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_nu_y), (&PyList_Type), 1, "nu_y", 1))) __PYX_ERR(0, 52, __pyx_L1_error) + __pyx_r = __pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_10flcdm_inv_efunc(__pyx_self, __pyx_v_z, __pyx_v_Om0, __pyx_v_Ode0, __pyx_v_Ogamma0, __pyx_v_NeffPerNu, __pyx_v_nmasslessnu, __pyx_v_nu_y); + + /* function exit code */ + goto __pyx_L0; + __pyx_L1_error:; + __pyx_r = NULL; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_10flcdm_inv_efunc(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, double __pyx_v_Ode0, double __pyx_v_Ogamma0, double __pyx_v_NeffPerNu, int __pyx_v_nmasslessnu, PyObject *__pyx_v_nu_y) { + double __pyx_v_opz; + double __pyx_v_Or0; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + double __pyx_t_4; + __Pyx_RefNannySetupContext("flcdm_inv_efunc", 0); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":54 + * double NeffPerNu, int nmasslessnu, list nu_y): + * + * cdef double opz = 1.0 + z # <<<<<<<<<<<<<< + * cdef double Or0 = Ogamma0 * (1.0 + nufunc(opz, NeffPerNu, nmasslessnu, nu_y)) + * return pow(opz**3 * (opz * Or0 + Om0) + Ode0, -0.5) + */ + __pyx_v_opz = (1.0 + __pyx_v_z); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":55 + * + * cdef double opz = 1.0 + z + * cdef double Or0 = Ogamma0 * (1.0 + nufunc(opz, NeffPerNu, nmasslessnu, nu_y)) # <<<<<<<<<<<<<< + * return pow(opz**3 * (opz * Or0 + Om0) + Ode0, -0.5) + * + */ + __pyx_t_1 = PyFloat_FromDouble(__pyx_v_Ogamma0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 55, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __pyx_f_7astropy_9cosmology_17scalar_inv_efuncs_nufunc(__pyx_v_opz, __pyx_v_NeffPerNu, __pyx_v_nmasslessnu, __pyx_v_nu_y); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 55, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = __Pyx_PyFloat_AddCObj(__pyx_float_1_0, __pyx_t_2, 1.0, 0); if 
(unlikely(!__pyx_t_3)) __PYX_ERR(0, 55, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = PyNumber_Multiply(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 55, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_4 = __pyx_PyFloat_AsDouble(__pyx_t_2); if (unlikely((__pyx_t_4 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 55, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_v_Or0 = __pyx_t_4; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":56 + * cdef double opz = 1.0 + z + * cdef double Or0 = Ogamma0 * (1.0 + nufunc(opz, NeffPerNu, nmasslessnu, nu_y)) + * return pow(opz**3 * (opz * Or0 + Om0) + Ode0, -0.5) # <<<<<<<<<<<<<< + * + * ######## wCDM + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = PyFloat_FromDouble(pow(((pow(__pyx_v_opz, 3.0) * ((__pyx_v_opz * __pyx_v_Or0) + __pyx_v_Om0)) + __pyx_v_Ode0), -0.5)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 56, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":51 + * + * # With massive neutrinos + * def flcdm_inv_efunc(double z, double Om0, double Ode0, double Ogamma0, # <<<<<<<<<<<<<< + * double NeffPerNu, int nmasslessnu, list nu_y): + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.flcdm_inv_efunc", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "astropy/cosmology/scalar_inv_efuncs.pyx":60 + * ######## wCDM + * # No relativistic species + * def wcdm_inv_efunc_norel(double z, double Om0, double Ode0, # <<<<<<<<<<<<<< + * double Ok0, double w0): + * cdef double opz = 1.0 + z + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_13wcdm_inv_efunc_norel(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyMethodDef __pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_13wcdm_inv_efunc_norel = {"wcdm_inv_efunc_norel", (PyCFunction)__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_13wcdm_inv_efunc_norel, METH_VARARGS|METH_KEYWORDS, 0}; +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_13wcdm_inv_efunc_norel(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + double __pyx_v_z; + double __pyx_v_Om0; + double __pyx_v_Ode0; + double __pyx_v_Ok0; + double __pyx_v_w0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("wcdm_inv_efunc_norel (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_z,&__pyx_n_s_Om0,&__pyx_n_s_Ode0,&__pyx_n_s_Ok0,&__pyx_n_s_w0,0}; + PyObject* values[5] = {0,0,0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + CYTHON_FALLTHROUGH; + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = 
PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Om0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("wcdm_inv_efunc_norel", 1, 5, 5, 1); __PYX_ERR(0, 60, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ode0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("wcdm_inv_efunc_norel", 1, 5, 5, 2); __PYX_ERR(0, 60, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 3: + if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ok0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("wcdm_inv_efunc_norel", 1, 5, 5, 3); __PYX_ERR(0, 60, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 4: + if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_w0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("wcdm_inv_efunc_norel", 1, 5, 5, 4); __PYX_ERR(0, 60, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "wcdm_inv_efunc_norel") < 0)) __PYX_ERR(0, 60, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 5) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + } + __pyx_v_z = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_z == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 60, __pyx_L3_error) + __pyx_v_Om0 = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_Om0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 60, __pyx_L3_error) + __pyx_v_Ode0 = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_Ode0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 60, __pyx_L3_error) + __pyx_v_Ok0 = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_Ok0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 61, __pyx_L3_error) + __pyx_v_w0 = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_w0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 61, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("wcdm_inv_efunc_norel", 1, 5, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 60, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.wcdm_inv_efunc_norel", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_12wcdm_inv_efunc_norel(__pyx_self, __pyx_v_z, __pyx_v_Om0, __pyx_v_Ode0, __pyx_v_Ok0, __pyx_v_w0); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_12wcdm_inv_efunc_norel(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, double __pyx_v_Ode0, double __pyx_v_Ok0, double __pyx_v_w0) { + double __pyx_v_opz; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("wcdm_inv_efunc_norel", 0); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":62 + * def wcdm_inv_efunc_norel(double z, double Om0, double Ode0, + * 
double Ok0, double w0): + * cdef double opz = 1.0 + z # <<<<<<<<<<<<<< + * return pow(opz**2 * (opz * Om0 + Ok0) + + * Ode0 * opz**(3. * (1.0 + w0)), -0.5) + */ + __pyx_v_opz = (1.0 + __pyx_v_z); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":63 + * double Ok0, double w0): + * cdef double opz = 1.0 + z + * return pow(opz**2 * (opz * Om0 + Ok0) + # <<<<<<<<<<<<<< + * Ode0 * opz**(3. * (1.0 + w0)), -0.5) + * + */ + __Pyx_XDECREF(__pyx_r); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":64 + * cdef double opz = 1.0 + z + * return pow(opz**2 * (opz * Om0 + Ok0) + + * Ode0 * opz**(3. * (1.0 + w0)), -0.5) # <<<<<<<<<<<<<< + * + * # Massless neutrinos + */ + __pyx_t_1 = PyFloat_FromDouble(pow(((pow(__pyx_v_opz, 2.0) * ((__pyx_v_opz * __pyx_v_Om0) + __pyx_v_Ok0)) + (__pyx_v_Ode0 * pow(__pyx_v_opz, (3. * (1.0 + __pyx_v_w0))))), -0.5)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 63, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":60 + * ######## wCDM + * # No relativistic species + * def wcdm_inv_efunc_norel(double z, double Om0, double Ode0, # <<<<<<<<<<<<<< + * double Ok0, double w0): + * cdef double opz = 1.0 + z + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.wcdm_inv_efunc_norel", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "astropy/cosmology/scalar_inv_efuncs.pyx":67 + * + * # Massless neutrinos + * def wcdm_inv_efunc_nomnu(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double Or0, double w0): + * cdef double opz = 1.0 + z + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_15wcdm_inv_efunc_nomnu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyMethodDef __pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_15wcdm_inv_efunc_nomnu = {"wcdm_inv_efunc_nomnu", (PyCFunction)__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_15wcdm_inv_efunc_nomnu, METH_VARARGS|METH_KEYWORDS, 0}; +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_15wcdm_inv_efunc_nomnu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + double __pyx_v_z; + double __pyx_v_Om0; + double __pyx_v_Ode0; + double __pyx_v_Ok0; + double __pyx_v_Or0; + double __pyx_v_w0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("wcdm_inv_efunc_nomnu (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_z,&__pyx_n_s_Om0,&__pyx_n_s_Ode0,&__pyx_n_s_Ok0,&__pyx_n_s_Or0,&__pyx_n_s_w0,0}; + PyObject* values[6] = {0,0,0,0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); + CYTHON_FALLTHROUGH; + case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + CYTHON_FALLTHROUGH; + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = 
PyDict_GetItem(__pyx_kwds, __pyx_n_s_z)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Om0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("wcdm_inv_efunc_nomnu", 1, 6, 6, 1); __PYX_ERR(0, 67, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ode0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("wcdm_inv_efunc_nomnu", 1, 6, 6, 2); __PYX_ERR(0, 67, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 3: + if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ok0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("wcdm_inv_efunc_nomnu", 1, 6, 6, 3); __PYX_ERR(0, 67, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 4: + if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Or0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("wcdm_inv_efunc_nomnu", 1, 6, 6, 4); __PYX_ERR(0, 67, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 5: + if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_w0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("wcdm_inv_efunc_nomnu", 1, 6, 6, 5); __PYX_ERR(0, 67, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "wcdm_inv_efunc_nomnu") < 0)) __PYX_ERR(0, 67, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 6) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + values[5] = PyTuple_GET_ITEM(__pyx_args, 5); + } + __pyx_v_z = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_z == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 67, __pyx_L3_error) + __pyx_v_Om0 = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_Om0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 67, __pyx_L3_error) + __pyx_v_Ode0 = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_Ode0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 67, __pyx_L3_error) + __pyx_v_Ok0 = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_Ok0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 67, __pyx_L3_error) + __pyx_v_Or0 = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_Or0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 68, __pyx_L3_error) + __pyx_v_w0 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_w0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 68, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("wcdm_inv_efunc_nomnu", 1, 6, 6, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 67, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.wcdm_inv_efunc_nomnu", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_14wcdm_inv_efunc_nomnu(__pyx_self, __pyx_v_z, __pyx_v_Om0, __pyx_v_Ode0, __pyx_v_Ok0, __pyx_v_Or0, __pyx_v_w0); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_14wcdm_inv_efunc_nomnu(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double 
__pyx_v_Om0, double __pyx_v_Ode0, double __pyx_v_Ok0, double __pyx_v_Or0, double __pyx_v_w0) { + double __pyx_v_opz; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("wcdm_inv_efunc_nomnu", 0); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":69 + * def wcdm_inv_efunc_nomnu(double z, double Om0, double Ode0, double Ok0, + * double Or0, double w0): + * cdef double opz = 1.0 + z # <<<<<<<<<<<<<< + * return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 + + * Ode0 * opz**(3. * (1.0 + w0)), -0.5) + */ + __pyx_v_opz = (1.0 + __pyx_v_z); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":70 + * double Or0, double w0): + * cdef double opz = 1.0 + z + * return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 + # <<<<<<<<<<<<<< + * Ode0 * opz**(3. * (1.0 + w0)), -0.5) + * + */ + __Pyx_XDECREF(__pyx_r); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":71 + * cdef double opz = 1.0 + z + * return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 + + * Ode0 * opz**(3. * (1.0 + w0)), -0.5) # <<<<<<<<<<<<<< + * + * # With massive neutrinos + */ + __pyx_t_1 = PyFloat_FromDouble(pow(((((((__pyx_v_opz * __pyx_v_Or0) + __pyx_v_Om0) * __pyx_v_opz) + __pyx_v_Ok0) * pow(__pyx_v_opz, 2.0)) + (__pyx_v_Ode0 * pow(__pyx_v_opz, (3. * (1.0 + __pyx_v_w0))))), -0.5)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 70, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":67 + * + * # Massless neutrinos + * def wcdm_inv_efunc_nomnu(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double Or0, double w0): + * cdef double opz = 1.0 + z + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.wcdm_inv_efunc_nomnu", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "astropy/cosmology/scalar_inv_efuncs.pyx":74 + * + * # With massive neutrinos + * def wcdm_inv_efunc(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double Ogamma0, double NeffPerNu, int nmasslessnu, list nu_y, double w0): + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_17wcdm_inv_efunc(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyMethodDef __pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_17wcdm_inv_efunc = {"wcdm_inv_efunc", (PyCFunction)__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_17wcdm_inv_efunc, METH_VARARGS|METH_KEYWORDS, 0}; +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_17wcdm_inv_efunc(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + double __pyx_v_z; + double __pyx_v_Om0; + double __pyx_v_Ode0; + double __pyx_v_Ok0; + double __pyx_v_Ogamma0; + double __pyx_v_NeffPerNu; + int __pyx_v_nmasslessnu; + PyObject *__pyx_v_nu_y = 0; + double __pyx_v_w0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("wcdm_inv_efunc (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_z,&__pyx_n_s_Om0,&__pyx_n_s_Ode0,&__pyx_n_s_Ok0,&__pyx_n_s_Ogamma0,&__pyx_n_s_NeffPerNu,&__pyx_n_s_nmasslessnu,&__pyx_n_s_nu_y,&__pyx_n_s_w0,0}; + PyObject* values[9] = {0,0,0,0,0,0,0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 9: 
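+ /* For reference: the wcdm_* functions generalize this to a constant
+  * dark-energy equation of state w0, so the Ode0 term scales as
+  * (1+z)**(3*(1+w0)) instead of staying constant (w0 = -1 recovers LCDM).
+  * With curvature and radiation included, cf. the .pyx source quoted
+  * below:
+  *
+  *     1/E(z) = ((((1+z)*Or0 + Om0)*(1+z) + Ok0)*(1+z)**2
+  *               + Ode0*(1+z)**(3*(1+w0)))**-0.5
+  */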
values[8] = PyTuple_GET_ITEM(__pyx_args, 8); + CYTHON_FALLTHROUGH; + case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); + CYTHON_FALLTHROUGH; + case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); + CYTHON_FALLTHROUGH; + case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); + CYTHON_FALLTHROUGH; + case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + CYTHON_FALLTHROUGH; + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Om0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("wcdm_inv_efunc", 1, 9, 9, 1); __PYX_ERR(0, 74, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ode0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("wcdm_inv_efunc", 1, 9, 9, 2); __PYX_ERR(0, 74, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 3: + if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ok0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("wcdm_inv_efunc", 1, 9, 9, 3); __PYX_ERR(0, 74, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 4: + if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ogamma0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("wcdm_inv_efunc", 1, 9, 9, 4); __PYX_ERR(0, 74, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 5: + if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_NeffPerNu)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("wcdm_inv_efunc", 1, 9, 9, 5); __PYX_ERR(0, 74, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 6: + if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_nmasslessnu)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("wcdm_inv_efunc", 1, 9, 9, 6); __PYX_ERR(0, 74, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 7: + if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_nu_y)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("wcdm_inv_efunc", 1, 9, 9, 7); __PYX_ERR(0, 74, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 8: + if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_w0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("wcdm_inv_efunc", 1, 9, 9, 8); __PYX_ERR(0, 74, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "wcdm_inv_efunc") < 0)) __PYX_ERR(0, 74, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 9) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + values[5] = PyTuple_GET_ITEM(__pyx_args, 5); + values[6] = PyTuple_GET_ITEM(__pyx_args, 6); + values[7] = PyTuple_GET_ITEM(__pyx_args, 7); + values[8] = PyTuple_GET_ITEM(__pyx_args, 8); + } + __pyx_v_z = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_z == (double)-1) 
&& PyErr_Occurred())) __PYX_ERR(0, 74, __pyx_L3_error) + __pyx_v_Om0 = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_Om0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 74, __pyx_L3_error) + __pyx_v_Ode0 = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_Ode0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 74, __pyx_L3_error) + __pyx_v_Ok0 = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_Ok0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 74, __pyx_L3_error) + __pyx_v_Ogamma0 = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_Ogamma0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 75, __pyx_L3_error) + __pyx_v_NeffPerNu = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_NeffPerNu == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 75, __pyx_L3_error) + __pyx_v_nmasslessnu = __Pyx_PyInt_As_int(values[6]); if (unlikely((__pyx_v_nmasslessnu == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 75, __pyx_L3_error) + __pyx_v_nu_y = ((PyObject*)values[7]); + __pyx_v_w0 = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_w0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 75, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("wcdm_inv_efunc", 1, 9, 9, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 74, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.wcdm_inv_efunc", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_nu_y), (&PyList_Type), 1, "nu_y", 1))) __PYX_ERR(0, 75, __pyx_L1_error) + __pyx_r = __pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_16wcdm_inv_efunc(__pyx_self, __pyx_v_z, __pyx_v_Om0, __pyx_v_Ode0, __pyx_v_Ok0, __pyx_v_Ogamma0, __pyx_v_NeffPerNu, __pyx_v_nmasslessnu, __pyx_v_nu_y, __pyx_v_w0); + + /* function exit code */ + goto __pyx_L0; + __pyx_L1_error:; + __pyx_r = NULL; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_16wcdm_inv_efunc(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, double __pyx_v_Ode0, double __pyx_v_Ok0, double __pyx_v_Ogamma0, double __pyx_v_NeffPerNu, int __pyx_v_nmasslessnu, PyObject *__pyx_v_nu_y, double __pyx_v_w0) { + double __pyx_v_opz; + double __pyx_v_Or0; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + double __pyx_t_4; + __Pyx_RefNannySetupContext("wcdm_inv_efunc", 0); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":77 + * double Ogamma0, double NeffPerNu, int nmasslessnu, list nu_y, double w0): + * + * cdef double opz = 1.0 + z # <<<<<<<<<<<<<< + * cdef double Or0 = Ogamma0 * (1.0 + nufunc(opz, NeffPerNu, nmasslessnu, nu_y)) + * return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 + + */ + __pyx_v_opz = (1.0 + __pyx_v_z); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":78 + * + * cdef double opz = 1.0 + z + * cdef double Or0 = Ogamma0 * (1.0 + nufunc(opz, NeffPerNu, nmasslessnu, nu_y)) # <<<<<<<<<<<<<< + * return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 + + * Ode0 * opz**(3. 
* (1.0 + w0)), -0.5) + */ + __pyx_t_1 = PyFloat_FromDouble(__pyx_v_Ogamma0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 78, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __pyx_f_7astropy_9cosmology_17scalar_inv_efuncs_nufunc(__pyx_v_opz, __pyx_v_NeffPerNu, __pyx_v_nmasslessnu, __pyx_v_nu_y); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 78, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = __Pyx_PyFloat_AddCObj(__pyx_float_1_0, __pyx_t_2, 1.0, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 78, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = PyNumber_Multiply(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 78, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_4 = __pyx_PyFloat_AsDouble(__pyx_t_2); if (unlikely((__pyx_t_4 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 78, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_v_Or0 = __pyx_t_4; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":79 + * cdef double opz = 1.0 + z + * cdef double Or0 = Ogamma0 * (1.0 + nufunc(opz, NeffPerNu, nmasslessnu, nu_y)) + * return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 + # <<<<<<<<<<<<<< + * Ode0 * opz**(3. * (1.0 + w0)), -0.5) + * + */ + __Pyx_XDECREF(__pyx_r); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":80 + * cdef double Or0 = Ogamma0 * (1.0 + nufunc(opz, NeffPerNu, nmasslessnu, nu_y)) + * return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 + + * Ode0 * opz**(3. * (1.0 + w0)), -0.5) # <<<<<<<<<<<<<< + * + * ######## Flat wCDM + */ + __pyx_t_2 = PyFloat_FromDouble(pow(((((((__pyx_v_opz * __pyx_v_Or0) + __pyx_v_Om0) * __pyx_v_opz) + __pyx_v_Ok0) * pow(__pyx_v_opz, 2.0)) + (__pyx_v_Ode0 * pow(__pyx_v_opz, (3. * (1.0 + __pyx_v_w0))))), -0.5)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 79, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":74 + * + * # With massive neutrinos + * def wcdm_inv_efunc(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double Ogamma0, double NeffPerNu, int nmasslessnu, list nu_y, double w0): + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.wcdm_inv_efunc", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "astropy/cosmology/scalar_inv_efuncs.pyx":84 + * ######## Flat wCDM + * # No relativistic species + * def fwcdm_inv_efunc_norel(double z, double Om0, double Ode0, double w0): # <<<<<<<<<<<<<< + * cdef double opz = 1.0 + z + * return pow(opz**3 * Om0 + Ode0 * opz**(3. 
* (1.0 + w0)), -0.5) + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_19fwcdm_inv_efunc_norel(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyMethodDef __pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_19fwcdm_inv_efunc_norel = {"fwcdm_inv_efunc_norel", (PyCFunction)__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_19fwcdm_inv_efunc_norel, METH_VARARGS|METH_KEYWORDS, 0}; +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_19fwcdm_inv_efunc_norel(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + double __pyx_v_z; + double __pyx_v_Om0; + double __pyx_v_Ode0; + double __pyx_v_w0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("fwcdm_inv_efunc_norel (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_z,&__pyx_n_s_Om0,&__pyx_n_s_Ode0,&__pyx_n_s_w0,0}; + PyObject* values[4] = {0,0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Om0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("fwcdm_inv_efunc_norel", 1, 4, 4, 1); __PYX_ERR(0, 84, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ode0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("fwcdm_inv_efunc_norel", 1, 4, 4, 2); __PYX_ERR(0, 84, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 3: + if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_w0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("fwcdm_inv_efunc_norel", 1, 4, 4, 3); __PYX_ERR(0, 84, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "fwcdm_inv_efunc_norel") < 0)) __PYX_ERR(0, 84, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 4) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + } + __pyx_v_z = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_z == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 84, __pyx_L3_error) + __pyx_v_Om0 = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_Om0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 84, __pyx_L3_error) + __pyx_v_Ode0 = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_Ode0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 84, __pyx_L3_error) + __pyx_v_w0 = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_w0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 84, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("fwcdm_inv_efunc_norel", 
1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 84, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.fwcdm_inv_efunc_norel", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_18fwcdm_inv_efunc_norel(__pyx_self, __pyx_v_z, __pyx_v_Om0, __pyx_v_Ode0, __pyx_v_w0); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_18fwcdm_inv_efunc_norel(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, double __pyx_v_Ode0, double __pyx_v_w0) { + double __pyx_v_opz; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("fwcdm_inv_efunc_norel", 0); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":85 + * # No relativistic species + * def fwcdm_inv_efunc_norel(double z, double Om0, double Ode0, double w0): + * cdef double opz = 1.0 + z # <<<<<<<<<<<<<< + * return pow(opz**3 * Om0 + Ode0 * opz**(3. * (1.0 + w0)), -0.5) + * + */ + __pyx_v_opz = (1.0 + __pyx_v_z); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":86 + * def fwcdm_inv_efunc_norel(double z, double Om0, double Ode0, double w0): + * cdef double opz = 1.0 + z + * return pow(opz**3 * Om0 + Ode0 * opz**(3. * (1.0 + w0)), -0.5) # <<<<<<<<<<<<<< + * + * # Massless neutrinos + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyFloat_FromDouble(pow(((pow(__pyx_v_opz, 3.0) * __pyx_v_Om0) + (__pyx_v_Ode0 * pow(__pyx_v_opz, (3. * (1.0 + __pyx_v_w0))))), -0.5)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 86, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":84 + * ######## Flat wCDM + * # No relativistic species + * def fwcdm_inv_efunc_norel(double z, double Om0, double Ode0, double w0): # <<<<<<<<<<<<<< + * cdef double opz = 1.0 + z + * return pow(opz**3 * Om0 + Ode0 * opz**(3. 
* (1.0 + w0)), -0.5) + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.fwcdm_inv_efunc_norel", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "astropy/cosmology/scalar_inv_efuncs.pyx":89 + * + * # Massless neutrinos + * def fwcdm_inv_efunc_nomnu(double z, double Om0, double Ode0, # <<<<<<<<<<<<<< + * double Or0, double w0): + * cdef double opz = 1.0 + z + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_21fwcdm_inv_efunc_nomnu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyMethodDef __pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_21fwcdm_inv_efunc_nomnu = {"fwcdm_inv_efunc_nomnu", (PyCFunction)__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_21fwcdm_inv_efunc_nomnu, METH_VARARGS|METH_KEYWORDS, 0}; +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_21fwcdm_inv_efunc_nomnu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + double __pyx_v_z; + double __pyx_v_Om0; + double __pyx_v_Ode0; + double __pyx_v_Or0; + double __pyx_v_w0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("fwcdm_inv_efunc_nomnu (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_z,&__pyx_n_s_Om0,&__pyx_n_s_Ode0,&__pyx_n_s_Or0,&__pyx_n_s_w0,0}; + PyObject* values[5] = {0,0,0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + CYTHON_FALLTHROUGH; + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Om0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("fwcdm_inv_efunc_nomnu", 1, 5, 5, 1); __PYX_ERR(0, 89, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ode0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("fwcdm_inv_efunc_nomnu", 1, 5, 5, 2); __PYX_ERR(0, 89, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 3: + if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Or0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("fwcdm_inv_efunc_nomnu", 1, 5, 5, 3); __PYX_ERR(0, 89, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 4: + if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_w0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("fwcdm_inv_efunc_nomnu", 1, 5, 5, 4); __PYX_ERR(0, 89, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "fwcdm_inv_efunc_nomnu") < 0)) __PYX_ERR(0, 89, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 5) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = 
PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + } + __pyx_v_z = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_z == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 89, __pyx_L3_error) + __pyx_v_Om0 = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_Om0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 89, __pyx_L3_error) + __pyx_v_Ode0 = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_Ode0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 89, __pyx_L3_error) + __pyx_v_Or0 = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_Or0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 90, __pyx_L3_error) + __pyx_v_w0 = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_w0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 90, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("fwcdm_inv_efunc_nomnu", 1, 5, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 89, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.fwcdm_inv_efunc_nomnu", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_20fwcdm_inv_efunc_nomnu(__pyx_self, __pyx_v_z, __pyx_v_Om0, __pyx_v_Ode0, __pyx_v_Or0, __pyx_v_w0); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_20fwcdm_inv_efunc_nomnu(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, double __pyx_v_Ode0, double __pyx_v_Or0, double __pyx_v_w0) { + double __pyx_v_opz; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("fwcdm_inv_efunc_nomnu", 0); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":91 + * def fwcdm_inv_efunc_nomnu(double z, double Om0, double Ode0, + * double Or0, double w0): + * cdef double opz = 1.0 + z # <<<<<<<<<<<<<< + * return pow(opz**3 * (opz * Or0 + Om0) + + * Ode0 * opz**(3. * (1.0 + w0)), -0.5) + */ + __pyx_v_opz = (1.0 + __pyx_v_z); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":92 + * double Or0, double w0): + * cdef double opz = 1.0 + z + * return pow(opz**3 * (opz * Or0 + Om0) + # <<<<<<<<<<<<<< + * Ode0 * opz**(3. * (1.0 + w0)), -0.5) + * + */ + __Pyx_XDECREF(__pyx_r); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":93 + * cdef double opz = 1.0 + z + * return pow(opz**3 * (opz * Or0 + Om0) + + * Ode0 * opz**(3. * (1.0 + w0)), -0.5) # <<<<<<<<<<<<<< + * + * # With massive neutrinos + */ + __pyx_t_1 = PyFloat_FromDouble(pow(((pow(__pyx_v_opz, 3.0) * ((__pyx_v_opz * __pyx_v_Or0) + __pyx_v_Om0)) + (__pyx_v_Ode0 * pow(__pyx_v_opz, (3. 
* (1.0 + __pyx_v_w0))))), -0.5)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 92, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":89 + * + * # Massless neutrinos + * def fwcdm_inv_efunc_nomnu(double z, double Om0, double Ode0, # <<<<<<<<<<<<<< + * double Or0, double w0): + * cdef double opz = 1.0 + z + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.fwcdm_inv_efunc_nomnu", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "astropy/cosmology/scalar_inv_efuncs.pyx":96 + * + * # With massive neutrinos + * def fwcdm_inv_efunc(double z, double Om0, double Ode0, # <<<<<<<<<<<<<< + * double Ogamma0, double NeffPerNu, int nmasslessnu, list nu_y, double w0): + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_23fwcdm_inv_efunc(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyMethodDef __pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_23fwcdm_inv_efunc = {"fwcdm_inv_efunc", (PyCFunction)__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_23fwcdm_inv_efunc, METH_VARARGS|METH_KEYWORDS, 0}; +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_23fwcdm_inv_efunc(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + double __pyx_v_z; + double __pyx_v_Om0; + double __pyx_v_Ode0; + double __pyx_v_Ogamma0; + double __pyx_v_NeffPerNu; + int __pyx_v_nmasslessnu; + PyObject *__pyx_v_nu_y = 0; + double __pyx_v_w0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("fwcdm_inv_efunc (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_z,&__pyx_n_s_Om0,&__pyx_n_s_Ode0,&__pyx_n_s_Ogamma0,&__pyx_n_s_NeffPerNu,&__pyx_n_s_nmasslessnu,&__pyx_n_s_nu_y,&__pyx_n_s_w0,0}; + PyObject* values[8] = {0,0,0,0,0,0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); + CYTHON_FALLTHROUGH; + case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); + CYTHON_FALLTHROUGH; + case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); + CYTHON_FALLTHROUGH; + case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + CYTHON_FALLTHROUGH; + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Om0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("fwcdm_inv_efunc", 1, 8, 8, 1); __PYX_ERR(0, 96, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ode0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("fwcdm_inv_efunc", 1, 8, 8, 2); __PYX_ERR(0, 96, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + 
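+ /* For reference: the fwcdm_* functions are the flat (Ok0 = 0) special
+  * case of wcdm_*, dropping the curvature term:
+  *
+  *     1/E(z) = ((1+z)**3 * ((1+z)*Or0 + Om0)
+  *               + Ode0*(1+z)**(3*(1+w0)))**-0.5
+  *
+  * where, in the massive-neutrino variant quoted below,
+  * Or0 = Ogamma0 * (1 + nufunc(opz, NeffPerNu, nmasslessnu, nu_y)). */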
case 3: + if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ogamma0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("fwcdm_inv_efunc", 1, 8, 8, 3); __PYX_ERR(0, 96, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 4: + if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_NeffPerNu)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("fwcdm_inv_efunc", 1, 8, 8, 4); __PYX_ERR(0, 96, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 5: + if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_nmasslessnu)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("fwcdm_inv_efunc", 1, 8, 8, 5); __PYX_ERR(0, 96, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 6: + if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_nu_y)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("fwcdm_inv_efunc", 1, 8, 8, 6); __PYX_ERR(0, 96, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 7: + if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_w0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("fwcdm_inv_efunc", 1, 8, 8, 7); __PYX_ERR(0, 96, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "fwcdm_inv_efunc") < 0)) __PYX_ERR(0, 96, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 8) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + values[5] = PyTuple_GET_ITEM(__pyx_args, 5); + values[6] = PyTuple_GET_ITEM(__pyx_args, 6); + values[7] = PyTuple_GET_ITEM(__pyx_args, 7); + } + __pyx_v_z = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_z == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 96, __pyx_L3_error) + __pyx_v_Om0 = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_Om0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 96, __pyx_L3_error) + __pyx_v_Ode0 = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_Ode0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 96, __pyx_L3_error) + __pyx_v_Ogamma0 = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_Ogamma0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 97, __pyx_L3_error) + __pyx_v_NeffPerNu = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_NeffPerNu == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 97, __pyx_L3_error) + __pyx_v_nmasslessnu = __Pyx_PyInt_As_int(values[5]); if (unlikely((__pyx_v_nmasslessnu == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 97, __pyx_L3_error) + __pyx_v_nu_y = ((PyObject*)values[6]); + __pyx_v_w0 = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_w0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 97, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("fwcdm_inv_efunc", 1, 8, 8, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 96, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.fwcdm_inv_efunc", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_nu_y), (&PyList_Type), 1, "nu_y", 1))) __PYX_ERR(0, 97, __pyx_L1_error) + __pyx_r = __pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_22fwcdm_inv_efunc(__pyx_self, __pyx_v_z, __pyx_v_Om0, 
__pyx_v_Ode0, __pyx_v_Ogamma0, __pyx_v_NeffPerNu, __pyx_v_nmasslessnu, __pyx_v_nu_y, __pyx_v_w0); + + /* function exit code */ + goto __pyx_L0; + __pyx_L1_error:; + __pyx_r = NULL; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_22fwcdm_inv_efunc(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, double __pyx_v_Ode0, double __pyx_v_Ogamma0, double __pyx_v_NeffPerNu, int __pyx_v_nmasslessnu, PyObject *__pyx_v_nu_y, double __pyx_v_w0) { + double __pyx_v_opz; + double __pyx_v_Or0; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + double __pyx_t_4; + __Pyx_RefNannySetupContext("fwcdm_inv_efunc", 0); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":99 + * double Ogamma0, double NeffPerNu, int nmasslessnu, list nu_y, double w0): + * + * cdef double opz = 1.0 + z # <<<<<<<<<<<<<< + * cdef double Or0 = Ogamma0 * (1.0 + nufunc(opz, NeffPerNu, nmasslessnu, nu_y)) + * return pow(opz**3 * (opz * Or0 + Om0) + Ode0 * opz**(3. * (1.0 + w0)), -0.5) + */ + __pyx_v_opz = (1.0 + __pyx_v_z); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":100 + * + * cdef double opz = 1.0 + z + * cdef double Or0 = Ogamma0 * (1.0 + nufunc(opz, NeffPerNu, nmasslessnu, nu_y)) # <<<<<<<<<<<<<< + * return pow(opz**3 * (opz * Or0 + Om0) + Ode0 * opz**(3. * (1.0 + w0)), -0.5) + * + */ + __pyx_t_1 = PyFloat_FromDouble(__pyx_v_Ogamma0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 100, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __pyx_f_7astropy_9cosmology_17scalar_inv_efuncs_nufunc(__pyx_v_opz, __pyx_v_NeffPerNu, __pyx_v_nmasslessnu, __pyx_v_nu_y); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 100, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = __Pyx_PyFloat_AddCObj(__pyx_float_1_0, __pyx_t_2, 1.0, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 100, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = PyNumber_Multiply(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 100, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_4 = __pyx_PyFloat_AsDouble(__pyx_t_2); if (unlikely((__pyx_t_4 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 100, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_v_Or0 = __pyx_t_4; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":101 + * cdef double opz = 1.0 + z + * cdef double Or0 = Ogamma0 * (1.0 + nufunc(opz, NeffPerNu, nmasslessnu, nu_y)) + * return pow(opz**3 * (opz * Or0 + Om0) + Ode0 * opz**(3. * (1.0 + w0)), -0.5) # <<<<<<<<<<<<<< + * + * ######## w0waCDM + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = PyFloat_FromDouble(pow(((pow(__pyx_v_opz, 3.0) * ((__pyx_v_opz * __pyx_v_Or0) + __pyx_v_Om0)) + (__pyx_v_Ode0 * pow(__pyx_v_opz, (3. 
* (1.0 + __pyx_v_w0))))), -0.5)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 101, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":96 + * + * # With massive neutrinos + * def fwcdm_inv_efunc(double z, double Om0, double Ode0, # <<<<<<<<<<<<<< + * double Ogamma0, double NeffPerNu, int nmasslessnu, list nu_y, double w0): + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.fwcdm_inv_efunc", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "astropy/cosmology/scalar_inv_efuncs.pyx":105 + * ######## w0waCDM + * # No relativistic species + * def w0wacdm_inv_efunc_norel(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double w0, double wa): + * cdef double opz = 1.0 + z + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_25w0wacdm_inv_efunc_norel(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyMethodDef __pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_25w0wacdm_inv_efunc_norel = {"w0wacdm_inv_efunc_norel", (PyCFunction)__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_25w0wacdm_inv_efunc_norel, METH_VARARGS|METH_KEYWORDS, 0}; +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_25w0wacdm_inv_efunc_norel(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + double __pyx_v_z; + double __pyx_v_Om0; + double __pyx_v_Ode0; + double __pyx_v_Ok0; + double __pyx_v_w0; + double __pyx_v_wa; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("w0wacdm_inv_efunc_norel (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_z,&__pyx_n_s_Om0,&__pyx_n_s_Ode0,&__pyx_n_s_Ok0,&__pyx_n_s_w0,&__pyx_n_s_wa,0}; + PyObject* values[6] = {0,0,0,0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); + CYTHON_FALLTHROUGH; + case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + CYTHON_FALLTHROUGH; + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Om0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("w0wacdm_inv_efunc_norel", 1, 6, 6, 1); __PYX_ERR(0, 105, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ode0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("w0wacdm_inv_efunc_norel", 1, 6, 6, 2); __PYX_ERR(0, 105, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 3: + if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ok0)) != 0)) kw_args--; + else { + 
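+ /* For reference: the w0wacdm_* functions use the CPL
+  * (Chevallier-Polarski-Linder) parametrization w(z) = w0 + wa*z/(1+z),
+  * under which the dark-energy density scales as
+  *
+  *     Odescl = (1+z)**(3*(1+w0+wa)) * exp(-3*wa*z/(1+z))
+  *
+  * exactly as in the Odescl line of the .pyx source quoted below. */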
__Pyx_RaiseArgtupleInvalid("w0wacdm_inv_efunc_norel", 1, 6, 6, 3); __PYX_ERR(0, 105, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 4: + if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_w0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("w0wacdm_inv_efunc_norel", 1, 6, 6, 4); __PYX_ERR(0, 105, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 5: + if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_wa)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("w0wacdm_inv_efunc_norel", 1, 6, 6, 5); __PYX_ERR(0, 105, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "w0wacdm_inv_efunc_norel") < 0)) __PYX_ERR(0, 105, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 6) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + values[5] = PyTuple_GET_ITEM(__pyx_args, 5); + } + __pyx_v_z = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_z == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 105, __pyx_L3_error) + __pyx_v_Om0 = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_Om0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 105, __pyx_L3_error) + __pyx_v_Ode0 = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_Ode0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 105, __pyx_L3_error) + __pyx_v_Ok0 = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_Ok0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 105, __pyx_L3_error) + __pyx_v_w0 = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_w0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 106, __pyx_L3_error) + __pyx_v_wa = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_wa == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 106, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("w0wacdm_inv_efunc_norel", 1, 6, 6, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 105, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.w0wacdm_inv_efunc_norel", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_24w0wacdm_inv_efunc_norel(__pyx_self, __pyx_v_z, __pyx_v_Om0, __pyx_v_Ode0, __pyx_v_Ok0, __pyx_v_w0, __pyx_v_wa); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_24w0wacdm_inv_efunc_norel(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, double __pyx_v_Ode0, double __pyx_v_Ok0, double __pyx_v_w0, double __pyx_v_wa) { + double __pyx_v_opz; + PyObject *__pyx_v_Odescl = 0; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + double __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + __Pyx_RefNannySetupContext("w0wacdm_inv_efunc_norel", 0); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":107 + * def w0wacdm_inv_efunc_norel(double z, double Om0, double Ode0, double Ok0, + * double w0, double wa): + * cdef double opz = 1.0 + z # <<<<<<<<<<<<<< + * cdef Odescl = opz**(3. 
* (1 + w0 + wa)) * exp(-3.0 * wa * z / opz) + * return pow(opz**2 * (opz * Om0 + Ok0) + Ode0 * Odescl, -0.5) + */ + __pyx_v_opz = (1.0 + __pyx_v_z); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":108 + * double w0, double wa): + * cdef double opz = 1.0 + z + * cdef Odescl = opz**(3. * (1 + w0 + wa)) * exp(-3.0 * wa * z / opz) # <<<<<<<<<<<<<< + * return pow(opz**2 * (opz * Om0 + Ok0) + Ode0 * Odescl, -0.5) + * + */ + __pyx_t_1 = ((-3.0 * __pyx_v_wa) * __pyx_v_z); + if (unlikely(__pyx_v_opz == 0)) { + PyErr_SetString(PyExc_ZeroDivisionError, "float division"); + __PYX_ERR(0, 108, __pyx_L1_error) + } + __pyx_t_2 = PyFloat_FromDouble((pow(__pyx_v_opz, (3. * ((1.0 + __pyx_v_w0) + __pyx_v_wa))) * exp((__pyx_t_1 / __pyx_v_opz)))); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 108, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_v_Odescl = __pyx_t_2; + __pyx_t_2 = 0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":109 + * cdef double opz = 1.0 + z + * cdef Odescl = opz**(3. * (1 + w0 + wa)) * exp(-3.0 * wa * z / opz) + * return pow(opz**2 * (opz * Om0 + Ok0) + Ode0 * Odescl, -0.5) # <<<<<<<<<<<<<< + * + * # Massless neutrinos + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = PyFloat_FromDouble((pow(__pyx_v_opz, 2.0) * ((__pyx_v_opz * __pyx_v_Om0) + __pyx_v_Ok0))); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 109, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyFloat_FromDouble(__pyx_v_Ode0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 109, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyNumber_Multiply(__pyx_t_3, __pyx_v_Odescl); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 109, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = PyNumber_Add(__pyx_t_2, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 109, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_1 = __pyx_PyFloat_AsDouble(__pyx_t_3); if (unlikely((__pyx_t_1 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 109, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = PyFloat_FromDouble(pow(__pyx_t_1, -0.5)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 109, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_r = __pyx_t_3; + __pyx_t_3 = 0; + goto __pyx_L0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":105 + * ######## w0waCDM + * # No relativistic species + * def w0wacdm_inv_efunc_norel(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double w0, double wa): + * cdef double opz = 1.0 + z + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.w0wacdm_inv_efunc_norel", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_Odescl); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "astropy/cosmology/scalar_inv_efuncs.pyx":112 + * + * # Massless neutrinos + * def w0wacdm_inv_efunc_nomnu(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double Or0, double w0, double wa): + * cdef double opz = 1.0 + z + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_27w0wacdm_inv_efunc_nomnu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyMethodDef __pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_27w0wacdm_inv_efunc_nomnu = {"w0wacdm_inv_efunc_nomnu", 
(PyCFunction)__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_27w0wacdm_inv_efunc_nomnu, METH_VARARGS|METH_KEYWORDS, 0}; +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_27w0wacdm_inv_efunc_nomnu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + double __pyx_v_z; + double __pyx_v_Om0; + double __pyx_v_Ode0; + double __pyx_v_Ok0; + double __pyx_v_Or0; + double __pyx_v_w0; + double __pyx_v_wa; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("w0wacdm_inv_efunc_nomnu (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_z,&__pyx_n_s_Om0,&__pyx_n_s_Ode0,&__pyx_n_s_Ok0,&__pyx_n_s_Or0,&__pyx_n_s_w0,&__pyx_n_s_wa,0}; + PyObject* values[7] = {0,0,0,0,0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); + CYTHON_FALLTHROUGH; + case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); + CYTHON_FALLTHROUGH; + case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + CYTHON_FALLTHROUGH; + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Om0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("w0wacdm_inv_efunc_nomnu", 1, 7, 7, 1); __PYX_ERR(0, 112, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ode0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("w0wacdm_inv_efunc_nomnu", 1, 7, 7, 2); __PYX_ERR(0, 112, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 3: + if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ok0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("w0wacdm_inv_efunc_nomnu", 1, 7, 7, 3); __PYX_ERR(0, 112, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 4: + if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Or0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("w0wacdm_inv_efunc_nomnu", 1, 7, 7, 4); __PYX_ERR(0, 112, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 5: + if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_w0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("w0wacdm_inv_efunc_nomnu", 1, 7, 7, 5); __PYX_ERR(0, 112, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 6: + if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_wa)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("w0wacdm_inv_efunc_nomnu", 1, 7, 7, 6); __PYX_ERR(0, 112, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "w0wacdm_inv_efunc_nomnu") < 0)) __PYX_ERR(0, 112, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 7) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + values[3] = 
PyTuple_GET_ITEM(__pyx_args, 3); + values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + values[5] = PyTuple_GET_ITEM(__pyx_args, 5); + values[6] = PyTuple_GET_ITEM(__pyx_args, 6); + } + __pyx_v_z = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_z == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 112, __pyx_L3_error) + __pyx_v_Om0 = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_Om0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 112, __pyx_L3_error) + __pyx_v_Ode0 = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_Ode0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 112, __pyx_L3_error) + __pyx_v_Ok0 = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_Ok0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 112, __pyx_L3_error) + __pyx_v_Or0 = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_Or0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 113, __pyx_L3_error) + __pyx_v_w0 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_w0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 113, __pyx_L3_error) + __pyx_v_wa = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_wa == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 113, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("w0wacdm_inv_efunc_nomnu", 1, 7, 7, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 112, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.w0wacdm_inv_efunc_nomnu", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_26w0wacdm_inv_efunc_nomnu(__pyx_self, __pyx_v_z, __pyx_v_Om0, __pyx_v_Ode0, __pyx_v_Ok0, __pyx_v_Or0, __pyx_v_w0, __pyx_v_wa); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_26w0wacdm_inv_efunc_nomnu(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, double __pyx_v_Ode0, double __pyx_v_Ok0, double __pyx_v_Or0, double __pyx_v_w0, double __pyx_v_wa) { + double __pyx_v_opz; + PyObject *__pyx_v_Odescl = 0; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + double __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + __Pyx_RefNannySetupContext("w0wacdm_inv_efunc_nomnu", 0); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":114 + * def w0wacdm_inv_efunc_nomnu(double z, double Om0, double Ode0, double Ok0, + * double Or0, double w0, double wa): + * cdef double opz = 1.0 + z # <<<<<<<<<<<<<< + * cdef Odescl = opz**(3. * (1 + w0 + wa)) * exp(-3.0 * wa * z / opz) + * return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 + + */ + __pyx_v_opz = (1.0 + __pyx_v_z); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":115 + * double Or0, double w0, double wa): + * cdef double opz = 1.0 + z + * cdef Odescl = opz**(3. * (1 + w0 + wa)) * exp(-3.0 * wa * z / opz) # <<<<<<<<<<<<<< + * return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 + + * Ode0 * Odescl, -0.5) + */ + __pyx_t_1 = ((-3.0 * __pyx_v_wa) * __pyx_v_z); + if (unlikely(__pyx_v_opz == 0)) { + PyErr_SetString(PyExc_ZeroDivisionError, "float division"); + __PYX_ERR(0, 115, __pyx_L1_error) + } + __pyx_t_2 = PyFloat_FromDouble((pow(__pyx_v_opz, (3. 
* ((1.0 + __pyx_v_w0) + __pyx_v_wa))) * exp((__pyx_t_1 / __pyx_v_opz)))); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 115, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_v_Odescl = __pyx_t_2; + __pyx_t_2 = 0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":116 + * cdef double opz = 1.0 + z + * cdef Odescl = opz**(3. * (1 + w0 + wa)) * exp(-3.0 * wa * z / opz) + * return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 + # <<<<<<<<<<<<<< + * Ode0 * Odescl, -0.5) + * + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = PyFloat_FromDouble((((((__pyx_v_opz * __pyx_v_Or0) + __pyx_v_Om0) * __pyx_v_opz) + __pyx_v_Ok0) * pow(__pyx_v_opz, 2.0))); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 116, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":117 + * cdef Odescl = opz**(3. * (1 + w0 + wa)) * exp(-3.0 * wa * z / opz) + * return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 + + * Ode0 * Odescl, -0.5) # <<<<<<<<<<<<<< + * + * def w0wacdm_inv_efunc(double z, double Om0, double Ode0, double Ok0, + */ + __pyx_t_3 = PyFloat_FromDouble(__pyx_v_Ode0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 117, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyNumber_Multiply(__pyx_t_3, __pyx_v_Odescl); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 117, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":116 + * cdef double opz = 1.0 + z + * cdef Odescl = opz**(3. * (1 + w0 + wa)) * exp(-3.0 * wa * z / opz) + * return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 + # <<<<<<<<<<<<<< + * Ode0 * Odescl, -0.5) + * + */ + __pyx_t_3 = PyNumber_Add(__pyx_t_2, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 116, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_1 = __pyx_PyFloat_AsDouble(__pyx_t_3); if (unlikely((__pyx_t_1 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 116, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = PyFloat_FromDouble(pow(__pyx_t_1, -0.5)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 116, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_r = __pyx_t_3; + __pyx_t_3 = 0; + goto __pyx_L0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":112 + * + * # Massless neutrinos + * def w0wacdm_inv_efunc_nomnu(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double Or0, double w0, double wa): + * cdef double opz = 1.0 + z + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.w0wacdm_inv_efunc_nomnu", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_Odescl); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "astropy/cosmology/scalar_inv_efuncs.pyx":119 + * Ode0 * Odescl, -0.5) + * + * def w0wacdm_inv_efunc(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double Ogamma0, double NeffPerNu, int nmasslessnu, list nu_y, double w0, + * double wa): + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_29w0wacdm_inv_efunc(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyMethodDef __pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_29w0wacdm_inv_efunc = {"w0wacdm_inv_efunc", (PyCFunction)__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_29w0wacdm_inv_efunc, 
METH_VARARGS|METH_KEYWORDS, 0}; +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_29w0wacdm_inv_efunc(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + double __pyx_v_z; + double __pyx_v_Om0; + double __pyx_v_Ode0; + double __pyx_v_Ok0; + double __pyx_v_Ogamma0; + double __pyx_v_NeffPerNu; + int __pyx_v_nmasslessnu; + PyObject *__pyx_v_nu_y = 0; + double __pyx_v_w0; + double __pyx_v_wa; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("w0wacdm_inv_efunc (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_z,&__pyx_n_s_Om0,&__pyx_n_s_Ode0,&__pyx_n_s_Ok0,&__pyx_n_s_Ogamma0,&__pyx_n_s_NeffPerNu,&__pyx_n_s_nmasslessnu,&__pyx_n_s_nu_y,&__pyx_n_s_w0,&__pyx_n_s_wa,0}; + PyObject* values[10] = {0,0,0,0,0,0,0,0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); + CYTHON_FALLTHROUGH; + case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); + CYTHON_FALLTHROUGH; + case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); + CYTHON_FALLTHROUGH; + case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); + CYTHON_FALLTHROUGH; + case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); + CYTHON_FALLTHROUGH; + case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + CYTHON_FALLTHROUGH; + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Om0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("w0wacdm_inv_efunc", 1, 10, 10, 1); __PYX_ERR(0, 119, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ode0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("w0wacdm_inv_efunc", 1, 10, 10, 2); __PYX_ERR(0, 119, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 3: + if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ok0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("w0wacdm_inv_efunc", 1, 10, 10, 3); __PYX_ERR(0, 119, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 4: + if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ogamma0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("w0wacdm_inv_efunc", 1, 10, 10, 4); __PYX_ERR(0, 119, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 5: + if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_NeffPerNu)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("w0wacdm_inv_efunc", 1, 10, 10, 5); __PYX_ERR(0, 119, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 6: + if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_nmasslessnu)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("w0wacdm_inv_efunc", 1, 10, 10, 6); __PYX_ERR(0, 119, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 7: + if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_nu_y)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("w0wacdm_inv_efunc", 1, 
10, 10, 7); __PYX_ERR(0, 119, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 8: + if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_w0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("w0wacdm_inv_efunc", 1, 10, 10, 8); __PYX_ERR(0, 119, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 9: + if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_wa)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("w0wacdm_inv_efunc", 1, 10, 10, 9); __PYX_ERR(0, 119, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "w0wacdm_inv_efunc") < 0)) __PYX_ERR(0, 119, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 10) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + values[5] = PyTuple_GET_ITEM(__pyx_args, 5); + values[6] = PyTuple_GET_ITEM(__pyx_args, 6); + values[7] = PyTuple_GET_ITEM(__pyx_args, 7); + values[8] = PyTuple_GET_ITEM(__pyx_args, 8); + values[9] = PyTuple_GET_ITEM(__pyx_args, 9); + } + __pyx_v_z = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_z == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 119, __pyx_L3_error) + __pyx_v_Om0 = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_Om0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 119, __pyx_L3_error) + __pyx_v_Ode0 = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_Ode0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 119, __pyx_L3_error) + __pyx_v_Ok0 = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_Ok0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 119, __pyx_L3_error) + __pyx_v_Ogamma0 = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_Ogamma0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 120, __pyx_L3_error) + __pyx_v_NeffPerNu = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_NeffPerNu == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 120, __pyx_L3_error) + __pyx_v_nmasslessnu = __Pyx_PyInt_As_int(values[6]); if (unlikely((__pyx_v_nmasslessnu == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 120, __pyx_L3_error) + __pyx_v_nu_y = ((PyObject*)values[7]); + __pyx_v_w0 = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_w0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 120, __pyx_L3_error) + __pyx_v_wa = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_wa == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 121, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("w0wacdm_inv_efunc", 1, 10, 10, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 119, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.w0wacdm_inv_efunc", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_nu_y), (&PyList_Type), 1, "nu_y", 1))) __PYX_ERR(0, 120, __pyx_L1_error) + __pyx_r = __pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_28w0wacdm_inv_efunc(__pyx_self, __pyx_v_z, __pyx_v_Om0, __pyx_v_Ode0, __pyx_v_Ok0, __pyx_v_Ogamma0, __pyx_v_NeffPerNu, __pyx_v_nmasslessnu, __pyx_v_nu_y, __pyx_v_w0, __pyx_v_wa); + + /* function exit code */ + goto __pyx_L0; + __pyx_L1_error:; + __pyx_r = NULL; + 
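+/* Illustrative sketch (not Cython output): w0wacdm_inv_efunc, implemented
+ * just below, computes 1/E(z) for the CPL (Chevallier-Polarski-Linder)
+ * parameterization w(a) = w0 + wa*(1 - a). In plain-C terms the body
+ * quoted from scalar_inv_efuncs.pyx:123-126 reduces to:
+ *
+ *     double opz    = 1.0 + z;
+ *     double Or0    = Ogamma0 * (1.0 + nufunc(opz, NeffPerNu,
+ *                                             nmasslessnu, nu_y));
+ *     double Odescl = pow(opz, 3.0 * (1.0 + w0 + wa))
+ *                     * exp(-3.0 * wa * z / opz);
+ *     return pow((((opz*Or0 + Om0) * opz) + Ok0) * opz*opz
+ *                + Ode0 * Odescl, -0.5);
+ *
+ * Odescl is the standard CPL dark-energy density scaling
+ * (1+z)^(3*(1+w0+wa)) * exp(-3*wa*z/(1+z)).
+ */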
__pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_28w0wacdm_inv_efunc(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, double __pyx_v_Ode0, double __pyx_v_Ok0, double __pyx_v_Ogamma0, double __pyx_v_NeffPerNu, int __pyx_v_nmasslessnu, PyObject *__pyx_v_nu_y, double __pyx_v_w0, double __pyx_v_wa) { + double __pyx_v_opz; + double __pyx_v_Or0; + PyObject *__pyx_v_Odescl = 0; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + double __pyx_t_4; + __Pyx_RefNannySetupContext("w0wacdm_inv_efunc", 0); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":123 + * double wa): + * + * cdef double opz = 1.0 + z # <<<<<<<<<<<<<< + * cdef double Or0 = Ogamma0 * (1.0 + nufunc(opz, NeffPerNu, nmasslessnu, nu_y)) + * cdef Odescl = opz**(3. * (1 + w0 + wa)) * exp(-3.0 * wa * z / opz) + */ + __pyx_v_opz = (1.0 + __pyx_v_z); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":124 + * + * cdef double opz = 1.0 + z + * cdef double Or0 = Ogamma0 * (1.0 + nufunc(opz, NeffPerNu, nmasslessnu, nu_y)) # <<<<<<<<<<<<<< + * cdef Odescl = opz**(3. * (1 + w0 + wa)) * exp(-3.0 * wa * z / opz) + * return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 + Ode0 * Odescl, -0.5) + */ + __pyx_t_1 = PyFloat_FromDouble(__pyx_v_Ogamma0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 124, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __pyx_f_7astropy_9cosmology_17scalar_inv_efuncs_nufunc(__pyx_v_opz, __pyx_v_NeffPerNu, __pyx_v_nmasslessnu, __pyx_v_nu_y); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 124, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = __Pyx_PyFloat_AddCObj(__pyx_float_1_0, __pyx_t_2, 1.0, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 124, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = PyNumber_Multiply(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 124, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_4 = __pyx_PyFloat_AsDouble(__pyx_t_2); if (unlikely((__pyx_t_4 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 124, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_v_Or0 = __pyx_t_4; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":125 + * cdef double opz = 1.0 + z + * cdef double Or0 = Ogamma0 * (1.0 + nufunc(opz, NeffPerNu, nmasslessnu, nu_y)) + * cdef Odescl = opz**(3. * (1 + w0 + wa)) * exp(-3.0 * wa * z / opz) # <<<<<<<<<<<<<< + * return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 + Ode0 * Odescl, -0.5) + * + */ + __pyx_t_4 = ((-3.0 * __pyx_v_wa) * __pyx_v_z); + if (unlikely(__pyx_v_opz == 0)) { + PyErr_SetString(PyExc_ZeroDivisionError, "float division"); + __PYX_ERR(0, 125, __pyx_L1_error) + } + __pyx_t_2 = PyFloat_FromDouble((pow(__pyx_v_opz, (3. * ((1.0 + __pyx_v_w0) + __pyx_v_wa))) * exp((__pyx_t_4 / __pyx_v_opz)))); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 125, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_v_Odescl = __pyx_t_2; + __pyx_t_2 = 0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":126 + * cdef double Or0 = Ogamma0 * (1.0 + nufunc(opz, NeffPerNu, nmasslessnu, nu_y)) + * cdef Odescl = opz**(3. 
* (1 + w0 + wa)) * exp(-3.0 * wa * z / opz) + * return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 + Ode0 * Odescl, -0.5) # <<<<<<<<<<<<<< + * + * ######## Flatw0waCDM + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = PyFloat_FromDouble((((((__pyx_v_opz * __pyx_v_Or0) + __pyx_v_Om0) * __pyx_v_opz) + __pyx_v_Ok0) * pow(__pyx_v_opz, 2.0))); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 126, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyFloat_FromDouble(__pyx_v_Ode0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 126, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_1 = PyNumber_Multiply(__pyx_t_3, __pyx_v_Odescl); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 126, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = PyNumber_Add(__pyx_t_2, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 126, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_4 = __pyx_PyFloat_AsDouble(__pyx_t_3); if (unlikely((__pyx_t_4 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 126, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = PyFloat_FromDouble(pow(__pyx_t_4, -0.5)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 126, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_r = __pyx_t_3; + __pyx_t_3 = 0; + goto __pyx_L0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":119 + * Ode0 * Odescl, -0.5) + * + * def w0wacdm_inv_efunc(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double Ogamma0, double NeffPerNu, int nmasslessnu, list nu_y, double w0, + * double wa): + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.w0wacdm_inv_efunc", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_Odescl); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "astropy/cosmology/scalar_inv_efuncs.pyx":130 + * ######## Flatw0waCDM + * # No relativistic species + * def fw0wacdm_inv_efunc_norel(double z, double Om0, double Ode0, # <<<<<<<<<<<<<< + * double w0, double wa): + * cdef double opz = 1.0 + z + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_31fw0wacdm_inv_efunc_norel(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyMethodDef __pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_31fw0wacdm_inv_efunc_norel = {"fw0wacdm_inv_efunc_norel", (PyCFunction)__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_31fw0wacdm_inv_efunc_norel, METH_VARARGS|METH_KEYWORDS, 0}; +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_31fw0wacdm_inv_efunc_norel(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + double __pyx_v_z; + double __pyx_v_Om0; + double __pyx_v_Ode0; + double __pyx_v_w0; + double __pyx_v_wa; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("fw0wacdm_inv_efunc_norel (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_z,&__pyx_n_s_Om0,&__pyx_n_s_Ode0,&__pyx_n_s_w0,&__pyx_n_s_wa,0}; + PyObject* values[5] = {0,0,0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + CYTHON_FALLTHROUGH; + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + 
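+/* Illustrative sketch (not Cython output): the Flatw0waCDM variants that
+ * follow drop the curvature term (Ok0) and, in the _norel case, all
+ * relativistic species, so the body quoted from
+ * scalar_inv_efuncs.pyx:132-134 reduces to:
+ *
+ *     double opz    = 1.0 + z;
+ *     double Odescl = pow(opz, 3.0 * (1.0 + w0 + wa))
+ *                     * exp(-3.0 * wa * z / opz);
+ *     return pow(opz*opz*opz * Om0 + Ode0 * Odescl, -0.5);
+ */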
CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Om0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("fw0wacdm_inv_efunc_norel", 1, 5, 5, 1); __PYX_ERR(0, 130, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ode0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("fw0wacdm_inv_efunc_norel", 1, 5, 5, 2); __PYX_ERR(0, 130, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 3: + if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_w0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("fw0wacdm_inv_efunc_norel", 1, 5, 5, 3); __PYX_ERR(0, 130, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 4: + if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_wa)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("fw0wacdm_inv_efunc_norel", 1, 5, 5, 4); __PYX_ERR(0, 130, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "fw0wacdm_inv_efunc_norel") < 0)) __PYX_ERR(0, 130, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 5) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + } + __pyx_v_z = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_z == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 130, __pyx_L3_error) + __pyx_v_Om0 = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_Om0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 130, __pyx_L3_error) + __pyx_v_Ode0 = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_Ode0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 130, __pyx_L3_error) + __pyx_v_w0 = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_w0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 131, __pyx_L3_error) + __pyx_v_wa = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_wa == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 131, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("fw0wacdm_inv_efunc_norel", 1, 5, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 130, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.fw0wacdm_inv_efunc_norel", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_30fw0wacdm_inv_efunc_norel(__pyx_self, __pyx_v_z, __pyx_v_Om0, __pyx_v_Ode0, __pyx_v_w0, __pyx_v_wa); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_30fw0wacdm_inv_efunc_norel(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, 
double __pyx_v_Om0, double __pyx_v_Ode0, double __pyx_v_w0, double __pyx_v_wa) { + double __pyx_v_opz; + PyObject *__pyx_v_Odescl = 0; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + double __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + __Pyx_RefNannySetupContext("fw0wacdm_inv_efunc_norel", 0); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":132 + * def fw0wacdm_inv_efunc_norel(double z, double Om0, double Ode0, + * double w0, double wa): + * cdef double opz = 1.0 + z # <<<<<<<<<<<<<< + * cdef Odescl = opz**(3. * (1 + w0 + wa)) * exp(-3.0 * wa * z / opz) + * return pow(opz**3 * Om0 + Ode0 * Odescl, -0.5) + */ + __pyx_v_opz = (1.0 + __pyx_v_z); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":133 + * double w0, double wa): + * cdef double opz = 1.0 + z + * cdef Odescl = opz**(3. * (1 + w0 + wa)) * exp(-3.0 * wa * z / opz) # <<<<<<<<<<<<<< + * return pow(opz**3 * Om0 + Ode0 * Odescl, -0.5) + * + */ + __pyx_t_1 = ((-3.0 * __pyx_v_wa) * __pyx_v_z); + if (unlikely(__pyx_v_opz == 0)) { + PyErr_SetString(PyExc_ZeroDivisionError, "float division"); + __PYX_ERR(0, 133, __pyx_L1_error) + } + __pyx_t_2 = PyFloat_FromDouble((pow(__pyx_v_opz, (3. * ((1.0 + __pyx_v_w0) + __pyx_v_wa))) * exp((__pyx_t_1 / __pyx_v_opz)))); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 133, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_v_Odescl = __pyx_t_2; + __pyx_t_2 = 0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":134 + * cdef double opz = 1.0 + z + * cdef Odescl = opz**(3. * (1 + w0 + wa)) * exp(-3.0 * wa * z / opz) + * return pow(opz**3 * Om0 + Ode0 * Odescl, -0.5) # <<<<<<<<<<<<<< + * + * # Massless neutrinos + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = PyFloat_FromDouble((pow(__pyx_v_opz, 3.0) * __pyx_v_Om0)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 134, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyFloat_FromDouble(__pyx_v_Ode0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 134, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyNumber_Multiply(__pyx_t_3, __pyx_v_Odescl); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 134, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = PyNumber_Add(__pyx_t_2, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 134, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_1 = __pyx_PyFloat_AsDouble(__pyx_t_3); if (unlikely((__pyx_t_1 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 134, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = PyFloat_FromDouble(pow(__pyx_t_1, -0.5)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 134, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_r = __pyx_t_3; + __pyx_t_3 = 0; + goto __pyx_L0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":130 + * ######## Flatw0waCDM + * # No relativistic species + * def fw0wacdm_inv_efunc_norel(double z, double Om0, double Ode0, # <<<<<<<<<<<<<< + * double w0, double wa): + * cdef double opz = 1.0 + z + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.fw0wacdm_inv_efunc_norel", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_Odescl); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "astropy/cosmology/scalar_inv_efuncs.pyx":137 + * + * # Massless neutrinos + * def 
fw0wacdm_inv_efunc_nomnu(double z, double Om0, double Ode0, # <<<<<<<<<<<<<< + * double Or0, double w0, double wa): + * cdef double opz = 1.0 + z + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_33fw0wacdm_inv_efunc_nomnu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyMethodDef __pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_33fw0wacdm_inv_efunc_nomnu = {"fw0wacdm_inv_efunc_nomnu", (PyCFunction)__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_33fw0wacdm_inv_efunc_nomnu, METH_VARARGS|METH_KEYWORDS, 0}; +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_33fw0wacdm_inv_efunc_nomnu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + double __pyx_v_z; + double __pyx_v_Om0; + double __pyx_v_Ode0; + double __pyx_v_Or0; + double __pyx_v_w0; + double __pyx_v_wa; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("fw0wacdm_inv_efunc_nomnu (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_z,&__pyx_n_s_Om0,&__pyx_n_s_Ode0,&__pyx_n_s_Or0,&__pyx_n_s_w0,&__pyx_n_s_wa,0}; + PyObject* values[6] = {0,0,0,0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); + CYTHON_FALLTHROUGH; + case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + CYTHON_FALLTHROUGH; + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Om0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("fw0wacdm_inv_efunc_nomnu", 1, 6, 6, 1); __PYX_ERR(0, 137, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ode0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("fw0wacdm_inv_efunc_nomnu", 1, 6, 6, 2); __PYX_ERR(0, 137, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 3: + if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Or0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("fw0wacdm_inv_efunc_nomnu", 1, 6, 6, 3); __PYX_ERR(0, 137, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 4: + if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_w0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("fw0wacdm_inv_efunc_nomnu", 1, 6, 6, 4); __PYX_ERR(0, 137, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 5: + if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_wa)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("fw0wacdm_inv_efunc_nomnu", 1, 6, 6, 5); __PYX_ERR(0, 137, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "fw0wacdm_inv_efunc_nomnu") < 0)) __PYX_ERR(0, 137, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 6) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 
0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + values[5] = PyTuple_GET_ITEM(__pyx_args, 5); + } + __pyx_v_z = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_z == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 137, __pyx_L3_error) + __pyx_v_Om0 = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_Om0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 137, __pyx_L3_error) + __pyx_v_Ode0 = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_Ode0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 137, __pyx_L3_error) + __pyx_v_Or0 = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_Or0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 138, __pyx_L3_error) + __pyx_v_w0 = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_w0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 138, __pyx_L3_error) + __pyx_v_wa = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_wa == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 138, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("fw0wacdm_inv_efunc_nomnu", 1, 6, 6, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 137, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.fw0wacdm_inv_efunc_nomnu", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_32fw0wacdm_inv_efunc_nomnu(__pyx_self, __pyx_v_z, __pyx_v_Om0, __pyx_v_Ode0, __pyx_v_Or0, __pyx_v_w0, __pyx_v_wa); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_32fw0wacdm_inv_efunc_nomnu(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, double __pyx_v_Ode0, double __pyx_v_Or0, double __pyx_v_w0, double __pyx_v_wa) { + double __pyx_v_opz; + PyObject *__pyx_v_Odescl = 0; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + double __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + __Pyx_RefNannySetupContext("fw0wacdm_inv_efunc_nomnu", 0); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":139 + * def fw0wacdm_inv_efunc_nomnu(double z, double Om0, double Ode0, + * double Or0, double w0, double wa): + * cdef double opz = 1.0 + z # <<<<<<<<<<<<<< + * cdef Odescl = opz**(3. * (1 + w0 + wa)) * exp(-3.0 * wa * z / opz) + * return pow((opz * Or0 + Om0) * opz**3 + Ode0 * Odescl, -0.5) + */ + __pyx_v_opz = (1.0 + __pyx_v_z); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":140 + * double Or0, double w0, double wa): + * cdef double opz = 1.0 + z + * cdef Odescl = opz**(3. * (1 + w0 + wa)) * exp(-3.0 * wa * z / opz) # <<<<<<<<<<<<<< + * return pow((opz * Or0 + Om0) * opz**3 + Ode0 * Odescl, -0.5) + * + */ + __pyx_t_1 = ((-3.0 * __pyx_v_wa) * __pyx_v_z); + if (unlikely(__pyx_v_opz == 0)) { + PyErr_SetString(PyExc_ZeroDivisionError, "float division"); + __PYX_ERR(0, 140, __pyx_L1_error) + } + __pyx_t_2 = PyFloat_FromDouble((pow(__pyx_v_opz, (3. 
* ((1.0 + __pyx_v_w0) + __pyx_v_wa))) * exp((__pyx_t_1 / __pyx_v_opz)))); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 140, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_v_Odescl = __pyx_t_2; + __pyx_t_2 = 0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":141 + * cdef double opz = 1.0 + z + * cdef Odescl = opz**(3. * (1 + w0 + wa)) * exp(-3.0 * wa * z / opz) + * return pow((opz * Or0 + Om0) * opz**3 + Ode0 * Odescl, -0.5) # <<<<<<<<<<<<<< + * + * # With massive neutrinos + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = PyFloat_FromDouble((((__pyx_v_opz * __pyx_v_Or0) + __pyx_v_Om0) * pow(__pyx_v_opz, 3.0))); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 141, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyFloat_FromDouble(__pyx_v_Ode0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 141, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyNumber_Multiply(__pyx_t_3, __pyx_v_Odescl); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 141, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = PyNumber_Add(__pyx_t_2, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 141, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_1 = __pyx_PyFloat_AsDouble(__pyx_t_3); if (unlikely((__pyx_t_1 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 141, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = PyFloat_FromDouble(pow(__pyx_t_1, -0.5)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 141, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_r = __pyx_t_3; + __pyx_t_3 = 0; + goto __pyx_L0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":137 + * + * # Massless neutrinos + * def fw0wacdm_inv_efunc_nomnu(double z, double Om0, double Ode0, # <<<<<<<<<<<<<< + * double Or0, double w0, double wa): + * cdef double opz = 1.0 + z + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.fw0wacdm_inv_efunc_nomnu", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_Odescl); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "astropy/cosmology/scalar_inv_efuncs.pyx":144 + * + * # With massive neutrinos + * def fw0wacdm_inv_efunc(double z, double Om0, double Ode0, # <<<<<<<<<<<<<< + * double Ogamma0, double NeffPerNu, int nmasslessnu, list nu_y, double w0, + * double wa): + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_35fw0wacdm_inv_efunc(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyMethodDef __pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_35fw0wacdm_inv_efunc = {"fw0wacdm_inv_efunc", (PyCFunction)__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_35fw0wacdm_inv_efunc, METH_VARARGS|METH_KEYWORDS, 0}; +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_35fw0wacdm_inv_efunc(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + double __pyx_v_z; + double __pyx_v_Om0; + double __pyx_v_Ode0; + double __pyx_v_Ogamma0; + double __pyx_v_NeffPerNu; + int __pyx_v_nmasslessnu; + PyObject *__pyx_v_nu_y = 0; + double __pyx_v_w0; + double __pyx_v_wa; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("fw0wacdm_inv_efunc (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = 
{&__pyx_n_s_z,&__pyx_n_s_Om0,&__pyx_n_s_Ode0,&__pyx_n_s_Ogamma0,&__pyx_n_s_NeffPerNu,&__pyx_n_s_nmasslessnu,&__pyx_n_s_nu_y,&__pyx_n_s_w0,&__pyx_n_s_wa,0}; + PyObject* values[9] = {0,0,0,0,0,0,0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); + CYTHON_FALLTHROUGH; + case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); + CYTHON_FALLTHROUGH; + case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); + CYTHON_FALLTHROUGH; + case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); + CYTHON_FALLTHROUGH; + case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + CYTHON_FALLTHROUGH; + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Om0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("fw0wacdm_inv_efunc", 1, 9, 9, 1); __PYX_ERR(0, 144, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ode0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("fw0wacdm_inv_efunc", 1, 9, 9, 2); __PYX_ERR(0, 144, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 3: + if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ogamma0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("fw0wacdm_inv_efunc", 1, 9, 9, 3); __PYX_ERR(0, 144, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 4: + if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_NeffPerNu)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("fw0wacdm_inv_efunc", 1, 9, 9, 4); __PYX_ERR(0, 144, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 5: + if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_nmasslessnu)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("fw0wacdm_inv_efunc", 1, 9, 9, 5); __PYX_ERR(0, 144, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 6: + if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_nu_y)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("fw0wacdm_inv_efunc", 1, 9, 9, 6); __PYX_ERR(0, 144, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 7: + if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_w0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("fw0wacdm_inv_efunc", 1, 9, 9, 7); __PYX_ERR(0, 144, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 8: + if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_wa)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("fw0wacdm_inv_efunc", 1, 9, 9, 8); __PYX_ERR(0, 144, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "fw0wacdm_inv_efunc") < 0)) __PYX_ERR(0, 144, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 9) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = 
PyTuple_GET_ITEM(__pyx_args, 2); + values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + values[5] = PyTuple_GET_ITEM(__pyx_args, 5); + values[6] = PyTuple_GET_ITEM(__pyx_args, 6); + values[7] = PyTuple_GET_ITEM(__pyx_args, 7); + values[8] = PyTuple_GET_ITEM(__pyx_args, 8); + } + __pyx_v_z = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_z == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 144, __pyx_L3_error) + __pyx_v_Om0 = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_Om0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 144, __pyx_L3_error) + __pyx_v_Ode0 = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_Ode0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 144, __pyx_L3_error) + __pyx_v_Ogamma0 = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_Ogamma0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 145, __pyx_L3_error) + __pyx_v_NeffPerNu = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_NeffPerNu == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 145, __pyx_L3_error) + __pyx_v_nmasslessnu = __Pyx_PyInt_As_int(values[5]); if (unlikely((__pyx_v_nmasslessnu == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 145, __pyx_L3_error) + __pyx_v_nu_y = ((PyObject*)values[6]); + __pyx_v_w0 = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_w0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 145, __pyx_L3_error) + __pyx_v_wa = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_wa == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 146, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("fw0wacdm_inv_efunc", 1, 9, 9, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 144, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.fw0wacdm_inv_efunc", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_nu_y), (&PyList_Type), 1, "nu_y", 1))) __PYX_ERR(0, 145, __pyx_L1_error) + __pyx_r = __pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_34fw0wacdm_inv_efunc(__pyx_self, __pyx_v_z, __pyx_v_Om0, __pyx_v_Ode0, __pyx_v_Ogamma0, __pyx_v_NeffPerNu, __pyx_v_nmasslessnu, __pyx_v_nu_y, __pyx_v_w0, __pyx_v_wa); + + /* function exit code */ + goto __pyx_L0; + __pyx_L1_error:; + __pyx_r = NULL; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_34fw0wacdm_inv_efunc(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, double __pyx_v_Ode0, double __pyx_v_Ogamma0, double __pyx_v_NeffPerNu, int __pyx_v_nmasslessnu, PyObject *__pyx_v_nu_y, double __pyx_v_w0, double __pyx_v_wa) { + double __pyx_v_opz; + double __pyx_v_Or0; + PyObject *__pyx_v_Odescl = 0; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + double __pyx_t_4; + __Pyx_RefNannySetupContext("fw0wacdm_inv_efunc", 0); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":148 + * double wa): + * + * cdef double opz = 1.0 + z # <<<<<<<<<<<<<< + * cdef double Or0 = Ogamma0 * (1.0 + nufunc(opz, NeffPerNu, nmasslessnu, nu_y)) + * cdef Odescl = opz**(3. 
* (1 + w0 + wa)) * exp(-3.0 * wa * z / opz) + */ + __pyx_v_opz = (1.0 + __pyx_v_z); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":149 + * + * cdef double opz = 1.0 + z + * cdef double Or0 = Ogamma0 * (1.0 + nufunc(opz, NeffPerNu, nmasslessnu, nu_y)) # <<<<<<<<<<<<<< + * cdef Odescl = opz**(3. * (1 + w0 + wa)) * exp(-3.0 * wa * z / opz) + * return pow((opz * Or0 + Om0) * opz**3 + Ode0 * Odescl, -0.5) + */ + __pyx_t_1 = PyFloat_FromDouble(__pyx_v_Ogamma0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 149, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __pyx_f_7astropy_9cosmology_17scalar_inv_efuncs_nufunc(__pyx_v_opz, __pyx_v_NeffPerNu, __pyx_v_nmasslessnu, __pyx_v_nu_y); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 149, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = __Pyx_PyFloat_AddCObj(__pyx_float_1_0, __pyx_t_2, 1.0, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 149, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = PyNumber_Multiply(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 149, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_4 = __pyx_PyFloat_AsDouble(__pyx_t_2); if (unlikely((__pyx_t_4 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 149, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_v_Or0 = __pyx_t_4; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":150 + * cdef double opz = 1.0 + z + * cdef double Or0 = Ogamma0 * (1.0 + nufunc(opz, NeffPerNu, nmasslessnu, nu_y)) + * cdef Odescl = opz**(3. * (1 + w0 + wa)) * exp(-3.0 * wa * z / opz) # <<<<<<<<<<<<<< + * return pow((opz * Or0 + Om0) * opz**3 + Ode0 * Odescl, -0.5) + * + */ + __pyx_t_4 = ((-3.0 * __pyx_v_wa) * __pyx_v_z); + if (unlikely(__pyx_v_opz == 0)) { + PyErr_SetString(PyExc_ZeroDivisionError, "float division"); + __PYX_ERR(0, 150, __pyx_L1_error) + } + __pyx_t_2 = PyFloat_FromDouble((pow(__pyx_v_opz, (3. * ((1.0 + __pyx_v_w0) + __pyx_v_wa))) * exp((__pyx_t_4 / __pyx_v_opz)))); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 150, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_v_Odescl = __pyx_t_2; + __pyx_t_2 = 0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":151 + * cdef double Or0 = Ogamma0 * (1.0 + nufunc(opz, NeffPerNu, nmasslessnu, nu_y)) + * cdef Odescl = opz**(3. 
* (1 + w0 + wa)) * exp(-3.0 * wa * z / opz) + * return pow((opz * Or0 + Om0) * opz**3 + Ode0 * Odescl, -0.5) # <<<<<<<<<<<<<< + * + * ######## wpwaCDM + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = PyFloat_FromDouble((((__pyx_v_opz * __pyx_v_Or0) + __pyx_v_Om0) * pow(__pyx_v_opz, 3.0))); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 151, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyFloat_FromDouble(__pyx_v_Ode0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 151, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_1 = PyNumber_Multiply(__pyx_t_3, __pyx_v_Odescl); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 151, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = PyNumber_Add(__pyx_t_2, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 151, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_4 = __pyx_PyFloat_AsDouble(__pyx_t_3); if (unlikely((__pyx_t_4 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 151, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = PyFloat_FromDouble(pow(__pyx_t_4, -0.5)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 151, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_r = __pyx_t_3; + __pyx_t_3 = 0; + goto __pyx_L0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":144 + * + * # With massive neutrinos + * def fw0wacdm_inv_efunc(double z, double Om0, double Ode0, # <<<<<<<<<<<<<< + * double Ogamma0, double NeffPerNu, int nmasslessnu, list nu_y, double w0, + * double wa): + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.fw0wacdm_inv_efunc", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_Odescl); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "astropy/cosmology/scalar_inv_efuncs.pyx":155 + * ######## wpwaCDM + * # No relativistic species + * def wpwacdm_inv_efunc_norel(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double wp, double apiv, double wa): + * cdef double opz = 1.0 + z + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_37wpwacdm_inv_efunc_norel(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyMethodDef __pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_37wpwacdm_inv_efunc_norel = {"wpwacdm_inv_efunc_norel", (PyCFunction)__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_37wpwacdm_inv_efunc_norel, METH_VARARGS|METH_KEYWORDS, 0}; +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_37wpwacdm_inv_efunc_norel(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + double __pyx_v_z; + double __pyx_v_Om0; + double __pyx_v_Ode0; + double __pyx_v_Ok0; + double __pyx_v_wp; + double __pyx_v_apiv; + double __pyx_v_wa; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("wpwacdm_inv_efunc_norel (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_z,&__pyx_n_s_Om0,&__pyx_n_s_Ode0,&__pyx_n_s_Ok0,&__pyx_n_s_wp,&__pyx_n_s_apiv,&__pyx_n_s_wa,0}; + PyObject* values[7] = {0,0,0,0,0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); + CYTHON_FALLTHROUGH; + case 6: values[5] = 
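+
+/* For reference, per the .pyx source quoted above (lines 144-151): the
+ * flat w0waCDM inverse E-function with massive neutrinos evaluates
+ *
+ *   E(z)^{-1} = \bigl[((1+z)\,\Omega_r + \Omega_{m,0})(1+z)^3
+ *               + \Omega_{de,0}\,(1+z)^{3(1+w_0+w_a)}
+ *                 e^{-3 w_a z/(1+z)}\bigr]^{-1/2},
+ *
+ * where \Omega_r = \Omega_{\gamma,0}\,[1 + f_\nu(1+z)] and f_\nu is the
+ * correction returned by the module's nufunc helper.  A minimal
+ * pure-Python sketch of the same expression (nufunc is passed in as an
+ * argument here purely for illustration; the compiled module calls the
+ * helper directly):
+ *
+ *   from math import exp
+ *
+ *   def fw0wacdm_inv_efunc(z, Om0, Ode0, Ogamma0, NeffPerNu,
+ *                          nmasslessnu, nu_y, w0, wa, nufunc):
+ *       opz = 1.0 + z
+ *       Or0 = Ogamma0 * (1.0 + nufunc(opz, NeffPerNu, nmasslessnu, nu_y))
+ *       Odescl = opz**(3. * (1 + w0 + wa)) * exp(-3.0 * wa * z / opz)
+ *       return ((opz * Or0 + Om0) * opz**3 + Ode0 * Odescl) ** -0.5
+ */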
PyTuple_GET_ITEM(__pyx_args, 5); + CYTHON_FALLTHROUGH; + case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + CYTHON_FALLTHROUGH; + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Om0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("wpwacdm_inv_efunc_norel", 1, 7, 7, 1); __PYX_ERR(0, 155, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ode0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("wpwacdm_inv_efunc_norel", 1, 7, 7, 2); __PYX_ERR(0, 155, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 3: + if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ok0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("wpwacdm_inv_efunc_norel", 1, 7, 7, 3); __PYX_ERR(0, 155, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 4: + if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_wp)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("wpwacdm_inv_efunc_norel", 1, 7, 7, 4); __PYX_ERR(0, 155, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 5: + if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_apiv)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("wpwacdm_inv_efunc_norel", 1, 7, 7, 5); __PYX_ERR(0, 155, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 6: + if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_wa)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("wpwacdm_inv_efunc_norel", 1, 7, 7, 6); __PYX_ERR(0, 155, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "wpwacdm_inv_efunc_norel") < 0)) __PYX_ERR(0, 155, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 7) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + values[5] = PyTuple_GET_ITEM(__pyx_args, 5); + values[6] = PyTuple_GET_ITEM(__pyx_args, 6); + } + __pyx_v_z = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_z == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 155, __pyx_L3_error) + __pyx_v_Om0 = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_Om0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 155, __pyx_L3_error) + __pyx_v_Ode0 = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_Ode0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 155, __pyx_L3_error) + __pyx_v_Ok0 = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_Ok0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 155, __pyx_L3_error) + __pyx_v_wp = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_wp == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 156, __pyx_L3_error) + __pyx_v_apiv = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_apiv == 
(double)-1) && PyErr_Occurred())) __PYX_ERR(0, 156, __pyx_L3_error) + __pyx_v_wa = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_wa == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 156, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("wpwacdm_inv_efunc_norel", 1, 7, 7, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 155, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.wpwacdm_inv_efunc_norel", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_36wpwacdm_inv_efunc_norel(__pyx_self, __pyx_v_z, __pyx_v_Om0, __pyx_v_Ode0, __pyx_v_Ok0, __pyx_v_wp, __pyx_v_apiv, __pyx_v_wa); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_36wpwacdm_inv_efunc_norel(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, double __pyx_v_Ode0, double __pyx_v_Ok0, double __pyx_v_wp, double __pyx_v_apiv, double __pyx_v_wa) { + double __pyx_v_opz; + PyObject *__pyx_v_Odescl = 0; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + double __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + __Pyx_RefNannySetupContext("wpwacdm_inv_efunc_norel", 0); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":157 + * def wpwacdm_inv_efunc_norel(double z, double Om0, double Ode0, double Ok0, + * double wp, double apiv, double wa): + * cdef double opz = 1.0 + z # <<<<<<<<<<<<<< + * cdef Odescl = opz**(3. * (1. + wp + apiv * wa)) * exp(-3. * wa * z / opz) + * return pow(opz**2 * (opz * Om0 + Ok0) + Ode0 * Odescl, -0.5) + */ + __pyx_v_opz = (1.0 + __pyx_v_z); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":158 + * double wp, double apiv, double wa): + * cdef double opz = 1.0 + z + * cdef Odescl = opz**(3. * (1. + wp + apiv * wa)) * exp(-3. * wa * z / opz) # <<<<<<<<<<<<<< + * return pow(opz**2 * (opz * Om0 + Ok0) + Ode0 * Odescl, -0.5) + * + */ + __pyx_t_1 = ((-3. * __pyx_v_wa) * __pyx_v_z); + if (unlikely(__pyx_v_opz == 0)) { + PyErr_SetString(PyExc_ZeroDivisionError, "float division"); + __PYX_ERR(0, 158, __pyx_L1_error) + } + __pyx_t_2 = PyFloat_FromDouble((pow(__pyx_v_opz, (3. * ((1. + __pyx_v_wp) + (__pyx_v_apiv * __pyx_v_wa)))) * exp((__pyx_t_1 / __pyx_v_opz)))); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 158, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_v_Odescl = __pyx_t_2; + __pyx_t_2 = 0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":159 + * cdef double opz = 1.0 + z + * cdef Odescl = opz**(3. * (1. + wp + apiv * wa)) * exp(-3. 
* wa * z / opz) + * return pow(opz**2 * (opz * Om0 + Ok0) + Ode0 * Odescl, -0.5) # <<<<<<<<<<<<<< + * + * # Massless neutrinos + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = PyFloat_FromDouble((pow(__pyx_v_opz, 2.0) * ((__pyx_v_opz * __pyx_v_Om0) + __pyx_v_Ok0))); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 159, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyFloat_FromDouble(__pyx_v_Ode0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 159, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyNumber_Multiply(__pyx_t_3, __pyx_v_Odescl); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 159, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = PyNumber_Add(__pyx_t_2, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 159, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_1 = __pyx_PyFloat_AsDouble(__pyx_t_3); if (unlikely((__pyx_t_1 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 159, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = PyFloat_FromDouble(pow(__pyx_t_1, -0.5)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 159, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_r = __pyx_t_3; + __pyx_t_3 = 0; + goto __pyx_L0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":155 + * ######## wpwaCDM + * # No relativistic species + * def wpwacdm_inv_efunc_norel(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double wp, double apiv, double wa): + * cdef double opz = 1.0 + z + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.wpwacdm_inv_efunc_norel", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_Odescl); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "astropy/cosmology/scalar_inv_efuncs.pyx":162 + * + * # Massless neutrinos + * def wpwacdm_inv_efunc_nomnu(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double Or0, double wp, double apiv, double wa): + * cdef double opz = 1.0 + z + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_39wpwacdm_inv_efunc_nomnu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyMethodDef __pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_39wpwacdm_inv_efunc_nomnu = {"wpwacdm_inv_efunc_nomnu", (PyCFunction)__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_39wpwacdm_inv_efunc_nomnu, METH_VARARGS|METH_KEYWORDS, 0}; +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_39wpwacdm_inv_efunc_nomnu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + double __pyx_v_z; + double __pyx_v_Om0; + double __pyx_v_Ode0; + double __pyx_v_Ok0; + double __pyx_v_Or0; + double __pyx_v_wp; + double __pyx_v_apiv; + double __pyx_v_wa; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("wpwacdm_inv_efunc_nomnu (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_z,&__pyx_n_s_Om0,&__pyx_n_s_Ode0,&__pyx_n_s_Ok0,&__pyx_n_s_Or0,&__pyx_n_s_wp,&__pyx_n_s_apiv,&__pyx_n_s_wa,0}; + PyObject* values[8] = {0,0,0,0,0,0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); + CYTHON_FALLTHROUGH; + 
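+
+/* For reference, per the .pyx source quoted above (lines 155-159): the
+ * wpwaCDM inverse E-function with no relativistic species evaluates
+ *
+ *   E(z)^{-1} = \bigl[(1+z)^2 ((1+z)\,\Omega_{m,0} + \Omega_{k,0})
+ *               + \Omega_{de,0}\,(1+z)^{3(1+w_p+a_{piv} w_a)}
+ *                 e^{-3 w_a z/(1+z)}\bigr]^{-1/2},
+ *
+ * where apiv is the pivot scale factor of the wpwaCDM parameterisation
+ * (the scale factor at which the equation of state equals wp).
+ */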
case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); + CYTHON_FALLTHROUGH; + case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); + CYTHON_FALLTHROUGH; + case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + CYTHON_FALLTHROUGH; + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Om0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("wpwacdm_inv_efunc_nomnu", 1, 8, 8, 1); __PYX_ERR(0, 162, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ode0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("wpwacdm_inv_efunc_nomnu", 1, 8, 8, 2); __PYX_ERR(0, 162, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 3: + if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ok0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("wpwacdm_inv_efunc_nomnu", 1, 8, 8, 3); __PYX_ERR(0, 162, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 4: + if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Or0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("wpwacdm_inv_efunc_nomnu", 1, 8, 8, 4); __PYX_ERR(0, 162, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 5: + if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_wp)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("wpwacdm_inv_efunc_nomnu", 1, 8, 8, 5); __PYX_ERR(0, 162, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 6: + if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_apiv)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("wpwacdm_inv_efunc_nomnu", 1, 8, 8, 6); __PYX_ERR(0, 162, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 7: + if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_wa)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("wpwacdm_inv_efunc_nomnu", 1, 8, 8, 7); __PYX_ERR(0, 162, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "wpwacdm_inv_efunc_nomnu") < 0)) __PYX_ERR(0, 162, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 8) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + values[5] = PyTuple_GET_ITEM(__pyx_args, 5); + values[6] = PyTuple_GET_ITEM(__pyx_args, 6); + values[7] = PyTuple_GET_ITEM(__pyx_args, 7); + } + __pyx_v_z = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_z == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 162, __pyx_L3_error) + __pyx_v_Om0 = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_Om0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 162, __pyx_L3_error) + __pyx_v_Ode0 = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_Ode0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 162, __pyx_L3_error) + 
__pyx_v_Ok0 = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_Ok0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 162, __pyx_L3_error) + __pyx_v_Or0 = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_Or0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 163, __pyx_L3_error) + __pyx_v_wp = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_wp == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 163, __pyx_L3_error) + __pyx_v_apiv = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_apiv == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 163, __pyx_L3_error) + __pyx_v_wa = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_wa == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 163, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("wpwacdm_inv_efunc_nomnu", 1, 8, 8, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 162, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.wpwacdm_inv_efunc_nomnu", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_38wpwacdm_inv_efunc_nomnu(__pyx_self, __pyx_v_z, __pyx_v_Om0, __pyx_v_Ode0, __pyx_v_Ok0, __pyx_v_Or0, __pyx_v_wp, __pyx_v_apiv, __pyx_v_wa); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_38wpwacdm_inv_efunc_nomnu(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, double __pyx_v_Ode0, double __pyx_v_Ok0, double __pyx_v_Or0, double __pyx_v_wp, double __pyx_v_apiv, double __pyx_v_wa) { + double __pyx_v_opz; + PyObject *__pyx_v_Odescl = 0; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + double __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + __Pyx_RefNannySetupContext("wpwacdm_inv_efunc_nomnu", 0); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":164 + * def wpwacdm_inv_efunc_nomnu(double z, double Om0, double Ode0, double Ok0, + * double Or0, double wp, double apiv, double wa): + * cdef double opz = 1.0 + z # <<<<<<<<<<<<<< + * cdef Odescl = opz**(3. * (1. + wp + apiv * wa)) * exp(-3. * wa * z / opz) + * return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 + + */ + __pyx_v_opz = (1.0 + __pyx_v_z); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":165 + * double Or0, double wp, double apiv, double wa): + * cdef double opz = 1.0 + z + * cdef Odescl = opz**(3. * (1. + wp + apiv * wa)) * exp(-3. * wa * z / opz) # <<<<<<<<<<<<<< + * return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 + + * Ode0 * Odescl, -0.5) + */ + __pyx_t_1 = ((-3. * __pyx_v_wa) * __pyx_v_z); + if (unlikely(__pyx_v_opz == 0)) { + PyErr_SetString(PyExc_ZeroDivisionError, "float division"); + __PYX_ERR(0, 165, __pyx_L1_error) + } + __pyx_t_2 = PyFloat_FromDouble((pow(__pyx_v_opz, (3. * ((1. + __pyx_v_wp) + (__pyx_v_apiv * __pyx_v_wa)))) * exp((__pyx_t_1 / __pyx_v_opz)))); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 165, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_v_Odescl = __pyx_t_2; + __pyx_t_2 = 0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":166 + * cdef double opz = 1.0 + z + * cdef Odescl = opz**(3. * (1. + wp + apiv * wa)) * exp(-3. 
* wa * z / opz) + * return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 + # <<<<<<<<<<<<<< + * Ode0 * Odescl, -0.5) + * + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = PyFloat_FromDouble((((((__pyx_v_opz * __pyx_v_Or0) + __pyx_v_Om0) * __pyx_v_opz) + __pyx_v_Ok0) * pow(__pyx_v_opz, 2.0))); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 166, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":167 + * cdef Odescl = opz**(3. * (1. + wp + apiv * wa)) * exp(-3. * wa * z / opz) + * return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 + + * Ode0 * Odescl, -0.5) # <<<<<<<<<<<<<< + * + * # With massive neutrinos + */ + __pyx_t_3 = PyFloat_FromDouble(__pyx_v_Ode0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 167, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyNumber_Multiply(__pyx_t_3, __pyx_v_Odescl); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 167, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":166 + * cdef double opz = 1.0 + z + * cdef Odescl = opz**(3. * (1. + wp + apiv * wa)) * exp(-3. * wa * z / opz) + * return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 + # <<<<<<<<<<<<<< + * Ode0 * Odescl, -0.5) + * + */ + __pyx_t_3 = PyNumber_Add(__pyx_t_2, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 166, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_1 = __pyx_PyFloat_AsDouble(__pyx_t_3); if (unlikely((__pyx_t_1 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 166, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = PyFloat_FromDouble(pow(__pyx_t_1, -0.5)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 166, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_r = __pyx_t_3; + __pyx_t_3 = 0; + goto __pyx_L0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":162 + * + * # Massless neutrinos + * def wpwacdm_inv_efunc_nomnu(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double Or0, double wp, double apiv, double wa): + * cdef double opz = 1.0 + z + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.wpwacdm_inv_efunc_nomnu", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_Odescl); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "astropy/cosmology/scalar_inv_efuncs.pyx":170 + * + * # With massive neutrinos + * def wpwacdm_inv_efunc(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double Ogamma0, double NeffPerNu, int nmasslessnu, list nu_y, double wp, + * double apiv, double wa): + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_41wpwacdm_inv_efunc(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyMethodDef __pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_41wpwacdm_inv_efunc = {"wpwacdm_inv_efunc", (PyCFunction)__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_41wpwacdm_inv_efunc, METH_VARARGS|METH_KEYWORDS, 0}; +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_41wpwacdm_inv_efunc(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + double __pyx_v_z; + double __pyx_v_Om0; + double __pyx_v_Ode0; + double __pyx_v_Ok0; + double __pyx_v_Ogamma0; + double __pyx_v_NeffPerNu; + int __pyx_v_nmasslessnu; + 
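+
+/* For reference, per the .pyx source quoted above (lines 162-167): the
+ * massless-neutrino wpwaCDM variant folds a fixed radiation density
+ * Or0 into the same dark-energy scaling,
+ *
+ *   E(z)^{-1} = \bigl[(((1+z)\,\Omega_{r,0} + \Omega_{m,0})(1+z)
+ *               + \Omega_{k,0})(1+z)^2 + \Omega_{de,0}\,Odescl\bigr]^{-1/2},
+ *
+ * with Odescl = (1+z)^{3(1+w_p+a_{piv} w_a)} e^{-3 w_a z/(1+z)} as above.
+ */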
PyObject *__pyx_v_nu_y = 0; + double __pyx_v_wp; + double __pyx_v_apiv; + double __pyx_v_wa; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("wpwacdm_inv_efunc (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_z,&__pyx_n_s_Om0,&__pyx_n_s_Ode0,&__pyx_n_s_Ok0,&__pyx_n_s_Ogamma0,&__pyx_n_s_NeffPerNu,&__pyx_n_s_nmasslessnu,&__pyx_n_s_nu_y,&__pyx_n_s_wp,&__pyx_n_s_apiv,&__pyx_n_s_wa,0}; + PyObject* values[11] = {0,0,0,0,0,0,0,0,0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); + CYTHON_FALLTHROUGH; + case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); + CYTHON_FALLTHROUGH; + case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); + CYTHON_FALLTHROUGH; + case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); + CYTHON_FALLTHROUGH; + case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); + CYTHON_FALLTHROUGH; + case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); + CYTHON_FALLTHROUGH; + case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + CYTHON_FALLTHROUGH; + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Om0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("wpwacdm_inv_efunc", 1, 11, 11, 1); __PYX_ERR(0, 170, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ode0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("wpwacdm_inv_efunc", 1, 11, 11, 2); __PYX_ERR(0, 170, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 3: + if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ok0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("wpwacdm_inv_efunc", 1, 11, 11, 3); __PYX_ERR(0, 170, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 4: + if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ogamma0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("wpwacdm_inv_efunc", 1, 11, 11, 4); __PYX_ERR(0, 170, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 5: + if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_NeffPerNu)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("wpwacdm_inv_efunc", 1, 11, 11, 5); __PYX_ERR(0, 170, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 6: + if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_nmasslessnu)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("wpwacdm_inv_efunc", 1, 11, 11, 6); __PYX_ERR(0, 170, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 7: + if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_nu_y)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("wpwacdm_inv_efunc", 1, 11, 11, 7); __PYX_ERR(0, 170, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 8: + if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_wp)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("wpwacdm_inv_efunc", 1, 11, 
11, 8); __PYX_ERR(0, 170, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 9: + if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_apiv)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("wpwacdm_inv_efunc", 1, 11, 11, 9); __PYX_ERR(0, 170, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 10: + if (likely((values[10] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_wa)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("wpwacdm_inv_efunc", 1, 11, 11, 10); __PYX_ERR(0, 170, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "wpwacdm_inv_efunc") < 0)) __PYX_ERR(0, 170, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 11) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + values[5] = PyTuple_GET_ITEM(__pyx_args, 5); + values[6] = PyTuple_GET_ITEM(__pyx_args, 6); + values[7] = PyTuple_GET_ITEM(__pyx_args, 7); + values[8] = PyTuple_GET_ITEM(__pyx_args, 8); + values[9] = PyTuple_GET_ITEM(__pyx_args, 9); + values[10] = PyTuple_GET_ITEM(__pyx_args, 10); + } + __pyx_v_z = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_z == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 170, __pyx_L3_error) + __pyx_v_Om0 = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_Om0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 170, __pyx_L3_error) + __pyx_v_Ode0 = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_Ode0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 170, __pyx_L3_error) + __pyx_v_Ok0 = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_Ok0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 170, __pyx_L3_error) + __pyx_v_Ogamma0 = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_Ogamma0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 171, __pyx_L3_error) + __pyx_v_NeffPerNu = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_NeffPerNu == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 171, __pyx_L3_error) + __pyx_v_nmasslessnu = __Pyx_PyInt_As_int(values[6]); if (unlikely((__pyx_v_nmasslessnu == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 171, __pyx_L3_error) + __pyx_v_nu_y = ((PyObject*)values[7]); + __pyx_v_wp = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_wp == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 171, __pyx_L3_error) + __pyx_v_apiv = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_apiv == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 172, __pyx_L3_error) + __pyx_v_wa = __pyx_PyFloat_AsDouble(values[10]); if (unlikely((__pyx_v_wa == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 172, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("wpwacdm_inv_efunc", 1, 11, 11, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 170, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.wpwacdm_inv_efunc", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_nu_y), (&PyList_Type), 1, "nu_y", 1))) __PYX_ERR(0, 171, __pyx_L1_error) + __pyx_r = __pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_40wpwacdm_inv_efunc(__pyx_self, __pyx_v_z, __pyx_v_Om0, __pyx_v_Ode0, 
__pyx_v_Ok0, __pyx_v_Ogamma0, __pyx_v_NeffPerNu, __pyx_v_nmasslessnu, __pyx_v_nu_y, __pyx_v_wp, __pyx_v_apiv, __pyx_v_wa); + + /* function exit code */ + goto __pyx_L0; + __pyx_L1_error:; + __pyx_r = NULL; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_40wpwacdm_inv_efunc(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, double __pyx_v_Ode0, double __pyx_v_Ok0, double __pyx_v_Ogamma0, double __pyx_v_NeffPerNu, int __pyx_v_nmasslessnu, PyObject *__pyx_v_nu_y, double __pyx_v_wp, double __pyx_v_apiv, double __pyx_v_wa) { + double __pyx_v_opz; + double __pyx_v_Or0; + PyObject *__pyx_v_Odescl = 0; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + double __pyx_t_4; + __Pyx_RefNannySetupContext("wpwacdm_inv_efunc", 0); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":174 + * double apiv, double wa): + * + * cdef double opz = 1.0 + z # <<<<<<<<<<<<<< + * cdef double Or0 = Ogamma0 * (1.0 + nufunc(opz, NeffPerNu, nmasslessnu, nu_y)) + * cdef Odescl = opz**(3. * (1. + wp + apiv * wa)) * exp(-3. * wa * z / opz) + */ + __pyx_v_opz = (1.0 + __pyx_v_z); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":175 + * + * cdef double opz = 1.0 + z + * cdef double Or0 = Ogamma0 * (1.0 + nufunc(opz, NeffPerNu, nmasslessnu, nu_y)) # <<<<<<<<<<<<<< + * cdef Odescl = opz**(3. * (1. + wp + apiv * wa)) * exp(-3. * wa * z / opz) + * return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 + Ode0 * Odescl, -0.5) + */ + __pyx_t_1 = PyFloat_FromDouble(__pyx_v_Ogamma0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 175, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __pyx_f_7astropy_9cosmology_17scalar_inv_efuncs_nufunc(__pyx_v_opz, __pyx_v_NeffPerNu, __pyx_v_nmasslessnu, __pyx_v_nu_y); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 175, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = __Pyx_PyFloat_AddCObj(__pyx_float_1_0, __pyx_t_2, 1.0, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 175, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = PyNumber_Multiply(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 175, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_4 = __pyx_PyFloat_AsDouble(__pyx_t_2); if (unlikely((__pyx_t_4 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 175, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_v_Or0 = __pyx_t_4; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":176 + * cdef double opz = 1.0 + z + * cdef double Or0 = Ogamma0 * (1.0 + nufunc(opz, NeffPerNu, nmasslessnu, nu_y)) + * cdef Odescl = opz**(3. * (1. + wp + apiv * wa)) * exp(-3. * wa * z / opz) # <<<<<<<<<<<<<< + * return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 + Ode0 * Odescl, -0.5) + * + */ + __pyx_t_4 = ((-3. * __pyx_v_wa) * __pyx_v_z); + if (unlikely(__pyx_v_opz == 0)) { + PyErr_SetString(PyExc_ZeroDivisionError, "float division"); + __PYX_ERR(0, 176, __pyx_L1_error) + } + __pyx_t_2 = PyFloat_FromDouble((pow(__pyx_v_opz, (3. * ((1. 
+ __pyx_v_wp) + (__pyx_v_apiv * __pyx_v_wa)))) * exp((__pyx_t_4 / __pyx_v_opz)))); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 176, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_v_Odescl = __pyx_t_2; + __pyx_t_2 = 0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":177 + * cdef double Or0 = Ogamma0 * (1.0 + nufunc(opz, NeffPerNu, nmasslessnu, nu_y)) + * cdef Odescl = opz**(3. * (1. + wp + apiv * wa)) * exp(-3. * wa * z / opz) + * return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 + Ode0 * Odescl, -0.5) # <<<<<<<<<<<<<< + * + * ######## w0wzCDM + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = PyFloat_FromDouble((((((__pyx_v_opz * __pyx_v_Or0) + __pyx_v_Om0) * __pyx_v_opz) + __pyx_v_Ok0) * pow(__pyx_v_opz, 2.0))); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 177, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyFloat_FromDouble(__pyx_v_Ode0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 177, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_1 = PyNumber_Multiply(__pyx_t_3, __pyx_v_Odescl); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 177, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = PyNumber_Add(__pyx_t_2, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 177, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_4 = __pyx_PyFloat_AsDouble(__pyx_t_3); if (unlikely((__pyx_t_4 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 177, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = PyFloat_FromDouble(pow(__pyx_t_4, -0.5)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 177, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_r = __pyx_t_3; + __pyx_t_3 = 0; + goto __pyx_L0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":170 + * + * # With massive neutrinos + * def wpwacdm_inv_efunc(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double Ogamma0, double NeffPerNu, int nmasslessnu, list nu_y, double wp, + * double apiv, double wa): + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.wpwacdm_inv_efunc", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_Odescl); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "astropy/cosmology/scalar_inv_efuncs.pyx":181 + * ######## w0wzCDM + * # No relativistic species + * def w0wzcdm_inv_efunc_norel(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double w0, double wz): + * cdef double opz = 1.0 + z + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_43w0wzcdm_inv_efunc_norel(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyMethodDef __pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_43w0wzcdm_inv_efunc_norel = {"w0wzcdm_inv_efunc_norel", (PyCFunction)__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_43w0wzcdm_inv_efunc_norel, METH_VARARGS|METH_KEYWORDS, 0}; +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_43w0wzcdm_inv_efunc_norel(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + double __pyx_v_z; + double __pyx_v_Om0; + double __pyx_v_Ode0; + double __pyx_v_Ok0; + double __pyx_v_w0; + double __pyx_v_wz; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("w0wzcdm_inv_efunc_norel (wrapper)", 0); + { + 
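+
+/* For reference, per the .pyx source quoted above (lines 170-177): the
+ * massive-neutrino wpwaCDM variant is identical to the massless case
+ * except that the radiation term is rescaled at each redshift,
+ *
+ *   \Omega_r(z) = \Omega_{\gamma,0}\,[1 + f_\nu(1+z)],
+ *
+ * with f_\nu the value returned by the module's nufunc helper for
+ * (1+z, NeffPerNu, nmasslessnu, nu_y).
+ */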
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_z,&__pyx_n_s_Om0,&__pyx_n_s_Ode0,&__pyx_n_s_Ok0,&__pyx_n_s_w0,&__pyx_n_s_wz,0}; + PyObject* values[6] = {0,0,0,0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); + CYTHON_FALLTHROUGH; + case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + CYTHON_FALLTHROUGH; + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Om0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("w0wzcdm_inv_efunc_norel", 1, 6, 6, 1); __PYX_ERR(0, 181, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ode0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("w0wzcdm_inv_efunc_norel", 1, 6, 6, 2); __PYX_ERR(0, 181, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 3: + if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ok0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("w0wzcdm_inv_efunc_norel", 1, 6, 6, 3); __PYX_ERR(0, 181, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 4: + if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_w0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("w0wzcdm_inv_efunc_norel", 1, 6, 6, 4); __PYX_ERR(0, 181, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 5: + if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_wz)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("w0wzcdm_inv_efunc_norel", 1, 6, 6, 5); __PYX_ERR(0, 181, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "w0wzcdm_inv_efunc_norel") < 0)) __PYX_ERR(0, 181, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 6) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + values[5] = PyTuple_GET_ITEM(__pyx_args, 5); + } + __pyx_v_z = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_z == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 181, __pyx_L3_error) + __pyx_v_Om0 = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_Om0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 181, __pyx_L3_error) + __pyx_v_Ode0 = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_Ode0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 181, __pyx_L3_error) + __pyx_v_Ok0 = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_Ok0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 181, __pyx_L3_error) + __pyx_v_w0 = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_w0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 182, __pyx_L3_error) + __pyx_v_wz = 
__pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_wz == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 182, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("w0wzcdm_inv_efunc_norel", 1, 6, 6, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 181, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.w0wzcdm_inv_efunc_norel", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_42w0wzcdm_inv_efunc_norel(__pyx_self, __pyx_v_z, __pyx_v_Om0, __pyx_v_Ode0, __pyx_v_Ok0, __pyx_v_w0, __pyx_v_wz); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_42w0wzcdm_inv_efunc_norel(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, double __pyx_v_Ode0, double __pyx_v_Ok0, double __pyx_v_w0, double __pyx_v_wz) { + double __pyx_v_opz; + PyObject *__pyx_v_Odescl = 0; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + double __pyx_t_4; + __Pyx_RefNannySetupContext("w0wzcdm_inv_efunc_norel", 0); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":183 + * def w0wzcdm_inv_efunc_norel(double z, double Om0, double Ode0, double Ok0, + * double w0, double wz): + * cdef double opz = 1.0 + z # <<<<<<<<<<<<<< + * cdef Odescl = opz**(3. * (1. + w0 - wz)) * exp(-3. * wz * z) + * return pow(opz**2 * (opz * Om0 + Ok0) + Ode0 * Odescl, -0.5) + */ + __pyx_v_opz = (1.0 + __pyx_v_z); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":184 + * double w0, double wz): + * cdef double opz = 1.0 + z + * cdef Odescl = opz**(3. * (1. + w0 - wz)) * exp(-3. * wz * z) # <<<<<<<<<<<<<< + * return pow(opz**2 * (opz * Om0 + Ok0) + Ode0 * Odescl, -0.5) + * + */ + __pyx_t_1 = PyFloat_FromDouble((pow(__pyx_v_opz, (3. * ((1. + __pyx_v_w0) - __pyx_v_wz))) * exp(((-3. * __pyx_v_wz) * __pyx_v_z)))); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 184, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_v_Odescl = __pyx_t_1; + __pyx_t_1 = 0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":185 + * cdef double opz = 1.0 + z + * cdef Odescl = opz**(3. * (1. + w0 - wz)) * exp(-3. 
* wz * z) + * return pow(opz**2 * (opz * Om0 + Ok0) + Ode0 * Odescl, -0.5) # <<<<<<<<<<<<<< + * + * # Massless neutrinos + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyFloat_FromDouble((pow(__pyx_v_opz, 2.0) * ((__pyx_v_opz * __pyx_v_Om0) + __pyx_v_Ok0))); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 185, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = PyFloat_FromDouble(__pyx_v_Ode0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 185, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyNumber_Multiply(__pyx_t_2, __pyx_v_Odescl); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 185, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = PyNumber_Add(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 185, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_4 = __pyx_PyFloat_AsDouble(__pyx_t_2); if (unlikely((__pyx_t_4 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 185, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = PyFloat_FromDouble(pow(__pyx_t_4, -0.5)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 185, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":181 + * ######## w0wzCDM + * # No relativistic species + * def w0wzcdm_inv_efunc_norel(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double w0, double wz): + * cdef double opz = 1.0 + z + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.w0wzcdm_inv_efunc_norel", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_Odescl); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "astropy/cosmology/scalar_inv_efuncs.pyx":188 + * + * # Massless neutrinos + * def w0wzcdm_inv_efunc_nomnu(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double Or0, double w0, double wz): + * cdef double opz = 1.0 + z + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_45w0wzcdm_inv_efunc_nomnu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyMethodDef __pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_45w0wzcdm_inv_efunc_nomnu = {"w0wzcdm_inv_efunc_nomnu", (PyCFunction)__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_45w0wzcdm_inv_efunc_nomnu, METH_VARARGS|METH_KEYWORDS, 0}; +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_45w0wzcdm_inv_efunc_nomnu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + double __pyx_v_z; + double __pyx_v_Om0; + double __pyx_v_Ode0; + double __pyx_v_Ok0; + double __pyx_v_Or0; + double __pyx_v_w0; + double __pyx_v_wz; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("w0wzcdm_inv_efunc_nomnu (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_z,&__pyx_n_s_Om0,&__pyx_n_s_Ode0,&__pyx_n_s_Ok0,&__pyx_n_s_Or0,&__pyx_n_s_w0,&__pyx_n_s_wz,0}; + PyObject* values[7] = {0,0,0,0,0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); + CYTHON_FALLTHROUGH; + case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); + CYTHON_FALLTHROUGH; 
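+
+/* For reference, per the .pyx source quoted above (lines 181-185): the
+ * w0wzCDM model uses an equation of state linear in redshift,
+ * w(z) = w0 + wz z, so the dark-energy density scales as
+ *
+ *   Odescl = (1+z)^{3(1+w_0-w_z)} e^{-3 w_z z}.
+ *
+ * A minimal, self-contained pure-Python transcription of the quoted
+ * source (illustrative only, not the compiled implementation):
+ *
+ *   from math import exp
+ *
+ *   def w0wzcdm_inv_efunc_norel(z, Om0, Ode0, Ok0, w0, wz):
+ *       opz = 1.0 + z
+ *       Odescl = opz**(3. * (1. + w0 - wz)) * exp(-3. * wz * z)
+ *       return (opz**2 * (opz * Om0 + Ok0) + Ode0 * Odescl) ** -0.5
+ */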
+ case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + CYTHON_FALLTHROUGH; + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Om0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("w0wzcdm_inv_efunc_nomnu", 1, 7, 7, 1); __PYX_ERR(0, 188, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ode0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("w0wzcdm_inv_efunc_nomnu", 1, 7, 7, 2); __PYX_ERR(0, 188, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 3: + if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ok0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("w0wzcdm_inv_efunc_nomnu", 1, 7, 7, 3); __PYX_ERR(0, 188, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 4: + if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Or0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("w0wzcdm_inv_efunc_nomnu", 1, 7, 7, 4); __PYX_ERR(0, 188, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 5: + if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_w0)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("w0wzcdm_inv_efunc_nomnu", 1, 7, 7, 5); __PYX_ERR(0, 188, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 6: + if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_wz)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("w0wzcdm_inv_efunc_nomnu", 1, 7, 7, 6); __PYX_ERR(0, 188, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "w0wzcdm_inv_efunc_nomnu") < 0)) __PYX_ERR(0, 188, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 7) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + values[5] = PyTuple_GET_ITEM(__pyx_args, 5); + values[6] = PyTuple_GET_ITEM(__pyx_args, 6); + } + __pyx_v_z = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_z == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 188, __pyx_L3_error) + __pyx_v_Om0 = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_Om0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 188, __pyx_L3_error) + __pyx_v_Ode0 = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_Ode0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 188, __pyx_L3_error) + __pyx_v_Ok0 = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_Ok0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 188, __pyx_L3_error) + __pyx_v_Or0 = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_Or0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 189, __pyx_L3_error) + __pyx_v_w0 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_w0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 189, 
__pyx_L3_error) + __pyx_v_wz = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_wz == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 189, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("w0wzcdm_inv_efunc_nomnu", 1, 7, 7, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 188, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.w0wzcdm_inv_efunc_nomnu", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_44w0wzcdm_inv_efunc_nomnu(__pyx_self, __pyx_v_z, __pyx_v_Om0, __pyx_v_Ode0, __pyx_v_Ok0, __pyx_v_Or0, __pyx_v_w0, __pyx_v_wz); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_44w0wzcdm_inv_efunc_nomnu(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, double __pyx_v_Ode0, double __pyx_v_Ok0, double __pyx_v_Or0, double __pyx_v_w0, double __pyx_v_wz) { + double __pyx_v_opz; + PyObject *__pyx_v_Odescl = 0; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + double __pyx_t_4; + __Pyx_RefNannySetupContext("w0wzcdm_inv_efunc_nomnu", 0); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":190 + * def w0wzcdm_inv_efunc_nomnu(double z, double Om0, double Ode0, double Ok0, + * double Or0, double w0, double wz): + * cdef double opz = 1.0 + z # <<<<<<<<<<<<<< + * cdef Odescl = opz**(3. * (1. + w0 - wz)) * exp(-3. * wz * z) + * return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 + + */ + __pyx_v_opz = (1.0 + __pyx_v_z); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":191 + * double Or0, double w0, double wz): + * cdef double opz = 1.0 + z + * cdef Odescl = opz**(3. * (1. + w0 - wz)) * exp(-3. * wz * z) # <<<<<<<<<<<<<< + * return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 + + * Ode0 * Odescl, -0.5) + */ + __pyx_t_1 = PyFloat_FromDouble((pow(__pyx_v_opz, (3. * ((1. + __pyx_v_w0) - __pyx_v_wz))) * exp(((-3. * __pyx_v_wz) * __pyx_v_z)))); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 191, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_v_Odescl = __pyx_t_1; + __pyx_t_1 = 0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":192 + * cdef double opz = 1.0 + z + * cdef Odescl = opz**(3. * (1. + w0 - wz)) * exp(-3. * wz * z) + * return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 + # <<<<<<<<<<<<<< + * Ode0 * Odescl, -0.5) + * + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyFloat_FromDouble((((((__pyx_v_opz * __pyx_v_Or0) + __pyx_v_Om0) * __pyx_v_opz) + __pyx_v_Ok0) * pow(__pyx_v_opz, 2.0))); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 192, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":193 + * cdef Odescl = opz**(3. * (1. + w0 - wz)) * exp(-3. 
* wz * z) + * return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 + + * Ode0 * Odescl, -0.5) # <<<<<<<<<<<<<< + * + * # With massive neutrinos + */ + __pyx_t_2 = PyFloat_FromDouble(__pyx_v_Ode0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 193, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyNumber_Multiply(__pyx_t_2, __pyx_v_Odescl); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 193, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":192 + * cdef double opz = 1.0 + z + * cdef Odescl = opz**(3. * (1. + w0 - wz)) * exp(-3. * wz * z) + * return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 + # <<<<<<<<<<<<<< + * Ode0 * Odescl, -0.5) + * + */ + __pyx_t_2 = PyNumber_Add(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 192, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_4 = __pyx_PyFloat_AsDouble(__pyx_t_2); if (unlikely((__pyx_t_4 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 192, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = PyFloat_FromDouble(pow(__pyx_t_4, -0.5)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 192, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":188 + * + * # Massless neutrinos + * def w0wzcdm_inv_efunc_nomnu(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double Or0, double w0, double wz): + * cdef double opz = 1.0 + z + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.w0wzcdm_inv_efunc_nomnu", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_Odescl); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "astropy/cosmology/scalar_inv_efuncs.pyx":196 + * + * # With massive neutrinos + * def w0wzcdm_inv_efunc(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double Ogamma0, double NeffPerNu, int nmasslessnu, list nu_y, double w0, + * double wz): + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_47w0wzcdm_inv_efunc(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyMethodDef __pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_47w0wzcdm_inv_efunc = {"w0wzcdm_inv_efunc", (PyCFunction)__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_47w0wzcdm_inv_efunc, METH_VARARGS|METH_KEYWORDS, 0}; +static PyObject *__pyx_pw_7astropy_9cosmology_17scalar_inv_efuncs_47w0wzcdm_inv_efunc(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + double __pyx_v_z; + double __pyx_v_Om0; + double __pyx_v_Ode0; + double __pyx_v_Ok0; + double __pyx_v_Ogamma0; + double __pyx_v_NeffPerNu; + int __pyx_v_nmasslessnu; + PyObject *__pyx_v_nu_y = 0; + double __pyx_v_w0; + double __pyx_v_wz; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("w0wzcdm_inv_efunc (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_z,&__pyx_n_s_Om0,&__pyx_n_s_Ode0,&__pyx_n_s_Ok0,&__pyx_n_s_Ogamma0,&__pyx_n_s_NeffPerNu,&__pyx_n_s_nmasslessnu,&__pyx_n_s_nu_y,&__pyx_n_s_w0,&__pyx_n_s_wz,0}; + PyObject* values[10] = {0,0,0,0,0,0,0,0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = 
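+
+/* For reference, per the .pyx source quoted above (lines 188-193): the
+ * massless-neutrino w0wzCDM variant evaluates
+ *
+ *   E(z)^{-1} = \bigl[(((1+z)\,\Omega_{r,0} + \Omega_{m,0})(1+z)
+ *               + \Omega_{k,0})(1+z)^2
+ *               + \Omega_{de,0}\,(1+z)^{3(1+w_0-w_z)} e^{-3 w_z z}\bigr]^{-1/2}.
+ */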
PyTuple_GET_SIZE(__pyx_args);
+    switch (pos_args) {
+      case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9);
+      CYTHON_FALLTHROUGH;
+      case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8);
+      CYTHON_FALLTHROUGH;
+      case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
+      CYTHON_FALLTHROUGH;
+      case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
+      CYTHON_FALLTHROUGH;
+      case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
+      CYTHON_FALLTHROUGH;
+      case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
+      CYTHON_FALLTHROUGH;
+      case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
+      CYTHON_FALLTHROUGH;
+      case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+      CYTHON_FALLTHROUGH;
+      case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+      CYTHON_FALLTHROUGH;
+      case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+      CYTHON_FALLTHROUGH;
+      case 0: break;
+      default: goto __pyx_L5_argtuple_error;
+    }
+    kw_args = PyDict_Size(__pyx_kwds);
+    switch (pos_args) {
+      case 0:
+      if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z)) != 0)) kw_args--;
+      else goto __pyx_L5_argtuple_error;
+      CYTHON_FALLTHROUGH;
+      case 1:
+      if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Om0)) != 0)) kw_args--;
+      else {
+        __Pyx_RaiseArgtupleInvalid("w0wzcdm_inv_efunc", 1, 10, 10, 1); __PYX_ERR(0, 196, __pyx_L3_error)
+      }
+      CYTHON_FALLTHROUGH;
+      case 2:
+      if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ode0)) != 0)) kw_args--;
+      else {
+        __Pyx_RaiseArgtupleInvalid("w0wzcdm_inv_efunc", 1, 10, 10, 2); __PYX_ERR(0, 196, __pyx_L3_error)
+      }
+      CYTHON_FALLTHROUGH;
+      case 3:
+      if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ok0)) != 0)) kw_args--;
+      else {
+        __Pyx_RaiseArgtupleInvalid("w0wzcdm_inv_efunc", 1, 10, 10, 3); __PYX_ERR(0, 196, __pyx_L3_error)
+      }
+      CYTHON_FALLTHROUGH;
+      case 4:
+      if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Ogamma0)) != 0)) kw_args--;
+      else {
+        __Pyx_RaiseArgtupleInvalid("w0wzcdm_inv_efunc", 1, 10, 10, 4); __PYX_ERR(0, 196, __pyx_L3_error)
+      }
+      CYTHON_FALLTHROUGH;
+      case 5:
+      if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_NeffPerNu)) != 0)) kw_args--;
+      else {
+        __Pyx_RaiseArgtupleInvalid("w0wzcdm_inv_efunc", 1, 10, 10, 5); __PYX_ERR(0, 196, __pyx_L3_error)
+      }
+      CYTHON_FALLTHROUGH;
+      case 6:
+      if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_nmasslessnu)) != 0)) kw_args--;
+      else {
+        __Pyx_RaiseArgtupleInvalid("w0wzcdm_inv_efunc", 1, 10, 10, 6); __PYX_ERR(0, 196, __pyx_L3_error)
+      }
+      CYTHON_FALLTHROUGH;
+      case 7:
+      if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_nu_y)) != 0)) kw_args--;
+      else {
+        __Pyx_RaiseArgtupleInvalid("w0wzcdm_inv_efunc", 1, 10, 10, 7); __PYX_ERR(0, 196, __pyx_L3_error)
+      }
+      CYTHON_FALLTHROUGH;
+      case 8:
+      if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_w0)) != 0)) kw_args--;
+      else {
+        __Pyx_RaiseArgtupleInvalid("w0wzcdm_inv_efunc", 1, 10, 10, 8); __PYX_ERR(0, 196, __pyx_L3_error)
+      }
+      CYTHON_FALLTHROUGH;
+      case 9:
+      if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_wz)) != 0)) kw_args--;
+      else {
+        __Pyx_RaiseArgtupleInvalid("w0wzcdm_inv_efunc", 1, 10, 10, 9); __PYX_ERR(0, 196, __pyx_L3_error)
+      }
+    }
+    if (unlikely(kw_args > 0)) {
+      if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "w0wzcdm_inv_efunc") < 0)) __PYX_ERR(0, 196, __pyx_L3_error)
+    }
+  } else if (PyTuple_GET_SIZE(__pyx_args) != 10) {
+    goto __pyx_L5_argtuple_error;
+  } else {
+    values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+    values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
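+    /* Editorial note, not Cython output: this generated wrapper unpacks the
+     * ten arguments of the Python-level signature quoted from pyx:196,
+     *   w0wzcdm_inv_efunc(z, Om0, Ode0, Ok0, Ogamma0, NeffPerNu,
+     *                     nmasslessnu, nu_y, w0, wz)
+     * and delegates to the typed implementation below.  Per the quoted .pyx
+     * source, the return value is 1/E(z) for a w0wzCDM cosmology,
+     *   [Or0*(1+z)^4 + Om0*(1+z)^3 + Ok0*(1+z)^2 + Ode0*Odescl]^(-1/2),
+     * where Odescl = (1+z)**(3*(1 + w0 - wz)) * exp(-3*wz*z) is the
+     * dark-energy density scaling for the equation of state w(z) = w0 + wz*z,
+     * and Or0 = Ogamma0 * (1 + nufunc(...)) folds in the neutrino
+     * contribution.  A minimal call sketch from Python, with purely
+     * illustrative (hypothetical) parameter values:
+     *   from astropy.cosmology import scalar_inv_efuncs
+     *   scalar_inv_efuncs.w0wzcdm_inv_efunc(0.5, 0.3, 0.7, 0.0, 5.4e-5,
+     *                                       1.0153, 2, [297.0], -0.9, 0.1)
+     */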
+ values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + values[5] = PyTuple_GET_ITEM(__pyx_args, 5); + values[6] = PyTuple_GET_ITEM(__pyx_args, 6); + values[7] = PyTuple_GET_ITEM(__pyx_args, 7); + values[8] = PyTuple_GET_ITEM(__pyx_args, 8); + values[9] = PyTuple_GET_ITEM(__pyx_args, 9); + } + __pyx_v_z = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_z == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 196, __pyx_L3_error) + __pyx_v_Om0 = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_Om0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 196, __pyx_L3_error) + __pyx_v_Ode0 = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_Ode0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 196, __pyx_L3_error) + __pyx_v_Ok0 = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_Ok0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 196, __pyx_L3_error) + __pyx_v_Ogamma0 = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_Ogamma0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 197, __pyx_L3_error) + __pyx_v_NeffPerNu = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_NeffPerNu == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 197, __pyx_L3_error) + __pyx_v_nmasslessnu = __Pyx_PyInt_As_int(values[6]); if (unlikely((__pyx_v_nmasslessnu == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 197, __pyx_L3_error) + __pyx_v_nu_y = ((PyObject*)values[7]); + __pyx_v_w0 = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_w0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 197, __pyx_L3_error) + __pyx_v_wz = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_wz == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 198, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("w0wzcdm_inv_efunc", 1, 10, 10, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 196, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.w0wzcdm_inv_efunc", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_nu_y), (&PyList_Type), 1, "nu_y", 1))) __PYX_ERR(0, 197, __pyx_L1_error) + __pyx_r = __pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_46w0wzcdm_inv_efunc(__pyx_self, __pyx_v_z, __pyx_v_Om0, __pyx_v_Ode0, __pyx_v_Ok0, __pyx_v_Ogamma0, __pyx_v_NeffPerNu, __pyx_v_nmasslessnu, __pyx_v_nu_y, __pyx_v_w0, __pyx_v_wz); + + /* function exit code */ + goto __pyx_L0; + __pyx_L1_error:; + __pyx_r = NULL; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7astropy_9cosmology_17scalar_inv_efuncs_46w0wzcdm_inv_efunc(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_z, double __pyx_v_Om0, double __pyx_v_Ode0, double __pyx_v_Ok0, double __pyx_v_Ogamma0, double __pyx_v_NeffPerNu, int __pyx_v_nmasslessnu, PyObject *__pyx_v_nu_y, double __pyx_v_w0, double __pyx_v_wz) { + double __pyx_v_opz; + double __pyx_v_Or0; + PyObject *__pyx_v_Odescl = 0; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + double __pyx_t_4; + __Pyx_RefNannySetupContext("w0wzcdm_inv_efunc", 0); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":200 + * double wz): + * + * cdef double opz = 1.0 + z # <<<<<<<<<<<<<< + * cdef double Or0 = Ogamma0 * (1.0 + nufunc(opz, NeffPerNu, 
nmasslessnu, nu_y)) + * cdef Odescl = opz**(3. * (1. + w0 - wz)) * exp(-3. * wz * z) + */ + __pyx_v_opz = (1.0 + __pyx_v_z); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":201 + * + * cdef double opz = 1.0 + z + * cdef double Or0 = Ogamma0 * (1.0 + nufunc(opz, NeffPerNu, nmasslessnu, nu_y)) # <<<<<<<<<<<<<< + * cdef Odescl = opz**(3. * (1. + w0 - wz)) * exp(-3. * wz * z) + * return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 + Ode0 * Odescl, -0.5) + */ + __pyx_t_1 = PyFloat_FromDouble(__pyx_v_Ogamma0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 201, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __pyx_f_7astropy_9cosmology_17scalar_inv_efuncs_nufunc(__pyx_v_opz, __pyx_v_NeffPerNu, __pyx_v_nmasslessnu, __pyx_v_nu_y); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 201, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = __Pyx_PyFloat_AddCObj(__pyx_float_1_0, __pyx_t_2, 1.0, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 201, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = PyNumber_Multiply(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 201, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_4 = __pyx_PyFloat_AsDouble(__pyx_t_2); if (unlikely((__pyx_t_4 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 201, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_v_Or0 = __pyx_t_4; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":202 + * cdef double opz = 1.0 + z + * cdef double Or0 = Ogamma0 * (1.0 + nufunc(opz, NeffPerNu, nmasslessnu, nu_y)) + * cdef Odescl = opz**(3. * (1. + w0 - wz)) * exp(-3. * wz * z) # <<<<<<<<<<<<<< + * return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 + Ode0 * Odescl, -0.5) + * + */ + __pyx_t_2 = PyFloat_FromDouble((pow(__pyx_v_opz, (3. * ((1. + __pyx_v_w0) - __pyx_v_wz))) * exp(((-3. * __pyx_v_wz) * __pyx_v_z)))); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 202, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_v_Odescl = __pyx_t_2; + __pyx_t_2 = 0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":203 + * cdef double Or0 = Ogamma0 * (1.0 + nufunc(opz, NeffPerNu, nmasslessnu, nu_y)) + * cdef Odescl = opz**(3. * (1. + w0 - wz)) * exp(-3. 
* wz * z) + * return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 + Ode0 * Odescl, -0.5) # <<<<<<<<<<<<<< + * + * ######## Neutrino relative density function + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = PyFloat_FromDouble((((((__pyx_v_opz * __pyx_v_Or0) + __pyx_v_Om0) * __pyx_v_opz) + __pyx_v_Ok0) * pow(__pyx_v_opz, 2.0))); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 203, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyFloat_FromDouble(__pyx_v_Ode0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 203, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_1 = PyNumber_Multiply(__pyx_t_3, __pyx_v_Odescl); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 203, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = PyNumber_Add(__pyx_t_2, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 203, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_4 = __pyx_PyFloat_AsDouble(__pyx_t_3); if (unlikely((__pyx_t_4 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 203, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = PyFloat_FromDouble(pow(__pyx_t_4, -0.5)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 203, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_r = __pyx_t_3; + __pyx_t_3 = 0; + goto __pyx_L0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":196 + * + * # With massive neutrinos + * def w0wzcdm_inv_efunc(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double Ogamma0, double NeffPerNu, int nmasslessnu, list nu_y, double w0, + * double wz): + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.w0wzcdm_inv_efunc", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_Odescl); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "astropy/cosmology/scalar_inv_efuncs.pyx":218 + * # contribution -- see any cosmology book + * # The Komatsu reference is: Komatsu et al. 2011, ApJS 192, 18 + * cdef nufunc(double opz, double NeffPerNu, int nmasslessnu, list nu_y): # <<<<<<<<<<<<<< + * cdef int N = len(nu_y) + * cdef double k = 0.3173 / opz + */ + +static PyObject *__pyx_f_7astropy_9cosmology_17scalar_inv_efuncs_nufunc(double __pyx_v_opz, double __pyx_v_NeffPerNu, int __pyx_v_nmasslessnu, PyObject *__pyx_v_nu_y) { + int __pyx_v_N; + double __pyx_v_k; + double __pyx_v_rel_mass_sum; + unsigned int __pyx_v_i; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + Py_ssize_t __pyx_t_1; + int __pyx_t_2; + unsigned int __pyx_t_3; + PyObject *__pyx_t_4 = NULL; + double __pyx_t_5; + __Pyx_RefNannySetupContext("nufunc", 0); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":219 + * # The Komatsu reference is: Komatsu et al. 
2011, ApJS 192, 18 + * cdef nufunc(double opz, double NeffPerNu, int nmasslessnu, list nu_y): + * cdef int N = len(nu_y) # <<<<<<<<<<<<<< + * cdef double k = 0.3173 / opz + * cdef double rel_mass_sum = nmasslessnu + */ + if (unlikely(__pyx_v_nu_y == Py_None)) { + PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); + __PYX_ERR(0, 219, __pyx_L1_error) + } + __pyx_t_1 = PyList_GET_SIZE(__pyx_v_nu_y); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 219, __pyx_L1_error) + __pyx_v_N = __pyx_t_1; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":220 + * cdef nufunc(double opz, double NeffPerNu, int nmasslessnu, list nu_y): + * cdef int N = len(nu_y) + * cdef double k = 0.3173 / opz # <<<<<<<<<<<<<< + * cdef double rel_mass_sum = nmasslessnu + * cdef unsigned int i + */ + if (unlikely(__pyx_v_opz == 0)) { + PyErr_SetString(PyExc_ZeroDivisionError, "float division"); + __PYX_ERR(0, 220, __pyx_L1_error) + } + __pyx_v_k = (0.3173 / __pyx_v_opz); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":221 + * cdef int N = len(nu_y) + * cdef double k = 0.3173 / opz + * cdef double rel_mass_sum = nmasslessnu # <<<<<<<<<<<<<< + * cdef unsigned int i + * for i in range(N): + */ + __pyx_v_rel_mass_sum = __pyx_v_nmasslessnu; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":223 + * cdef double rel_mass_sum = nmasslessnu + * cdef unsigned int i + * for i in range(N): # <<<<<<<<<<<<<< + * rel_mass_sum += pow(1.0 + (k * nu_y[i])**1.83, 0.54644808743) + * return 0.22710731766 * NeffPerNu * rel_mass_sum + */ + __pyx_t_2 = __pyx_v_N; + for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { + __pyx_v_i = __pyx_t_3; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":224 + * cdef unsigned int i + * for i in range(N): + * rel_mass_sum += pow(1.0 + (k * nu_y[i])**1.83, 0.54644808743) # <<<<<<<<<<<<<< + * return 0.22710731766 * NeffPerNu * rel_mass_sum + */ + if (unlikely(__pyx_v_nu_y == Py_None)) { + PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); + __PYX_ERR(0, 224, __pyx_L1_error) + } + __pyx_t_4 = __Pyx_GetItemInt_List(__pyx_v_nu_y, __pyx_v_i, unsigned int, 0, __Pyx_PyInt_From_unsigned_int, 1, 0, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 224, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_5 = __pyx_PyFloat_AsDouble(__pyx_t_4); if (unlikely((__pyx_t_5 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 224, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_v_rel_mass_sum = (__pyx_v_rel_mass_sum + pow((1.0 + pow((__pyx_v_k * ((double)__pyx_t_5)), 1.83)), 0.54644808743)); + } + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":225 + * for i in range(N): + * rel_mass_sum += pow(1.0 + (k * nu_y[i])**1.83, 0.54644808743) + * return 0.22710731766 * NeffPerNu * rel_mass_sum # <<<<<<<<<<<<<< + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_4 = PyFloat_FromDouble(((0.22710731766 * __pyx_v_NeffPerNu) * __pyx_v_rel_mass_sum)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 225, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_r = __pyx_t_4; + __pyx_t_4 = 0; + goto __pyx_L0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":218 + * # contribution -- see any cosmology book + * # The Komatsu reference is: Komatsu et al. 
2011, ApJS 192, 18 + * cdef nufunc(double opz, double NeffPerNu, int nmasslessnu, list nu_y): # <<<<<<<<<<<<<< + * cdef int N = len(nu_y) + * cdef double k = 0.3173 / opz + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_4); + __Pyx_AddTraceback("astropy.cosmology.scalar_inv_efuncs.nufunc", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyMethodDef __pyx_methods[] = { + {0, 0, 0, 0} +}; + +#if PY_MAJOR_VERSION >= 3 +#if CYTHON_PEP489_MULTI_PHASE_INIT +static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ +static int __pyx_pymod_exec_scalar_inv_efuncs(PyObject* module); /*proto*/ +static PyModuleDef_Slot __pyx_moduledef_slots[] = { + {Py_mod_create, (void*)__pyx_pymod_create}, + {Py_mod_exec, (void*)__pyx_pymod_exec_scalar_inv_efuncs}, + {0, NULL} +}; +#endif + +static struct PyModuleDef __pyx_moduledef = { + PyModuleDef_HEAD_INIT, + "scalar_inv_efuncs", + __pyx_k_Cython_inverse_efuncs_for_cosmo, /* m_doc */ + #if CYTHON_PEP489_MULTI_PHASE_INIT + 0, /* m_size */ + #else + -1, /* m_size */ + #endif + __pyx_methods /* m_methods */, + #if CYTHON_PEP489_MULTI_PHASE_INIT + __pyx_moduledef_slots, /* m_slots */ + #else + NULL, /* m_reload */ + #endif + NULL, /* m_traverse */ + NULL, /* m_clear */ + NULL /* m_free */ +}; +#endif + +static __Pyx_StringTabEntry __pyx_string_tab[] = { + {&__pyx_n_s_NeffPerNu, __pyx_k_NeffPerNu, sizeof(__pyx_k_NeffPerNu), 0, 0, 1, 1}, + {&__pyx_n_s_Ode0, __pyx_k_Ode0, sizeof(__pyx_k_Ode0), 0, 0, 1, 1}, + {&__pyx_n_s_Odescl, __pyx_k_Odescl, sizeof(__pyx_k_Odescl), 0, 0, 1, 1}, + {&__pyx_n_s_Ogamma0, __pyx_k_Ogamma0, sizeof(__pyx_k_Ogamma0), 0, 0, 1, 1}, + {&__pyx_n_s_Ok0, __pyx_k_Ok0, sizeof(__pyx_k_Ok0), 0, 0, 1, 1}, + {&__pyx_n_s_Om0, __pyx_k_Om0, sizeof(__pyx_k_Om0), 0, 0, 1, 1}, + {&__pyx_n_s_Or0, __pyx_k_Or0, sizeof(__pyx_k_Or0), 0, 0, 1, 1}, + {&__pyx_n_s_apiv, __pyx_k_apiv, sizeof(__pyx_k_apiv), 0, 0, 1, 1}, + {&__pyx_kp_s_astropy_cosmology_scalar_inv_efu, __pyx_k_astropy_cosmology_scalar_inv_efu, sizeof(__pyx_k_astropy_cosmology_scalar_inv_efu), 0, 0, 1, 0}, + {&__pyx_n_s_astropy_cosmology_scalar_inv_efu_2, __pyx_k_astropy_cosmology_scalar_inv_efu_2, sizeof(__pyx_k_astropy_cosmology_scalar_inv_efu_2), 0, 0, 1, 1}, + {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, + {&__pyx_n_s_flcdm_inv_efunc, __pyx_k_flcdm_inv_efunc, sizeof(__pyx_k_flcdm_inv_efunc), 0, 0, 1, 1}, + {&__pyx_n_s_flcdm_inv_efunc_nomnu, __pyx_k_flcdm_inv_efunc_nomnu, sizeof(__pyx_k_flcdm_inv_efunc_nomnu), 0, 0, 1, 1}, + {&__pyx_n_s_flcdm_inv_efunc_norel, __pyx_k_flcdm_inv_efunc_norel, sizeof(__pyx_k_flcdm_inv_efunc_norel), 0, 0, 1, 1}, + {&__pyx_n_s_fw0wacdm_inv_efunc, __pyx_k_fw0wacdm_inv_efunc, sizeof(__pyx_k_fw0wacdm_inv_efunc), 0, 0, 1, 1}, + {&__pyx_n_s_fw0wacdm_inv_efunc_nomnu, __pyx_k_fw0wacdm_inv_efunc_nomnu, sizeof(__pyx_k_fw0wacdm_inv_efunc_nomnu), 0, 0, 1, 1}, + {&__pyx_n_s_fw0wacdm_inv_efunc_norel, __pyx_k_fw0wacdm_inv_efunc_norel, sizeof(__pyx_k_fw0wacdm_inv_efunc_norel), 0, 0, 1, 1}, + {&__pyx_n_s_fwcdm_inv_efunc, __pyx_k_fwcdm_inv_efunc, sizeof(__pyx_k_fwcdm_inv_efunc), 0, 0, 1, 1}, + {&__pyx_n_s_fwcdm_inv_efunc_nomnu, __pyx_k_fwcdm_inv_efunc_nomnu, sizeof(__pyx_k_fwcdm_inv_efunc_nomnu), 0, 0, 1, 1}, + {&__pyx_n_s_fwcdm_inv_efunc_norel, __pyx_k_fwcdm_inv_efunc_norel, sizeof(__pyx_k_fwcdm_inv_efunc_norel), 0, 0, 1, 1}, + {&__pyx_n_s_lcdm_inv_efunc, 
__pyx_k_lcdm_inv_efunc, sizeof(__pyx_k_lcdm_inv_efunc), 0, 0, 1, 1}, + {&__pyx_n_s_lcdm_inv_efunc_nomnu, __pyx_k_lcdm_inv_efunc_nomnu, sizeof(__pyx_k_lcdm_inv_efunc_nomnu), 0, 0, 1, 1}, + {&__pyx_n_s_lcdm_inv_efunc_norel, __pyx_k_lcdm_inv_efunc_norel, sizeof(__pyx_k_lcdm_inv_efunc_norel), 0, 0, 1, 1}, + {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, + {&__pyx_n_s_nmasslessnu, __pyx_k_nmasslessnu, sizeof(__pyx_k_nmasslessnu), 0, 0, 1, 1}, + {&__pyx_n_s_nu_y, __pyx_k_nu_y, sizeof(__pyx_k_nu_y), 0, 0, 1, 1}, + {&__pyx_n_s_opz, __pyx_k_opz, sizeof(__pyx_k_opz), 0, 0, 1, 1}, + {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, + {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, + {&__pyx_n_s_w0, __pyx_k_w0, sizeof(__pyx_k_w0), 0, 0, 1, 1}, + {&__pyx_n_s_w0wacdm_inv_efunc, __pyx_k_w0wacdm_inv_efunc, sizeof(__pyx_k_w0wacdm_inv_efunc), 0, 0, 1, 1}, + {&__pyx_n_s_w0wacdm_inv_efunc_nomnu, __pyx_k_w0wacdm_inv_efunc_nomnu, sizeof(__pyx_k_w0wacdm_inv_efunc_nomnu), 0, 0, 1, 1}, + {&__pyx_n_s_w0wacdm_inv_efunc_norel, __pyx_k_w0wacdm_inv_efunc_norel, sizeof(__pyx_k_w0wacdm_inv_efunc_norel), 0, 0, 1, 1}, + {&__pyx_n_s_w0wzcdm_inv_efunc, __pyx_k_w0wzcdm_inv_efunc, sizeof(__pyx_k_w0wzcdm_inv_efunc), 0, 0, 1, 1}, + {&__pyx_n_s_w0wzcdm_inv_efunc_nomnu, __pyx_k_w0wzcdm_inv_efunc_nomnu, sizeof(__pyx_k_w0wzcdm_inv_efunc_nomnu), 0, 0, 1, 1}, + {&__pyx_n_s_w0wzcdm_inv_efunc_norel, __pyx_k_w0wzcdm_inv_efunc_norel, sizeof(__pyx_k_w0wzcdm_inv_efunc_norel), 0, 0, 1, 1}, + {&__pyx_n_s_wa, __pyx_k_wa, sizeof(__pyx_k_wa), 0, 0, 1, 1}, + {&__pyx_n_s_wcdm_inv_efunc, __pyx_k_wcdm_inv_efunc, sizeof(__pyx_k_wcdm_inv_efunc), 0, 0, 1, 1}, + {&__pyx_n_s_wcdm_inv_efunc_nomnu, __pyx_k_wcdm_inv_efunc_nomnu, sizeof(__pyx_k_wcdm_inv_efunc_nomnu), 0, 0, 1, 1}, + {&__pyx_n_s_wcdm_inv_efunc_norel, __pyx_k_wcdm_inv_efunc_norel, sizeof(__pyx_k_wcdm_inv_efunc_norel), 0, 0, 1, 1}, + {&__pyx_n_s_wp, __pyx_k_wp, sizeof(__pyx_k_wp), 0, 0, 1, 1}, + {&__pyx_n_s_wpwacdm_inv_efunc, __pyx_k_wpwacdm_inv_efunc, sizeof(__pyx_k_wpwacdm_inv_efunc), 0, 0, 1, 1}, + {&__pyx_n_s_wpwacdm_inv_efunc_nomnu, __pyx_k_wpwacdm_inv_efunc_nomnu, sizeof(__pyx_k_wpwacdm_inv_efunc_nomnu), 0, 0, 1, 1}, + {&__pyx_n_s_wpwacdm_inv_efunc_norel, __pyx_k_wpwacdm_inv_efunc_norel, sizeof(__pyx_k_wpwacdm_inv_efunc_norel), 0, 0, 1, 1}, + {&__pyx_n_s_wz, __pyx_k_wz, sizeof(__pyx_k_wz), 0, 0, 1, 1}, + {&__pyx_n_s_z, __pyx_k_z, sizeof(__pyx_k_z), 0, 0, 1, 1}, + {0, 0, 0, 0, 0, 0, 0} +}; +static int __Pyx_InitCachedBuiltins(void) { + __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 223, __pyx_L1_error) + return 0; + __pyx_L1_error:; + return -1; +} + +static int __Pyx_InitCachedConstants(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":22 + * ######### LambdaCDM + * # No relativistic species + * def lcdm_inv_efunc_norel(double z, double Om0, double Ode0, double Ok0): # <<<<<<<<<<<<<< + * cdef double opz = 1.0 + z + * return pow(opz**2 * (opz * Om0 + Ok0) + Ode0, -0.5) + */ + __pyx_tuple_ = PyTuple_Pack(5, __pyx_n_s_z, __pyx_n_s_Om0, __pyx_n_s_Ode0, __pyx_n_s_Ok0, __pyx_n_s_opz); if (unlikely(!__pyx_tuple_)) __PYX_ERR(0, 22, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple_); + __Pyx_GIVEREF(__pyx_tuple_); + __pyx_codeobj__2 = (PyObject*)__Pyx_PyCode_New(4, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple_, __pyx_empty_tuple, 
__pyx_empty_tuple, __pyx_kp_s_astropy_cosmology_scalar_inv_efu, __pyx_n_s_lcdm_inv_efunc_norel, 22, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__2)) __PYX_ERR(0, 22, __pyx_L1_error) + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":27 + * + * # Massless neutrinos + * def lcdm_inv_efunc_nomnu(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double Or0): + * cdef double opz = 1.0 + z + */ + __pyx_tuple__3 = PyTuple_Pack(6, __pyx_n_s_z, __pyx_n_s_Om0, __pyx_n_s_Ode0, __pyx_n_s_Ok0, __pyx_n_s_Or0, __pyx_n_s_opz); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(0, 27, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__3); + __Pyx_GIVEREF(__pyx_tuple__3); + __pyx_codeobj__4 = (PyObject*)__Pyx_PyCode_New(5, 0, 6, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__3, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_astropy_cosmology_scalar_inv_efu, __pyx_n_s_lcdm_inv_efunc_nomnu, 27, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__4)) __PYX_ERR(0, 27, __pyx_L1_error) + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":33 + * + * # With massive neutrinos + * def lcdm_inv_efunc(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double Ogamma0, double NeffPerNu, int nmasslessnu, list nu_y): + * + */ + __pyx_tuple__5 = PyTuple_Pack(10, __pyx_n_s_z, __pyx_n_s_Om0, __pyx_n_s_Ode0, __pyx_n_s_Ok0, __pyx_n_s_Ogamma0, __pyx_n_s_NeffPerNu, __pyx_n_s_nmasslessnu, __pyx_n_s_nu_y, __pyx_n_s_opz, __pyx_n_s_Or0); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(0, 33, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__5); + __Pyx_GIVEREF(__pyx_tuple__5); + __pyx_codeobj__6 = (PyObject*)__Pyx_PyCode_New(8, 0, 10, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__5, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_astropy_cosmology_scalar_inv_efu, __pyx_n_s_lcdm_inv_efunc, 33, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__6)) __PYX_ERR(0, 33, __pyx_L1_error) + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":42 + * ######## FlatLambdaCDM + * # No relativistic species + * def flcdm_inv_efunc_norel(double z, double Om0, double Ode0): # <<<<<<<<<<<<<< + * return pow((1. 
+ z)**3 * Om0 + Ode0, -0.5) + * + */ + __pyx_tuple__7 = PyTuple_Pack(3, __pyx_n_s_z, __pyx_n_s_Om0, __pyx_n_s_Ode0); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(0, 42, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__7); + __Pyx_GIVEREF(__pyx_tuple__7); + __pyx_codeobj__8 = (PyObject*)__Pyx_PyCode_New(3, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__7, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_astropy_cosmology_scalar_inv_efu, __pyx_n_s_flcdm_inv_efunc_norel, 42, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__8)) __PYX_ERR(0, 42, __pyx_L1_error) + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":46 + * + * # Massless neutrinos + * def flcdm_inv_efunc_nomnu(double z, double Om0, double Ode0, double Or0): # <<<<<<<<<<<<<< + * cdef double opz = 1.0 + z + * return pow(opz**3 * (opz * Or0 + Om0) + Ode0, -0.5) + */ + __pyx_tuple__9 = PyTuple_Pack(5, __pyx_n_s_z, __pyx_n_s_Om0, __pyx_n_s_Ode0, __pyx_n_s_Or0, __pyx_n_s_opz); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(0, 46, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__9); + __Pyx_GIVEREF(__pyx_tuple__9); + __pyx_codeobj__10 = (PyObject*)__Pyx_PyCode_New(4, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__9, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_astropy_cosmology_scalar_inv_efu, __pyx_n_s_flcdm_inv_efunc_nomnu, 46, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__10)) __PYX_ERR(0, 46, __pyx_L1_error) + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":51 + * + * # With massive neutrinos + * def flcdm_inv_efunc(double z, double Om0, double Ode0, double Ogamma0, # <<<<<<<<<<<<<< + * double NeffPerNu, int nmasslessnu, list nu_y): + * + */ + __pyx_tuple__11 = PyTuple_Pack(9, __pyx_n_s_z, __pyx_n_s_Om0, __pyx_n_s_Ode0, __pyx_n_s_Ogamma0, __pyx_n_s_NeffPerNu, __pyx_n_s_nmasslessnu, __pyx_n_s_nu_y, __pyx_n_s_opz, __pyx_n_s_Or0); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(0, 51, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__11); + __Pyx_GIVEREF(__pyx_tuple__11); + __pyx_codeobj__12 = (PyObject*)__Pyx_PyCode_New(7, 0, 9, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__11, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_astropy_cosmology_scalar_inv_efu, __pyx_n_s_flcdm_inv_efunc, 51, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__12)) __PYX_ERR(0, 51, __pyx_L1_error) + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":60 + * ######## wCDM + * # No relativistic species + * def wcdm_inv_efunc_norel(double z, double Om0, double Ode0, # <<<<<<<<<<<<<< + * double Ok0, double w0): + * cdef double opz = 1.0 + z + */ + __pyx_tuple__13 = PyTuple_Pack(6, __pyx_n_s_z, __pyx_n_s_Om0, __pyx_n_s_Ode0, __pyx_n_s_Ok0, __pyx_n_s_w0, __pyx_n_s_opz); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(0, 60, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__13); + __Pyx_GIVEREF(__pyx_tuple__13); + __pyx_codeobj__14 = (PyObject*)__Pyx_PyCode_New(5, 0, 6, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__13, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_astropy_cosmology_scalar_inv_efu, __pyx_n_s_wcdm_inv_efunc_norel, 60, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__14)) __PYX_ERR(0, 60, __pyx_L1_error) + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":67 + * + * # Massless neutrinos + * def wcdm_inv_efunc_nomnu(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double Or0, double w0): + * cdef double opz = 1.0 + z + */ + __pyx_tuple__15 = PyTuple_Pack(7, 
__pyx_n_s_z, __pyx_n_s_Om0, __pyx_n_s_Ode0, __pyx_n_s_Ok0, __pyx_n_s_Or0, __pyx_n_s_w0, __pyx_n_s_opz); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(0, 67, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__15); + __Pyx_GIVEREF(__pyx_tuple__15); + __pyx_codeobj__16 = (PyObject*)__Pyx_PyCode_New(6, 0, 7, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__15, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_astropy_cosmology_scalar_inv_efu, __pyx_n_s_wcdm_inv_efunc_nomnu, 67, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__16)) __PYX_ERR(0, 67, __pyx_L1_error) + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":74 + * + * # With massive neutrinos + * def wcdm_inv_efunc(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double Ogamma0, double NeffPerNu, int nmasslessnu, list nu_y, double w0): + * + */ + __pyx_tuple__17 = PyTuple_Pack(11, __pyx_n_s_z, __pyx_n_s_Om0, __pyx_n_s_Ode0, __pyx_n_s_Ok0, __pyx_n_s_Ogamma0, __pyx_n_s_NeffPerNu, __pyx_n_s_nmasslessnu, __pyx_n_s_nu_y, __pyx_n_s_w0, __pyx_n_s_opz, __pyx_n_s_Or0); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(0, 74, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__17); + __Pyx_GIVEREF(__pyx_tuple__17); + __pyx_codeobj__18 = (PyObject*)__Pyx_PyCode_New(9, 0, 11, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__17, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_astropy_cosmology_scalar_inv_efu, __pyx_n_s_wcdm_inv_efunc, 74, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__18)) __PYX_ERR(0, 74, __pyx_L1_error) + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":84 + * ######## Flat wCDM + * # No relativistic species + * def fwcdm_inv_efunc_norel(double z, double Om0, double Ode0, double w0): # <<<<<<<<<<<<<< + * cdef double opz = 1.0 + z + * return pow(opz**3 * Om0 + Ode0 * opz**(3. 
* (1.0 + w0)), -0.5) + */ + __pyx_tuple__19 = PyTuple_Pack(5, __pyx_n_s_z, __pyx_n_s_Om0, __pyx_n_s_Ode0, __pyx_n_s_w0, __pyx_n_s_opz); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(0, 84, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__19); + __Pyx_GIVEREF(__pyx_tuple__19); + __pyx_codeobj__20 = (PyObject*)__Pyx_PyCode_New(4, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__19, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_astropy_cosmology_scalar_inv_efu, __pyx_n_s_fwcdm_inv_efunc_norel, 84, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__20)) __PYX_ERR(0, 84, __pyx_L1_error) + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":89 + * + * # Massless neutrinos + * def fwcdm_inv_efunc_nomnu(double z, double Om0, double Ode0, # <<<<<<<<<<<<<< + * double Or0, double w0): + * cdef double opz = 1.0 + z + */ + __pyx_tuple__21 = PyTuple_Pack(6, __pyx_n_s_z, __pyx_n_s_Om0, __pyx_n_s_Ode0, __pyx_n_s_Or0, __pyx_n_s_w0, __pyx_n_s_opz); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(0, 89, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__21); + __Pyx_GIVEREF(__pyx_tuple__21); + __pyx_codeobj__22 = (PyObject*)__Pyx_PyCode_New(5, 0, 6, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__21, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_astropy_cosmology_scalar_inv_efu, __pyx_n_s_fwcdm_inv_efunc_nomnu, 89, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__22)) __PYX_ERR(0, 89, __pyx_L1_error) + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":96 + * + * # With massive neutrinos + * def fwcdm_inv_efunc(double z, double Om0, double Ode0, # <<<<<<<<<<<<<< + * double Ogamma0, double NeffPerNu, int nmasslessnu, list nu_y, double w0): + * + */ + __pyx_tuple__23 = PyTuple_Pack(10, __pyx_n_s_z, __pyx_n_s_Om0, __pyx_n_s_Ode0, __pyx_n_s_Ogamma0, __pyx_n_s_NeffPerNu, __pyx_n_s_nmasslessnu, __pyx_n_s_nu_y, __pyx_n_s_w0, __pyx_n_s_opz, __pyx_n_s_Or0); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(0, 96, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__23); + __Pyx_GIVEREF(__pyx_tuple__23); + __pyx_codeobj__24 = (PyObject*)__Pyx_PyCode_New(8, 0, 10, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__23, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_astropy_cosmology_scalar_inv_efu, __pyx_n_s_fwcdm_inv_efunc, 96, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__24)) __PYX_ERR(0, 96, __pyx_L1_error) + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":105 + * ######## w0waCDM + * # No relativistic species + * def w0wacdm_inv_efunc_norel(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double w0, double wa): + * cdef double opz = 1.0 + z + */ + __pyx_tuple__25 = PyTuple_Pack(8, __pyx_n_s_z, __pyx_n_s_Om0, __pyx_n_s_Ode0, __pyx_n_s_Ok0, __pyx_n_s_w0, __pyx_n_s_wa, __pyx_n_s_opz, __pyx_n_s_Odescl); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(0, 105, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__25); + __Pyx_GIVEREF(__pyx_tuple__25); + __pyx_codeobj__26 = (PyObject*)__Pyx_PyCode_New(6, 0, 8, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__25, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_astropy_cosmology_scalar_inv_efu, __pyx_n_s_w0wacdm_inv_efunc_norel, 105, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__26)) __PYX_ERR(0, 105, __pyx_L1_error) + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":112 + * + * # Massless neutrinos + * def w0wacdm_inv_efunc_nomnu(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * 
double Or0, double w0, double wa): + * cdef double opz = 1.0 + z + */ + __pyx_tuple__27 = PyTuple_Pack(9, __pyx_n_s_z, __pyx_n_s_Om0, __pyx_n_s_Ode0, __pyx_n_s_Ok0, __pyx_n_s_Or0, __pyx_n_s_w0, __pyx_n_s_wa, __pyx_n_s_opz, __pyx_n_s_Odescl); if (unlikely(!__pyx_tuple__27)) __PYX_ERR(0, 112, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__27); + __Pyx_GIVEREF(__pyx_tuple__27); + __pyx_codeobj__28 = (PyObject*)__Pyx_PyCode_New(7, 0, 9, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__27, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_astropy_cosmology_scalar_inv_efu, __pyx_n_s_w0wacdm_inv_efunc_nomnu, 112, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__28)) __PYX_ERR(0, 112, __pyx_L1_error) + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":119 + * Ode0 * Odescl, -0.5) + * + * def w0wacdm_inv_efunc(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double Ogamma0, double NeffPerNu, int nmasslessnu, list nu_y, double w0, + * double wa): + */ + __pyx_tuple__29 = PyTuple_Pack(13, __pyx_n_s_z, __pyx_n_s_Om0, __pyx_n_s_Ode0, __pyx_n_s_Ok0, __pyx_n_s_Ogamma0, __pyx_n_s_NeffPerNu, __pyx_n_s_nmasslessnu, __pyx_n_s_nu_y, __pyx_n_s_w0, __pyx_n_s_wa, __pyx_n_s_opz, __pyx_n_s_Or0, __pyx_n_s_Odescl); if (unlikely(!__pyx_tuple__29)) __PYX_ERR(0, 119, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__29); + __Pyx_GIVEREF(__pyx_tuple__29); + __pyx_codeobj__30 = (PyObject*)__Pyx_PyCode_New(10, 0, 13, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__29, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_astropy_cosmology_scalar_inv_efu, __pyx_n_s_w0wacdm_inv_efunc, 119, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__30)) __PYX_ERR(0, 119, __pyx_L1_error) + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":130 + * ######## Flatw0waCDM + * # No relativistic species + * def fw0wacdm_inv_efunc_norel(double z, double Om0, double Ode0, # <<<<<<<<<<<<<< + * double w0, double wa): + * cdef double opz = 1.0 + z + */ + __pyx_tuple__31 = PyTuple_Pack(7, __pyx_n_s_z, __pyx_n_s_Om0, __pyx_n_s_Ode0, __pyx_n_s_w0, __pyx_n_s_wa, __pyx_n_s_opz, __pyx_n_s_Odescl); if (unlikely(!__pyx_tuple__31)) __PYX_ERR(0, 130, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__31); + __Pyx_GIVEREF(__pyx_tuple__31); + __pyx_codeobj__32 = (PyObject*)__Pyx_PyCode_New(5, 0, 7, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__31, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_astropy_cosmology_scalar_inv_efu, __pyx_n_s_fw0wacdm_inv_efunc_norel, 130, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__32)) __PYX_ERR(0, 130, __pyx_L1_error) + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":137 + * + * # Massless neutrinos + * def fw0wacdm_inv_efunc_nomnu(double z, double Om0, double Ode0, # <<<<<<<<<<<<<< + * double Or0, double w0, double wa): + * cdef double opz = 1.0 + z + */ + __pyx_tuple__33 = PyTuple_Pack(8, __pyx_n_s_z, __pyx_n_s_Om0, __pyx_n_s_Ode0, __pyx_n_s_Or0, __pyx_n_s_w0, __pyx_n_s_wa, __pyx_n_s_opz, __pyx_n_s_Odescl); if (unlikely(!__pyx_tuple__33)) __PYX_ERR(0, 137, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__33); + __Pyx_GIVEREF(__pyx_tuple__33); + __pyx_codeobj__34 = (PyObject*)__Pyx_PyCode_New(6, 0, 8, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__33, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_astropy_cosmology_scalar_inv_efu, __pyx_n_s_fw0wacdm_inv_efunc_nomnu, 137, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__34)) 
__PYX_ERR(0, 137, __pyx_L1_error) + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":144 + * + * # With massive neutrinos + * def fw0wacdm_inv_efunc(double z, double Om0, double Ode0, # <<<<<<<<<<<<<< + * double Ogamma0, double NeffPerNu, int nmasslessnu, list nu_y, double w0, + * double wa): + */ + __pyx_tuple__35 = PyTuple_Pack(12, __pyx_n_s_z, __pyx_n_s_Om0, __pyx_n_s_Ode0, __pyx_n_s_Ogamma0, __pyx_n_s_NeffPerNu, __pyx_n_s_nmasslessnu, __pyx_n_s_nu_y, __pyx_n_s_w0, __pyx_n_s_wa, __pyx_n_s_opz, __pyx_n_s_Or0, __pyx_n_s_Odescl); if (unlikely(!__pyx_tuple__35)) __PYX_ERR(0, 144, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__35); + __Pyx_GIVEREF(__pyx_tuple__35); + __pyx_codeobj__36 = (PyObject*)__Pyx_PyCode_New(9, 0, 12, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__35, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_astropy_cosmology_scalar_inv_efu, __pyx_n_s_fw0wacdm_inv_efunc, 144, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__36)) __PYX_ERR(0, 144, __pyx_L1_error) + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":155 + * ######## wpwaCDM + * # No relativistic species + * def wpwacdm_inv_efunc_norel(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double wp, double apiv, double wa): + * cdef double opz = 1.0 + z + */ + __pyx_tuple__37 = PyTuple_Pack(9, __pyx_n_s_z, __pyx_n_s_Om0, __pyx_n_s_Ode0, __pyx_n_s_Ok0, __pyx_n_s_wp, __pyx_n_s_apiv, __pyx_n_s_wa, __pyx_n_s_opz, __pyx_n_s_Odescl); if (unlikely(!__pyx_tuple__37)) __PYX_ERR(0, 155, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__37); + __Pyx_GIVEREF(__pyx_tuple__37); + __pyx_codeobj__38 = (PyObject*)__Pyx_PyCode_New(7, 0, 9, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__37, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_astropy_cosmology_scalar_inv_efu, __pyx_n_s_wpwacdm_inv_efunc_norel, 155, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__38)) __PYX_ERR(0, 155, __pyx_L1_error) + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":162 + * + * # Massless neutrinos + * def wpwacdm_inv_efunc_nomnu(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double Or0, double wp, double apiv, double wa): + * cdef double opz = 1.0 + z + */ + __pyx_tuple__39 = PyTuple_Pack(10, __pyx_n_s_z, __pyx_n_s_Om0, __pyx_n_s_Ode0, __pyx_n_s_Ok0, __pyx_n_s_Or0, __pyx_n_s_wp, __pyx_n_s_apiv, __pyx_n_s_wa, __pyx_n_s_opz, __pyx_n_s_Odescl); if (unlikely(!__pyx_tuple__39)) __PYX_ERR(0, 162, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__39); + __Pyx_GIVEREF(__pyx_tuple__39); + __pyx_codeobj__40 = (PyObject*)__Pyx_PyCode_New(8, 0, 10, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__39, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_astropy_cosmology_scalar_inv_efu, __pyx_n_s_wpwacdm_inv_efunc_nomnu, 162, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__40)) __PYX_ERR(0, 162, __pyx_L1_error) + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":170 + * + * # With massive neutrinos + * def wpwacdm_inv_efunc(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double Ogamma0, double NeffPerNu, int nmasslessnu, list nu_y, double wp, + * double apiv, double wa): + */ + __pyx_tuple__41 = PyTuple_Pack(14, __pyx_n_s_z, __pyx_n_s_Om0, __pyx_n_s_Ode0, __pyx_n_s_Ok0, __pyx_n_s_Ogamma0, __pyx_n_s_NeffPerNu, __pyx_n_s_nmasslessnu, __pyx_n_s_nu_y, __pyx_n_s_wp, __pyx_n_s_apiv, __pyx_n_s_wa, __pyx_n_s_opz, __pyx_n_s_Or0, __pyx_n_s_Odescl); if (unlikely(!__pyx_tuple__41)) __PYX_ERR(0, 
170, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__41); + __Pyx_GIVEREF(__pyx_tuple__41); + __pyx_codeobj__42 = (PyObject*)__Pyx_PyCode_New(11, 0, 14, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__41, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_astropy_cosmology_scalar_inv_efu, __pyx_n_s_wpwacdm_inv_efunc, 170, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__42)) __PYX_ERR(0, 170, __pyx_L1_error) + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":181 + * ######## w0wzCDM + * # No relativistic species + * def w0wzcdm_inv_efunc_norel(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double w0, double wz): + * cdef double opz = 1.0 + z + */ + __pyx_tuple__43 = PyTuple_Pack(8, __pyx_n_s_z, __pyx_n_s_Om0, __pyx_n_s_Ode0, __pyx_n_s_Ok0, __pyx_n_s_w0, __pyx_n_s_wz, __pyx_n_s_opz, __pyx_n_s_Odescl); if (unlikely(!__pyx_tuple__43)) __PYX_ERR(0, 181, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__43); + __Pyx_GIVEREF(__pyx_tuple__43); + __pyx_codeobj__44 = (PyObject*)__Pyx_PyCode_New(6, 0, 8, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__43, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_astropy_cosmology_scalar_inv_efu, __pyx_n_s_w0wzcdm_inv_efunc_norel, 181, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__44)) __PYX_ERR(0, 181, __pyx_L1_error) + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":188 + * + * # Massless neutrinos + * def w0wzcdm_inv_efunc_nomnu(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double Or0, double w0, double wz): + * cdef double opz = 1.0 + z + */ + __pyx_tuple__45 = PyTuple_Pack(9, __pyx_n_s_z, __pyx_n_s_Om0, __pyx_n_s_Ode0, __pyx_n_s_Ok0, __pyx_n_s_Or0, __pyx_n_s_w0, __pyx_n_s_wz, __pyx_n_s_opz, __pyx_n_s_Odescl); if (unlikely(!__pyx_tuple__45)) __PYX_ERR(0, 188, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__45); + __Pyx_GIVEREF(__pyx_tuple__45); + __pyx_codeobj__46 = (PyObject*)__Pyx_PyCode_New(7, 0, 9, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__45, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_astropy_cosmology_scalar_inv_efu, __pyx_n_s_w0wzcdm_inv_efunc_nomnu, 188, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__46)) __PYX_ERR(0, 188, __pyx_L1_error) + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":196 + * + * # With massive neutrinos + * def w0wzcdm_inv_efunc(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double Ogamma0, double NeffPerNu, int nmasslessnu, list nu_y, double w0, + * double wz): + */ + __pyx_tuple__47 = PyTuple_Pack(13, __pyx_n_s_z, __pyx_n_s_Om0, __pyx_n_s_Ode0, __pyx_n_s_Ok0, __pyx_n_s_Ogamma0, __pyx_n_s_NeffPerNu, __pyx_n_s_nmasslessnu, __pyx_n_s_nu_y, __pyx_n_s_w0, __pyx_n_s_wz, __pyx_n_s_opz, __pyx_n_s_Or0, __pyx_n_s_Odescl); if (unlikely(!__pyx_tuple__47)) __PYX_ERR(0, 196, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__47); + __Pyx_GIVEREF(__pyx_tuple__47); + __pyx_codeobj__48 = (PyObject*)__Pyx_PyCode_New(10, 0, 13, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__47, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_astropy_cosmology_scalar_inv_efu, __pyx_n_s_w0wzcdm_inv_efunc, 196, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__48)) __PYX_ERR(0, 196, __pyx_L1_error) + __Pyx_RefNannyFinishContext(); + return 0; + __pyx_L1_error:; + __Pyx_RefNannyFinishContext(); + return -1; +} + +static int __Pyx_InitGlobals(void) { + if (__Pyx_InitStrings(__pyx_string_tab) < 0) 
__PYX_ERR(0, 1, __pyx_L1_error);
+  __pyx_float_1_0 = PyFloat_FromDouble(1.0); if (unlikely(!__pyx_float_1_0)) __PYX_ERR(0, 1, __pyx_L1_error)
+  return 0;
+  __pyx_L1_error:;
+  return -1;
+}
+
+#if PY_MAJOR_VERSION < 3
+PyMODINIT_FUNC initscalar_inv_efuncs(void); /*proto*/
+PyMODINIT_FUNC initscalar_inv_efuncs(void)
+#else
+PyMODINIT_FUNC PyInit_scalar_inv_efuncs(void); /*proto*/
+PyMODINIT_FUNC PyInit_scalar_inv_efuncs(void)
+#if CYTHON_PEP489_MULTI_PHASE_INIT
+{
+  return PyModuleDef_Init(&__pyx_moduledef);
+}
+static int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name) {
+  PyObject *value = PyObject_GetAttrString(spec, from_name);
+  int result = 0;
+  if (likely(value)) {
+    result = PyDict_SetItemString(moddict, to_name, value);
+    Py_DECREF(value);
+  } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) {
+    PyErr_Clear();
+  } else {
+    result = -1;
+  }
+  return result;
+}
+static PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) {
+  PyObject *module = NULL, *moddict, *modname;
+  if (__pyx_m)
+    return __Pyx_NewRef(__pyx_m);
+  modname = PyObject_GetAttrString(spec, "name");
+  if (unlikely(!modname)) goto bad;
+  module = PyModule_NewObject(modname);
+  Py_DECREF(modname);
+  if (unlikely(!module)) goto bad;
+  moddict = PyModule_GetDict(module);
+  if (unlikely(!moddict)) goto bad;
+  if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__") < 0)) goto bad;
+  if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__") < 0)) goto bad;
+  if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__") < 0)) goto bad;
+  if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__") < 0)) goto bad;
+  return module;
+bad:
+  Py_XDECREF(module);
+  return NULL;
+}
+
+
+static int __pyx_pymod_exec_scalar_inv_efuncs(PyObject *__pyx_pyinit_module)
+#endif
+#endif
+{
+  PyObject *__pyx_t_1 = NULL;
+  __Pyx_RefNannyDeclarations
+  #if CYTHON_PEP489_MULTI_PHASE_INIT
+  if (__pyx_m && __pyx_m == __pyx_pyinit_module) return 0;
+  #endif
+  #if CYTHON_REFNANNY
+  __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
+  if (!__Pyx_RefNanny) {
+    PyErr_Clear();
+    __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
+    if (!__Pyx_RefNanny)
+      Py_FatalError("failed to import 'refnanny' module");
+  }
+  #endif
+  __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_scalar_inv_efuncs(void)", 0);
+  if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+  __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error)
+  __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error)
+  __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error)
+  #ifdef __Pyx_CyFunction_USED
+  if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+  #endif
+  #ifdef __Pyx_FusedFunction_USED
+  if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+  #endif
+  #ifdef __Pyx_Coroutine_USED
+  if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+  #endif
+  #ifdef __Pyx_Generator_USED
+  if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+  #endif
+  #ifdef __Pyx_AsyncGen_USED
+  if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+  #endif
+  #ifdef __Pyx_StopAsyncIteration_USED
+  if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+  #endif
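+  /* Editorial note, not Cython output: from here the module-exec function
+   * follows the standard Cython bootstrap order visible below: create (or,
+   * under PEP 489 multi-phase init, adopt) the module object, run
+   * __Pyx_InitGlobals() to intern the string table and cache the float
+   * constant 1.0, cache the `range` builtin, build the code objects for
+   * every `def` in scalar_inv_efuncs.pyx, and finally wrap each C
+   * implementation with PyCFunction_NewEx() and publish it in the module
+   * dict.  The observable effect, sketched at the Python level (a
+   * hypothetical equivalent for illustration, not part of the build):
+   *   import types
+   *   import astropy.cosmology.scalar_inv_efuncs as sie
+   *   mod = types.ModuleType("example")
+   *   mod.lcdm_inv_efunc_norel = sie.lcdm_inv_efunc_norel
+   *   # ...one attribute per inverse-efunc variant registered below
+   */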
+ /*--- Library function declarations ---*/ + /*--- Threads initialization code ---*/ + #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS + #ifdef WITH_THREAD /* Python build with threading support? */ + PyEval_InitThreads(); + #endif + #endif + /*--- Module creation code ---*/ + #if CYTHON_PEP489_MULTI_PHASE_INIT + __pyx_m = __pyx_pyinit_module; + Py_INCREF(__pyx_m); + #else + #if PY_MAJOR_VERSION < 3 + __pyx_m = Py_InitModule4("scalar_inv_efuncs", __pyx_methods, __pyx_k_Cython_inverse_efuncs_for_cosmo, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); + #else + __pyx_m = PyModule_Create(&__pyx_moduledef); + #endif + if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) + Py_INCREF(__pyx_d); + __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) + #if CYTHON_COMPILING_IN_PYPY + Py_INCREF(__pyx_b); + #endif + if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); + /*--- Initialize various global constants etc. ---*/ + if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) + if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + if (__pyx_module_is_main_astropy__cosmology__scalar_inv_efuncs) { + if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + } + #if PY_MAJOR_VERSION >= 3 + { + PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) + if (!PyDict_GetItemString(modules, "astropy.cosmology.scalar_inv_efuncs")) { + if (unlikely(PyDict_SetItemString(modules, "astropy.cosmology.scalar_inv_efuncs", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) + } + } + #endif + /*--- Builtin init code ---*/ + if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + /*--- Constants init code ---*/ + if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + /*--- Global init code ---*/ + /*--- Variable export code ---*/ + /*--- Function export code ---*/ + /*--- Type init code ---*/ + /*--- Type import code ---*/ + /*--- Variable import code ---*/ + /*--- Function import code ---*/ + /*--- Execution code ---*/ + #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) + if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":22 + * ######### LambdaCDM + * # No relativistic species + * def lcdm_inv_efunc_norel(double z, double Om0, double Ode0, double Ok0): # <<<<<<<<<<<<<< + * cdef double opz = 1.0 + z + * return pow(opz**2 * (opz * Om0 + Ok0) + Ode0, -0.5) + */ + __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_1lcdm_inv_efunc_norel, NULL, __pyx_n_s_astropy_cosmology_scalar_inv_efu_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_lcdm_inv_efunc_norel, __pyx_t_1) < 0) __PYX_ERR(0, 22, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":27 + * + * # Massless neutrinos + * def lcdm_inv_efunc_nomnu(double z, double Om0, double Ode0, double 
Ok0, # <<<<<<<<<<<<<< + * double Or0): + * cdef double opz = 1.0 + z + */ + __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_3lcdm_inv_efunc_nomnu, NULL, __pyx_n_s_astropy_cosmology_scalar_inv_efu_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 27, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_lcdm_inv_efunc_nomnu, __pyx_t_1) < 0) __PYX_ERR(0, 27, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":33 + * + * # With massive neutrinos + * def lcdm_inv_efunc(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double Ogamma0, double NeffPerNu, int nmasslessnu, list nu_y): + * + */ + __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_5lcdm_inv_efunc, NULL, __pyx_n_s_astropy_cosmology_scalar_inv_efu_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 33, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_lcdm_inv_efunc, __pyx_t_1) < 0) __PYX_ERR(0, 33, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":42 + * ######## FlatLambdaCDM + * # No relativistic species + * def flcdm_inv_efunc_norel(double z, double Om0, double Ode0): # <<<<<<<<<<<<<< + * return pow((1. + z)**3 * Om0 + Ode0, -0.5) + * + */ + __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_7flcdm_inv_efunc_norel, NULL, __pyx_n_s_astropy_cosmology_scalar_inv_efu_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 42, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_flcdm_inv_efunc_norel, __pyx_t_1) < 0) __PYX_ERR(0, 42, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":46 + * + * # Massless neutrinos + * def flcdm_inv_efunc_nomnu(double z, double Om0, double Ode0, double Or0): # <<<<<<<<<<<<<< + * cdef double opz = 1.0 + z + * return pow(opz**3 * (opz * Or0 + Om0) + Ode0, -0.5) + */ + __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_9flcdm_inv_efunc_nomnu, NULL, __pyx_n_s_astropy_cosmology_scalar_inv_efu_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 46, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_flcdm_inv_efunc_nomnu, __pyx_t_1) < 0) __PYX_ERR(0, 46, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":51 + * + * # With massive neutrinos + * def flcdm_inv_efunc(double z, double Om0, double Ode0, double Ogamma0, # <<<<<<<<<<<<<< + * double NeffPerNu, int nmasslessnu, list nu_y): + * + */ + __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_11flcdm_inv_efunc, NULL, __pyx_n_s_astropy_cosmology_scalar_inv_efu_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 51, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_flcdm_inv_efunc, __pyx_t_1) < 0) __PYX_ERR(0, 51, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":60 + * ######## wCDM + * # No relativistic species + * def wcdm_inv_efunc_norel(double z, double Om0, double Ode0, # <<<<<<<<<<<<<< + * double Ok0, double w0): + * cdef double opz = 1.0 + z + */ + __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_13wcdm_inv_efunc_norel, NULL, __pyx_n_s_astropy_cosmology_scalar_inv_efu_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 60, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if 
(PyDict_SetItem(__pyx_d, __pyx_n_s_wcdm_inv_efunc_norel, __pyx_t_1) < 0) __PYX_ERR(0, 60, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":67 + * + * # Massless neutrinos + * def wcdm_inv_efunc_nomnu(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double Or0, double w0): + * cdef double opz = 1.0 + z + */ + __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_15wcdm_inv_efunc_nomnu, NULL, __pyx_n_s_astropy_cosmology_scalar_inv_efu_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 67, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_wcdm_inv_efunc_nomnu, __pyx_t_1) < 0) __PYX_ERR(0, 67, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":74 + * + * # With massive neutrinos + * def wcdm_inv_efunc(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double Ogamma0, double NeffPerNu, int nmasslessnu, list nu_y, double w0): + * + */ + __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_17wcdm_inv_efunc, NULL, __pyx_n_s_astropy_cosmology_scalar_inv_efu_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 74, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_wcdm_inv_efunc, __pyx_t_1) < 0) __PYX_ERR(0, 74, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":84 + * ######## Flat wCDM + * # No relativistic species + * def fwcdm_inv_efunc_norel(double z, double Om0, double Ode0, double w0): # <<<<<<<<<<<<<< + * cdef double opz = 1.0 + z + * return pow(opz**3 * Om0 + Ode0 * opz**(3. * (1.0 + w0)), -0.5) + */ + __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_19fwcdm_inv_efunc_norel, NULL, __pyx_n_s_astropy_cosmology_scalar_inv_efu_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 84, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_fwcdm_inv_efunc_norel, __pyx_t_1) < 0) __PYX_ERR(0, 84, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":89 + * + * # Massless neutrinos + * def fwcdm_inv_efunc_nomnu(double z, double Om0, double Ode0, # <<<<<<<<<<<<<< + * double Or0, double w0): + * cdef double opz = 1.0 + z + */ + __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_21fwcdm_inv_efunc_nomnu, NULL, __pyx_n_s_astropy_cosmology_scalar_inv_efu_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 89, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_fwcdm_inv_efunc_nomnu, __pyx_t_1) < 0) __PYX_ERR(0, 89, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":96 + * + * # With massive neutrinos + * def fwcdm_inv_efunc(double z, double Om0, double Ode0, # <<<<<<<<<<<<<< + * double Ogamma0, double NeffPerNu, int nmasslessnu, list nu_y, double w0): + * + */ + __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_23fwcdm_inv_efunc, NULL, __pyx_n_s_astropy_cosmology_scalar_inv_efu_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 96, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_fwcdm_inv_efunc, __pyx_t_1) < 0) __PYX_ERR(0, 96, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":105 + * ######## w0waCDM + * # No relativistic species + * def w0wacdm_inv_efunc_norel(double z, double 
Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double w0, double wa): + * cdef double opz = 1.0 + z + */ + __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_25w0wacdm_inv_efunc_norel, NULL, __pyx_n_s_astropy_cosmology_scalar_inv_efu_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 105, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_w0wacdm_inv_efunc_norel, __pyx_t_1) < 0) __PYX_ERR(0, 105, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":112 + * + * # Massless neutrinos + * def w0wacdm_inv_efunc_nomnu(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double Or0, double w0, double wa): + * cdef double opz = 1.0 + z + */ + __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_27w0wacdm_inv_efunc_nomnu, NULL, __pyx_n_s_astropy_cosmology_scalar_inv_efu_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 112, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_w0wacdm_inv_efunc_nomnu, __pyx_t_1) < 0) __PYX_ERR(0, 112, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":119 + * Ode0 * Odescl, -0.5) + * + * def w0wacdm_inv_efunc(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double Ogamma0, double NeffPerNu, int nmasslessnu, list nu_y, double w0, + * double wa): + */ + __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_29w0wacdm_inv_efunc, NULL, __pyx_n_s_astropy_cosmology_scalar_inv_efu_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 119, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_w0wacdm_inv_efunc, __pyx_t_1) < 0) __PYX_ERR(0, 119, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":130 + * ######## Flatw0waCDM + * # No relativistic species + * def fw0wacdm_inv_efunc_norel(double z, double Om0, double Ode0, # <<<<<<<<<<<<<< + * double w0, double wa): + * cdef double opz = 1.0 + z + */ + __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_31fw0wacdm_inv_efunc_norel, NULL, __pyx_n_s_astropy_cosmology_scalar_inv_efu_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 130, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_fw0wacdm_inv_efunc_norel, __pyx_t_1) < 0) __PYX_ERR(0, 130, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":137 + * + * # Massless neutrinos + * def fw0wacdm_inv_efunc_nomnu(double z, double Om0, double Ode0, # <<<<<<<<<<<<<< + * double Or0, double w0, double wa): + * cdef double opz = 1.0 + z + */ + __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_33fw0wacdm_inv_efunc_nomnu, NULL, __pyx_n_s_astropy_cosmology_scalar_inv_efu_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 137, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_fw0wacdm_inv_efunc_nomnu, __pyx_t_1) < 0) __PYX_ERR(0, 137, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":144 + * + * # With massive neutrinos + * def fw0wacdm_inv_efunc(double z, double Om0, double Ode0, # <<<<<<<<<<<<<< + * double Ogamma0, double NeffPerNu, int nmasslessnu, list nu_y, double w0, + * double wa): + */ + __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_35fw0wacdm_inv_efunc, NULL, 
__pyx_n_s_astropy_cosmology_scalar_inv_efu_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 144, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_fw0wacdm_inv_efunc, __pyx_t_1) < 0) __PYX_ERR(0, 144, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":155 + * ######## wpwaCDM + * # No relativistic species + * def wpwacdm_inv_efunc_norel(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double wp, double apiv, double wa): + * cdef double opz = 1.0 + z + */ + __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_37wpwacdm_inv_efunc_norel, NULL, __pyx_n_s_astropy_cosmology_scalar_inv_efu_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 155, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_wpwacdm_inv_efunc_norel, __pyx_t_1) < 0) __PYX_ERR(0, 155, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":162 + * + * # Massless neutrinos + * def wpwacdm_inv_efunc_nomnu(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double Or0, double wp, double apiv, double wa): + * cdef double opz = 1.0 + z + */ + __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_39wpwacdm_inv_efunc_nomnu, NULL, __pyx_n_s_astropy_cosmology_scalar_inv_efu_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 162, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_wpwacdm_inv_efunc_nomnu, __pyx_t_1) < 0) __PYX_ERR(0, 162, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":170 + * + * # With massive neutrinos + * def wpwacdm_inv_efunc(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double Ogamma0, double NeffPerNu, int nmasslessnu, list nu_y, double wp, + * double apiv, double wa): + */ + __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_41wpwacdm_inv_efunc, NULL, __pyx_n_s_astropy_cosmology_scalar_inv_efu_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 170, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_wpwacdm_inv_efunc, __pyx_t_1) < 0) __PYX_ERR(0, 170, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":181 + * ######## w0wzCDM + * # No relativistic species + * def w0wzcdm_inv_efunc_norel(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double w0, double wz): + * cdef double opz = 1.0 + z + */ + __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_43w0wzcdm_inv_efunc_norel, NULL, __pyx_n_s_astropy_cosmology_scalar_inv_efu_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 181, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_w0wzcdm_inv_efunc_norel, __pyx_t_1) < 0) __PYX_ERR(0, 181, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":188 + * + * # Massless neutrinos + * def w0wzcdm_inv_efunc_nomnu(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double Or0, double w0, double wz): + * cdef double opz = 1.0 + z + */ + __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_45w0wzcdm_inv_efunc_nomnu, NULL, __pyx_n_s_astropy_cosmology_scalar_inv_efu_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 188, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, 
__pyx_n_s_w0wzcdm_inv_efunc_nomnu, __pyx_t_1) < 0) __PYX_ERR(0, 188, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":196 + * + * # With massive neutrinos + * def w0wzcdm_inv_efunc(double z, double Om0, double Ode0, double Ok0, # <<<<<<<<<<<<<< + * double Ogamma0, double NeffPerNu, int nmasslessnu, list nu_y, double w0, + * double wz): + */ + __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_7astropy_9cosmology_17scalar_inv_efuncs_47w0wzcdm_inv_efunc, NULL, __pyx_n_s_astropy_cosmology_scalar_inv_efu_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 196, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_w0wzcdm_inv_efunc, __pyx_t_1) < 0) __PYX_ERR(0, 196, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "astropy/cosmology/scalar_inv_efuncs.pyx":1 + * """ Cython inverse efuncs for cosmology integrals""" # <<<<<<<<<<<<<< + * #cython boundcheck=False + * + */ + __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /*--- Wrapped vars code ---*/ + + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + if (__pyx_m) { + if (__pyx_d) { + __Pyx_AddTraceback("init astropy.cosmology.scalar_inv_efuncs", 0, __pyx_lineno, __pyx_filename); + } + Py_DECREF(__pyx_m); __pyx_m = 0; + } else if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_ImportError, "init astropy.cosmology.scalar_inv_efuncs"); + } + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + #if CYTHON_PEP489_MULTI_PHASE_INIT + return (__pyx_m != NULL) ? 0 : -1; + #elif PY_MAJOR_VERSION >= 3 + return __pyx_m; + #else + return; + #endif +} + +/* --- Runtime support code --- */ +/* Refnanny */ +#if CYTHON_REFNANNY +static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { + PyObject *m = NULL, *p = NULL; + void *r = NULL; + m = PyImport_ImportModule((char *)modname); + if (!m) goto end; + p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); + if (!p) goto end; + r = PyLong_AsVoidPtr(p); +end: + Py_XDECREF(p); + Py_XDECREF(m); + return (__Pyx_RefNannyAPIStruct *)r; +} +#endif + +/* GetBuiltinName */ +static PyObject *__Pyx_GetBuiltinName(PyObject *name) { + PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); + if (unlikely(!result)) { + PyErr_Format(PyExc_NameError, +#if PY_MAJOR_VERSION >= 3 + "name '%U' is not defined", name); +#else + "name '%.200s' is not defined", PyString_AS_STRING(name)); +#endif + } + return result; +} + +/* RaiseArgTupleInvalid */ +static void __Pyx_RaiseArgtupleInvalid( + const char* func_name, + int exact, + Py_ssize_t num_min, + Py_ssize_t num_max, + Py_ssize_t num_found) +{ + Py_ssize_t num_expected; + const char *more_or_less; + if (num_found < num_min) { + num_expected = num_min; + more_or_less = "at least"; + } else { + num_expected = num_max; + more_or_less = "at most"; + } + if (exact) { + more_or_less = "exactly"; + } + PyErr_Format(PyExc_TypeError, + "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", + func_name, more_or_less, num_expected, + (num_expected == 1) ? 
"" : "s", num_found); +} + +/* RaiseDoubleKeywords */ +static void __Pyx_RaiseDoubleKeywordsError( + const char* func_name, + PyObject* kw_name) +{ + PyErr_Format(PyExc_TypeError, + #if PY_MAJOR_VERSION >= 3 + "%s() got multiple values for keyword argument '%U'", func_name, kw_name); + #else + "%s() got multiple values for keyword argument '%s'", func_name, + PyString_AsString(kw_name)); + #endif +} + +/* ParseKeywords */ +static int __Pyx_ParseOptionalKeywords( + PyObject *kwds, + PyObject **argnames[], + PyObject *kwds2, + PyObject *values[], + Py_ssize_t num_pos_args, + const char* function_name) +{ + PyObject *key = 0, *value = 0; + Py_ssize_t pos = 0; + PyObject*** name; + PyObject*** first_kw_arg = argnames + num_pos_args; + while (PyDict_Next(kwds, &pos, &key, &value)) { + name = first_kw_arg; + while (*name && (**name != key)) name++; + if (*name) { + values[name-argnames] = value; + continue; + } + name = first_kw_arg; + #if PY_MAJOR_VERSION < 3 + if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { + while (*name) { + if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) + && _PyString_Eq(**name, key)) { + values[name-argnames] = value; + break; + } + name++; + } + if (*name) continue; + else { + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + if ((**argname == key) || ( + (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) + && _PyString_Eq(**argname, key))) { + goto arg_passed_twice; + } + argname++; + } + } + } else + #endif + if (likely(PyUnicode_Check(key))) { + while (*name) { + int cmp = (**name == key) ? 0 : + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : + #endif + PyUnicode_Compare(**name, key); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) { + values[name-argnames] = value; + break; + } + name++; + } + if (*name) continue; + else { + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + int cmp = (**argname == key) ? 0 : + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 
1 : + #endif + PyUnicode_Compare(**argname, key); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) goto arg_passed_twice; + argname++; + } + } + } else + goto invalid_keyword_type; + if (kwds2) { + if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; + } else { + goto invalid_keyword; + } + } + return 0; +arg_passed_twice: + __Pyx_RaiseDoubleKeywordsError(function_name, key); + goto bad; +invalid_keyword_type: + PyErr_Format(PyExc_TypeError, + "%.200s() keywords must be strings", function_name); + goto bad; +invalid_keyword: + PyErr_Format(PyExc_TypeError, + #if PY_MAJOR_VERSION < 3 + "%.200s() got an unexpected keyword argument '%.200s'", + function_name, PyString_AsString(key)); + #else + "%s() got an unexpected keyword argument '%U'", + function_name, key); + #endif +bad: + return -1; +} + +/* ArgTypeTest */ +static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) +{ + if (unlikely(!type)) { + PyErr_SetString(PyExc_SystemError, "Missing type object"); + return 0; + } + else if (exact) { + #if PY_MAJOR_VERSION == 2 + if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; + #endif + } + else { + if (likely(__Pyx_TypeCheck(obj, type))) return 1; + } + PyErr_Format(PyExc_TypeError, + "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", + name, type->tp_name, Py_TYPE(obj)->tp_name); + return 0; +} + +/* PyFloatBinop */ +#if !CYTHON_COMPILING_IN_PYPY +static PyObject* __Pyx_PyFloat_AddCObj(PyObject *op1, PyObject *op2, double floatval, CYTHON_UNUSED int inplace) { + const double a = floatval; + double b, result; + if (likely(PyFloat_CheckExact(op2))) { + b = PyFloat_AS_DOUBLE(op2); + } else + #if PY_MAJOR_VERSION < 3 + if (likely(PyInt_CheckExact(op2))) { + b = (double) PyInt_AS_LONG(op2); + } else + #endif + if (likely(PyLong_CheckExact(op2))) { + #if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)op2)->ob_digit; + const Py_ssize_t size = Py_SIZE(op2); + switch (size) { + case 0: b = 0.0; break; + case -1: b = -(double) digits[0]; break; + case 1: b = (double) digits[0]; break; + case -2: + case 2: + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT && ((8 * sizeof(unsigned long) < 53) || (1 * PyLong_SHIFT < 53))) { + b = (double) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + if ((8 * sizeof(unsigned long) < 53) || (2 * PyLong_SHIFT < 53) || (b < (double) ((PY_LONG_LONG)1 << 53))) { + if (size == -2) + b = -b; + break; + } + } + case -3: + case 3: + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT && ((8 * sizeof(unsigned long) < 53) || (2 * PyLong_SHIFT < 53))) { + b = (double) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + if ((8 * sizeof(unsigned long) < 53) || (3 * PyLong_SHIFT < 53) || (b < (double) ((PY_LONG_LONG)1 << 53))) { + if (size == -3) + b = -b; + break; + } + } + case -4: + case 4: + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT && ((8 * sizeof(unsigned long) < 53) || (3 * PyLong_SHIFT < 53))) { + b = (double) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + if ((8 * sizeof(unsigned long) < 53) || (4 * PyLong_SHIFT < 53) || (b < (double) ((PY_LONG_LONG)1 << 53))) { + if (size == -4) + b = -b; + break; + } + } + default: + #else + { + #endif + b = PyLong_AsDouble(op2); + if (unlikely(b == -1.0 && 
PyErr_Occurred())) return NULL; + } + } else { + return (inplace ? PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2); + } + PyFPE_START_PROTECT("add", return NULL) + result = a + b; + PyFPE_END_PROTECT(result) + return PyFloat_FromDouble(result); +} +#endif + +/* GetItemInt */ + static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { + PyObject *r; + if (!j) return NULL; + r = PyObject_GetItem(o, j); + Py_DECREF(j); + return r; +} +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, + CYTHON_NCP_UNUSED int wraparound, + CYTHON_NCP_UNUSED int boundscheck) { +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + Py_ssize_t wrapped_i = i; + if (wraparound & unlikely(i < 0)) { + wrapped_i += PyList_GET_SIZE(o); + } + if ((!boundscheck) || likely((0 <= wrapped_i) & (wrapped_i < PyList_GET_SIZE(o)))) { + PyObject *r = PyList_GET_ITEM(o, wrapped_i); + Py_INCREF(r); + return r; + } + return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); +#else + return PySequence_GetItem(o, i); +#endif +} +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, + CYTHON_NCP_UNUSED int wraparound, + CYTHON_NCP_UNUSED int boundscheck) { +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + Py_ssize_t wrapped_i = i; + if (wraparound & unlikely(i < 0)) { + wrapped_i += PyTuple_GET_SIZE(o); + } + if ((!boundscheck) || likely((0 <= wrapped_i) & (wrapped_i < PyTuple_GET_SIZE(o)))) { + PyObject *r = PyTuple_GET_ITEM(o, wrapped_i); + Py_INCREF(r); + return r; + } + return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); +#else + return PySequence_GetItem(o, i); +#endif +} +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, + CYTHON_NCP_UNUSED int wraparound, + CYTHON_NCP_UNUSED int boundscheck) { +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS + if (is_list || PyList_CheckExact(o)) { + Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); + if ((!boundscheck) || (likely((n >= 0) & (n < PyList_GET_SIZE(o))))) { + PyObject *r = PyList_GET_ITEM(o, n); + Py_INCREF(r); + return r; + } + } + else if (PyTuple_CheckExact(o)) { + Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? 
i : i + PyTuple_GET_SIZE(o); + if ((!boundscheck) || likely((n >= 0) & (n < PyTuple_GET_SIZE(o)))) { + PyObject *r = PyTuple_GET_ITEM(o, n); + Py_INCREF(r); + return r; + } + } else { + PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; + if (likely(m && m->sq_item)) { + if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { + Py_ssize_t l = m->sq_length(o); + if (likely(l >= 0)) { + i += l; + } else { + if (!PyErr_ExceptionMatches(PyExc_OverflowError)) + return NULL; + PyErr_Clear(); + } + } + return m->sq_item(o, i); + } + } +#else + if (is_list || PySequence_Check(o)) { + return PySequence_GetItem(o, i); + } +#endif + return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); +} + +/* PyErrFetchRestore */ + #if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + tmp_type = tstate->curexc_type; + tmp_value = tstate->curexc_value; + tmp_tb = tstate->curexc_traceback; + tstate->curexc_type = type; + tstate->curexc_value = value; + tstate->curexc_traceback = tb; + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +} +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { + *type = tstate->curexc_type; + *value = tstate->curexc_value; + *tb = tstate->curexc_traceback; + tstate->curexc_type = 0; + tstate->curexc_value = 0; + tstate->curexc_traceback = 0; +} +#endif + +/* CLineInTraceback */ + #ifndef CYTHON_CLINE_IN_TRACEBACK +static int __Pyx_CLineForTraceback(CYTHON_UNUSED PyThreadState *tstate, int c_line) { + PyObject *use_cline; + PyObject *ptype, *pvalue, *ptraceback; +#if CYTHON_COMPILING_IN_CPYTHON + PyObject **cython_runtime_dict; +#endif + __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); +#if CYTHON_COMPILING_IN_CPYTHON + cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); + if (likely(cython_runtime_dict)) { + use_cline = PyDict_GetItem(*cython_runtime_dict, __pyx_n_s_cline_in_traceback); + } else +#endif + { + PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); + if (use_cline_obj) { + use_cline = PyObject_Not(use_cline_obj) ? 
Py_False : Py_True; + Py_DECREF(use_cline_obj); + } else { + PyErr_Clear(); + use_cline = NULL; + } + } + if (!use_cline) { + c_line = 0; + PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); + } + else if (PyObject_Not(use_cline) != 0) { + c_line = 0; + } + __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); + return c_line; +} +#endif + +/* CodeObjectCache */ + static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { + int start = 0, mid = 0, end = count - 1; + if (end >= 0 && code_line > entries[end].code_line) { + return count; + } + while (start < end) { + mid = start + (end - start) / 2; + if (code_line < entries[mid].code_line) { + end = mid; + } else if (code_line > entries[mid].code_line) { + start = mid + 1; + } else { + return mid; + } + } + if (code_line <= entries[mid].code_line) { + return mid; + } else { + return mid + 1; + } +} +static PyCodeObject *__pyx_find_code_object(int code_line) { + PyCodeObject* code_object; + int pos; + if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { + return NULL; + } + pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); + if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { + return NULL; + } + code_object = __pyx_code_cache.entries[pos].code_object; + Py_INCREF(code_object); + return code_object; +} +static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { + int pos, i; + __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; + if (unlikely(!code_line)) { + return; + } + if (unlikely(!entries)) { + entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); + if (likely(entries)) { + __pyx_code_cache.entries = entries; + __pyx_code_cache.max_count = 64; + __pyx_code_cache.count = 1; + entries[0].code_line = code_line; + entries[0].code_object = code_object; + Py_INCREF(code_object); + } + return; + } + pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); + if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { + PyCodeObject* tmp = entries[pos].code_object; + entries[pos].code_object = code_object; + Py_DECREF(tmp); + return; + } + if (__pyx_code_cache.count == __pyx_code_cache.max_count) { + int new_max = __pyx_code_cache.max_count + 64; + entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( + __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry)); + if (unlikely(!entries)) { + return; + } + __pyx_code_cache.entries = entries; + __pyx_code_cache.max_count = new_max; + } + for (i=__pyx_code_cache.count; i>pos; i--) { + entries[i] = entries[i-1]; + } + entries[pos].code_line = code_line; + entries[pos].code_object = code_object; + __pyx_code_cache.count++; + Py_INCREF(code_object); +} + +/* AddTraceback */ + #include "compile.h" +#include "frameobject.h" +#include "traceback.h" +static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( + const char *funcname, int c_line, + int py_line, const char *filename) { + PyCodeObject *py_code = 0; + PyObject *py_srcfile = 0; + PyObject *py_funcname = 0; + #if PY_MAJOR_VERSION < 3 + py_srcfile = PyString_FromString(filename); + #else + py_srcfile = PyUnicode_FromString(filename); + #endif + if (!py_srcfile) goto bad; + if (c_line) { + #if PY_MAJOR_VERSION < 3 + py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, 
c_line); + #else + py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); + #endif + } + else { + #if PY_MAJOR_VERSION < 3 + py_funcname = PyString_FromString(funcname); + #else + py_funcname = PyUnicode_FromString(funcname); + #endif + } + if (!py_funcname) goto bad; + py_code = __Pyx_PyCode_New( + 0, + 0, + 0, + 0, + 0, + __pyx_empty_bytes, /*PyObject *code,*/ + __pyx_empty_tuple, /*PyObject *consts,*/ + __pyx_empty_tuple, /*PyObject *names,*/ + __pyx_empty_tuple, /*PyObject *varnames,*/ + __pyx_empty_tuple, /*PyObject *freevars,*/ + __pyx_empty_tuple, /*PyObject *cellvars,*/ + py_srcfile, /*PyObject *filename,*/ + py_funcname, /*PyObject *name,*/ + py_line, + __pyx_empty_bytes /*PyObject *lnotab*/ + ); + Py_DECREF(py_srcfile); + Py_DECREF(py_funcname); + return py_code; +bad: + Py_XDECREF(py_srcfile); + Py_XDECREF(py_funcname); + return NULL; +} +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename) { + PyCodeObject *py_code = 0; + PyFrameObject *py_frame = 0; + PyThreadState *tstate = __Pyx_PyThreadState_Current; + if (c_line) { + c_line = __Pyx_CLineForTraceback(tstate, c_line); + } + py_code = __pyx_find_code_object(c_line ? -c_line : py_line); + if (!py_code) { + py_code = __Pyx_CreateCodeObjectForTraceback( + funcname, c_line, py_line, filename); + if (!py_code) goto bad; + __pyx_insert_code_object(c_line ? -c_line : py_line, py_code); + } + py_frame = PyFrame_New( + tstate, /*PyThreadState *tstate,*/ + py_code, /*PyCodeObject *code,*/ + __pyx_d, /*PyObject *globals,*/ + 0 /*PyObject *locals*/ + ); + if (!py_frame) goto bad; + __Pyx_PyFrame_SetLineNumber(py_frame, py_line); + PyTraceBack_Here(py_frame); +bad: + Py_XDECREF(py_code); + Py_XDECREF(py_frame); +} + +/* CIntFromPyVerify */ + #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) +#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) +#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ + {\ + func_type value = func_value;\ + if (sizeof(target_type) < sizeof(func_type)) {\ + if (unlikely(value != (func_type) (target_type) value)) {\ + func_type zero = 0;\ + if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ + return (target_type) -1;\ + if (is_unsigned && unlikely(value < zero))\ + goto raise_neg_overflow;\ + else\ + goto raise_overflow;\ + }\ + }\ + return (target_type) value;\ + } + +/* CIntToPy */ + static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { + const int neg_one = (int) -1, const_zero = (int) 0; + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(int) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(int) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(int) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(int), + little, !is_unsigned); + } +} + 
+/* CIntToPy */ + static CYTHON_INLINE PyObject* __Pyx_PyInt_From_unsigned_int(unsigned int value) { + const unsigned int neg_one = (unsigned int) -1, const_zero = (unsigned int) 0; + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(unsigned int) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(unsigned int) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(unsigned int) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(unsigned int) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(unsigned int) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(unsigned int), + little, !is_unsigned); + } +} + +/* CIntFromPy */ + static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { + const int neg_one = (int) -1, const_zero = (int) 0; + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(int) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (int) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (int) 0; + case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) + case 2: + if (8 * sizeof(int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { + return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { + return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { + return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (int) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(int) <= sizeof(unsigned long)) { + 
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (int) 0; + case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) + case -2: + if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { + return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + } +#endif + if (sizeof(int) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { + 
__PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + int val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (int) -1; + } + } else { + int val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (int) -1; + val = __Pyx_PyInt_As_int(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to int"); + return (int) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to int"); + return (int) -1; +} + +/* CIntFromPy */ + static CYTHON_INLINE unsigned int __Pyx_PyInt_As_unsigned_int(PyObject *x) { + const unsigned int neg_one = (unsigned int) -1, const_zero = (unsigned int) 0; + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(unsigned int) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(unsigned int, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (unsigned int) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (unsigned int) 0; + case 1: __PYX_VERIFY_RETURN_INT(unsigned int, digit, digits[0]) + case 2: + if (8 * sizeof(unsigned int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) >= 2 * PyLong_SHIFT) { + return (unsigned int) (((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(unsigned int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) >= 3 * PyLong_SHIFT) { + return (unsigned int) (((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(unsigned int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) >= 4 * PyLong_SHIFT) { + return (unsigned int) (((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned 
int)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (unsigned int) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(unsigned int) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(unsigned int, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(unsigned int) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(unsigned int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (unsigned int) 0; + case -1: __PYX_VERIFY_RETURN_INT(unsigned int, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(unsigned int, digit, +digits[0]) + case -2: + if (8 * sizeof(unsigned int) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT) { + return (unsigned int) (((unsigned int)-1)*(((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(unsigned int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT) { + return (unsigned int) ((((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT) { + return (unsigned int) (((unsigned int)-1)*(((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(unsigned int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT) { + return (unsigned int) ((((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 4 * PyLong_SHIFT) { + return (unsigned int) (((unsigned int)-1)*(((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned 
int)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(unsigned int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 4 * PyLong_SHIFT) { + return (unsigned int) ((((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); + } + } + break; + } +#endif + if (sizeof(unsigned int) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(unsigned int, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(unsigned int) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(unsigned int, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + unsigned int val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (unsigned int) -1; + } + } else { + unsigned int val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (unsigned int) -1; + val = __Pyx_PyInt_As_unsigned_int(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to unsigned int"); + return (unsigned int) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to unsigned int"); + return (unsigned int) -1; +} + +/* CIntToPy */ + static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { + const long neg_one = (long) -1, const_zero = (long) 0; + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(long) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(long) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(long) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(long), + little, !is_unsigned); + } +} + +/* CIntFromPy */ + static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { + const long neg_one = (long) -1, const_zero = (long) 0; + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(long) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && 
unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (long) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (long) 0; + case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) + case 2: + if (8 * sizeof(long) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { + return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(long) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { + return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(long) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { + return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (long) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(long) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (long) 0; + case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) + case -2: + if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(long) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + 
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(long) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(long) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + } +#endif + if (sizeof(long) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + long val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (long) -1; + } + } else { + long val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (long) -1; + val = __Pyx_PyInt_As_long(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to long"); + return (long) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to long"); + return (long) -1; +} + +/* FastTypeChecks */ + #if CYTHON_COMPILING_IN_CPYTHON +static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { + while (a) { + a = a->tp_base; + if (a == b) + return 1; + } + return b == 
&PyBaseObject_Type; +} +static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { + PyObject *mro; + if (a == b) return 1; + mro = a->tp_mro; + if (likely(mro)) { + Py_ssize_t i, n; + n = PyTuple_GET_SIZE(mro); + for (i = 0; i < n; i++) { + if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) + return 1; + } + return 0; + } + return __Pyx_InBases(a, b); +} +#if PY_MAJOR_VERSION == 2 +static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { + PyObject *exception, *value, *tb; + int res; + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ErrFetch(&exception, &value, &tb); + res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; + if (unlikely(res == -1)) { + PyErr_WriteUnraisable(err); + res = 0; + } + if (!res) { + res = PyObject_IsSubclass(err, exc_type2); + if (unlikely(res == -1)) { + PyErr_WriteUnraisable(err); + res = 0; + } + } + __Pyx_ErrRestore(exception, value, tb); + return res; +} +#else +static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { + int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; + if (!res) { + res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); + } + return res; +} +#endif +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { + if (likely(err == exc_type)) return 1; + if (likely(PyExceptionClass_Check(err))) { + return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); + } + return PyErr_GivenExceptionMatches(err, exc_type); +} +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { + if (likely(err == exc_type1 || err == exc_type2)) return 1; + if (likely(PyExceptionClass_Check(err))) { + return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); + } + return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); +} +#endif + +/* CheckBinaryVersion */ + static int __Pyx_check_binary_version(void) { + char ctversion[4], rtversion[4]; + PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); + PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); + if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { + char message[200]; + PyOS_snprintf(message, sizeof(message), + "compiletime version %s of module '%.100s' " + "does not match runtime version %s", + ctversion, __Pyx_MODULE_NAME, rtversion); + return PyErr_WarnEx(NULL, message, 1); + } + return 0; +} + +/* InitStrings */ + static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { + while (t->p) { + #if PY_MAJOR_VERSION < 3 + if (t->is_unicode) { + *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); + } else if (t->intern) { + *t->p = PyString_InternFromString(t->s); + } else { + *t->p = PyString_FromStringAndSize(t->s, t->n - 1); + } + #else + if (t->is_unicode | t->is_str) { + if (t->intern) { + *t->p = PyUnicode_InternFromString(t->s); + } else if (t->encoding) { + *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); + } else { + *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); + } + } else { + *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); + } + #endif + if (!*t->p) + return -1; + if (PyObject_Hash(*t->p) == -1) + PyErr_Clear(); + ++t; + } + return 0; +} + +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { + return __Pyx_PyUnicode_FromStringAndSize(c_str, 
(Py_ssize_t)strlen(c_str)); +} +static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { + Py_ssize_t ignore; + return __Pyx_PyObject_AsStringAndSize(o, &ignore); +} +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT +#if !CYTHON_PEP393_ENABLED +static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { + char* defenc_c; + PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); + if (!defenc) return NULL; + defenc_c = PyBytes_AS_STRING(defenc); +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + { + char* end = defenc_c + PyBytes_GET_SIZE(defenc); + char* c; + for (c = defenc_c; c < end; c++) { + if ((unsigned char) (*c) >= 128) { + PyUnicode_AsASCIIString(o); + return NULL; + } + } + } +#endif + *length = PyBytes_GET_SIZE(defenc); + return defenc_c; +} +#else +static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { + if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + if (likely(PyUnicode_IS_ASCII(o))) { + *length = PyUnicode_GET_LENGTH(o); + return PyUnicode_AsUTF8(o); + } else { + PyUnicode_AsASCIIString(o); + return NULL; + } +#else + return PyUnicode_AsUTF8AndSize(o, length); +#endif +} +#endif +#endif +static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT + if ( +#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + __Pyx_sys_getdefaultencoding_not_ascii && +#endif + PyUnicode_Check(o)) { + return __Pyx_PyUnicode_AsStringAndSize(o, length); + } else +#endif +#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) + if (PyByteArray_Check(o)) { + *length = PyByteArray_GET_SIZE(o); + return PyByteArray_AS_STRING(o); + } else +#endif + { + char* result; + int r = PyBytes_AsStringAndSize(o, &result, length); + if (unlikely(r < 0)) { + return NULL; + } else { + return result; + } + } +} +static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { + int is_true = x == Py_True; + if (is_true | (x == Py_False) | (x == Py_None)) return is_true; + else return PyObject_IsTrue(x); +} +static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { +#if PY_MAJOR_VERSION >= 3 + if (PyLong_Check(result)) { + if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, + "__int__ returned non-int (type %.200s). 
" + "The ability to return an instance of a strict subclass of int " + "is deprecated, and may be removed in a future version of Python.", + Py_TYPE(result)->tp_name)) { + Py_DECREF(result); + return NULL; + } + return result; + } +#endif + PyErr_Format(PyExc_TypeError, + "__%.4s__ returned non-%.4s (type %.200s)", + type_name, type_name, Py_TYPE(result)->tp_name); + Py_DECREF(result); + return NULL; +} +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { +#if CYTHON_USE_TYPE_SLOTS + PyNumberMethods *m; +#endif + const char *name = NULL; + PyObject *res = NULL; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x) || PyLong_Check(x))) +#else + if (likely(PyLong_Check(x))) +#endif + return __Pyx_NewRef(x); +#if CYTHON_USE_TYPE_SLOTS + m = Py_TYPE(x)->tp_as_number; + #if PY_MAJOR_VERSION < 3 + if (m && m->nb_int) { + name = "int"; + res = m->nb_int(x); + } + else if (m && m->nb_long) { + name = "long"; + res = m->nb_long(x); + } + #else + if (likely(m && m->nb_int)) { + name = "int"; + res = m->nb_int(x); + } + #endif +#else + if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { + res = PyNumber_Int(x); + } +#endif + if (likely(res)) { +#if PY_MAJOR_VERSION < 3 + if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { +#else + if (unlikely(!PyLong_CheckExact(res))) { +#endif + return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); + } + } + else if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_TypeError, + "an integer is required"); + } + return res; +} +static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { + Py_ssize_t ival; + PyObject *x; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_CheckExact(b))) { + if (sizeof(Py_ssize_t) >= sizeof(long)) + return PyInt_AS_LONG(b); + else + return PyInt_AsSsize_t(x); + } +#endif + if (likely(PyLong_CheckExact(b))) { + #if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)b)->ob_digit; + const Py_ssize_t size = Py_SIZE(b); + if (likely(__Pyx_sst_abs(size) <= 1)) { + ival = likely(size) ? 
+            if (size == -1) ival = -ival;
+            return ival;
+        } else {
+            switch (size) {
+                case 2:
+                    if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
+                        return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+                    }
+                    break;
+                case -2:
+                    if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
+                        return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+                    }
+                    break;
+                case 3:
+                    if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
+                        return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+                    }
+                    break;
+                case -3:
+                    if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
+                        return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+                    }
+                    break;
+                case 4:
+                    if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
+                        return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+                    }
+                    break;
+                case -4:
+                    if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
+                        return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+                    }
+                    break;
+            }
+        }
+    #endif
+        return PyLong_AsSsize_t(b);
+    }
+    x = PyNumber_Index(b);
+    if (!x) return -1;
+    ival = PyInt_AsSsize_t(x);
+    Py_DECREF(x);
+    return ival;
+}
+static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
+    return PyInt_FromSize_t(ival);
+}
+
+
+#endif /* Py_PYTHON_H */
diff --git a/astropy/cosmology/scalar_inv_efuncs.pyx b/astropy/cosmology/scalar_inv_efuncs.pyx
new file mode 100644
index 0000000..da646d3
--- /dev/null
+++ b/astropy/cosmology/scalar_inv_efuncs.pyx
@@ -0,0 +1,225 @@
+""" Cython inverse efuncs for cosmology integrals"""
+#cython: boundscheck=False
+
+cimport cython
+from libc.math cimport exp, pow
+
+## Inverse efunc methods for various dark energy subclasses
+## These take only scalar arguments since that is what the integral
+## routines give them.
+
+## Implementation notes:
+## * Using a python list for nu_y seems to be faster than a ndarray,
+##   given that nu_y generally has a small number of elements,
+##   even when you turn off bounds checking, etc.
+## * Using pow(x, -0.5) is slightly faster than x**(-0.5) and
+##   even more so than 1.0 / sqrt(x)
+## * Hardwiring in the p, 1/p, k, prefac values in nufunc is
+##   nontrivially faster than declaring them with cdef
+
+######### LambdaCDM
+# No relativistic species
+def lcdm_inv_efunc_norel(double z, double Om0, double Ode0, double Ok0):
+    cdef double opz = 1.0 + z
+    return pow(opz**2 * (opz * Om0 + Ok0) + Ode0, -0.5)
+
+# Massless neutrinos
+def lcdm_inv_efunc_nomnu(double z, double Om0, double Ode0, double Ok0,
+                         double Or0):
+    cdef double opz = 1.0 + z
+    return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 + Ode0, -0.5)
+
+# With massive neutrinos
+def lcdm_inv_efunc(double z, double Om0, double Ode0, double Ok0,
+    double Ogamma0, double NeffPerNu, int nmasslessnu, list nu_y):
+
+    cdef double opz = 1.0 + z
+    cdef double Or0 = Ogamma0 * (1.0 + nufunc(opz, NeffPerNu, nmasslessnu, nu_y))
+    return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 + Ode0, -0.5)
+
+######## FlatLambdaCDM
+# No relativistic species
+def flcdm_inv_efunc_norel(double z, double Om0, double Ode0):
+    return pow((1.
+ z)**3 * Om0 + Ode0, -0.5) + +# Massless neutrinos +def flcdm_inv_efunc_nomnu(double z, double Om0, double Ode0, double Or0): + cdef double opz = 1.0 + z + return pow(opz**3 * (opz * Or0 + Om0) + Ode0, -0.5) + +# With massive neutrinos +def flcdm_inv_efunc(double z, double Om0, double Ode0, double Ogamma0, + double NeffPerNu, int nmasslessnu, list nu_y): + + cdef double opz = 1.0 + z + cdef double Or0 = Ogamma0 * (1.0 + nufunc(opz, NeffPerNu, nmasslessnu, nu_y)) + return pow(opz**3 * (opz * Or0 + Om0) + Ode0, -0.5) + +######## wCDM +# No relativistic species +def wcdm_inv_efunc_norel(double z, double Om0, double Ode0, + double Ok0, double w0): + cdef double opz = 1.0 + z + return pow(opz**2 * (opz * Om0 + Ok0) + + Ode0 * opz**(3. * (1.0 + w0)), -0.5) + +# Massless neutrinos +def wcdm_inv_efunc_nomnu(double z, double Om0, double Ode0, double Ok0, + double Or0, double w0): + cdef double opz = 1.0 + z + return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 + + Ode0 * opz**(3. * (1.0 + w0)), -0.5) + +# With massive neutrinos +def wcdm_inv_efunc(double z, double Om0, double Ode0, double Ok0, + double Ogamma0, double NeffPerNu, int nmasslessnu, list nu_y, double w0): + + cdef double opz = 1.0 + z + cdef double Or0 = Ogamma0 * (1.0 + nufunc(opz, NeffPerNu, nmasslessnu, nu_y)) + return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 + + Ode0 * opz**(3. * (1.0 + w0)), -0.5) + +######## Flat wCDM +# No relativistic species +def fwcdm_inv_efunc_norel(double z, double Om0, double Ode0, double w0): + cdef double opz = 1.0 + z + return pow(opz**3 * Om0 + Ode0 * opz**(3. * (1.0 + w0)), -0.5) + +# Massless neutrinos +def fwcdm_inv_efunc_nomnu(double z, double Om0, double Ode0, + double Or0, double w0): + cdef double opz = 1.0 + z + return pow(opz**3 * (opz * Or0 + Om0) + + Ode0 * opz**(3. * (1.0 + w0)), -0.5) + +# With massive neutrinos +def fwcdm_inv_efunc(double z, double Om0, double Ode0, + double Ogamma0, double NeffPerNu, int nmasslessnu, list nu_y, double w0): + + cdef double opz = 1.0 + z + cdef double Or0 = Ogamma0 * (1.0 + nufunc(opz, NeffPerNu, nmasslessnu, nu_y)) + return pow(opz**3 * (opz * Or0 + Om0) + Ode0 * opz**(3. * (1.0 + w0)), -0.5) + +######## w0waCDM +# No relativistic species +def w0wacdm_inv_efunc_norel(double z, double Om0, double Ode0, double Ok0, + double w0, double wa): + cdef double opz = 1.0 + z + cdef Odescl = opz**(3. * (1 + w0 + wa)) * exp(-3.0 * wa * z / opz) + return pow(opz**2 * (opz * Om0 + Ok0) + Ode0 * Odescl, -0.5) + +# Massless neutrinos +def w0wacdm_inv_efunc_nomnu(double z, double Om0, double Ode0, double Ok0, + double Or0, double w0, double wa): + cdef double opz = 1.0 + z + cdef Odescl = opz**(3. * (1 + w0 + wa)) * exp(-3.0 * wa * z / opz) + return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 + + Ode0 * Odescl, -0.5) + +def w0wacdm_inv_efunc(double z, double Om0, double Ode0, double Ok0, + double Ogamma0, double NeffPerNu, int nmasslessnu, list nu_y, double w0, + double wa): + + cdef double opz = 1.0 + z + cdef double Or0 = Ogamma0 * (1.0 + nufunc(opz, NeffPerNu, nmasslessnu, nu_y)) + cdef Odescl = opz**(3. * (1 + w0 + wa)) * exp(-3.0 * wa * z / opz) + return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 + Ode0 * Odescl, -0.5) + +######## Flatw0waCDM +# No relativistic species +def fw0wacdm_inv_efunc_norel(double z, double Om0, double Ode0, + double w0, double wa): + cdef double opz = 1.0 + z + cdef Odescl = opz**(3. 
* (1 + w0 + wa)) * exp(-3.0 * wa * z / opz)
+    return pow(opz**3 * Om0 + Ode0 * Odescl, -0.5)
+
+# Massless neutrinos
+def fw0wacdm_inv_efunc_nomnu(double z, double Om0, double Ode0,
+    double Or0, double w0, double wa):
+    cdef double opz = 1.0 + z
+    cdef Odescl = opz**(3. * (1 + w0 + wa)) * exp(-3.0 * wa * z / opz)
+    return pow((opz * Or0 + Om0) * opz**3 + Ode0 * Odescl, -0.5)
+
+# With massive neutrinos
+def fw0wacdm_inv_efunc(double z, double Om0, double Ode0,
+    double Ogamma0, double NeffPerNu, int nmasslessnu, list nu_y, double w0,
+    double wa):
+
+    cdef double opz = 1.0 + z
+    cdef double Or0 = Ogamma0 * (1.0 + nufunc(opz, NeffPerNu, nmasslessnu, nu_y))
+    cdef Odescl = opz**(3. * (1 + w0 + wa)) * exp(-3.0 * wa * z / opz)
+    return pow((opz * Or0 + Om0) * opz**3 + Ode0 * Odescl, -0.5)
+
+######## wpwaCDM
+# No relativistic species
+def wpwacdm_inv_efunc_norel(double z, double Om0, double Ode0, double Ok0,
+    double wp, double apiv, double wa):
+    cdef double opz = 1.0 + z
+    cdef Odescl = opz**(3. * (1. + wp + apiv * wa)) * exp(-3. * wa * z / opz)
+    return pow(opz**2 * (opz * Om0 + Ok0) + Ode0 * Odescl, -0.5)
+
+# Massless neutrinos
+def wpwacdm_inv_efunc_nomnu(double z, double Om0, double Ode0, double Ok0,
+    double Or0, double wp, double apiv, double wa):
+    cdef double opz = 1.0 + z
+    cdef Odescl = opz**(3. * (1. + wp + apiv * wa)) * exp(-3. * wa * z / opz)
+    return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 +
+               Ode0 * Odescl, -0.5)
+
+# With massive neutrinos
+def wpwacdm_inv_efunc(double z, double Om0, double Ode0, double Ok0,
+    double Ogamma0, double NeffPerNu, int nmasslessnu, list nu_y, double wp,
+    double apiv, double wa):
+
+    cdef double opz = 1.0 + z
+    cdef double Or0 = Ogamma0 * (1.0 + nufunc(opz, NeffPerNu, nmasslessnu, nu_y))
+    cdef Odescl = opz**(3. * (1. + wp + apiv * wa)) * exp(-3. * wa * z / opz)
+    return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 + Ode0 * Odescl, -0.5)
+
+######## w0wzCDM
+# No relativistic species
+def w0wzcdm_inv_efunc_norel(double z, double Om0, double Ode0, double Ok0,
+    double w0, double wz):
+    cdef double opz = 1.0 + z
+    cdef Odescl = opz**(3. * (1. + w0 - wz)) * exp(-3. * wz * z)
+    return pow(opz**2 * (opz * Om0 + Ok0) + Ode0 * Odescl, -0.5)
+
+# Massless neutrinos
+def w0wzcdm_inv_efunc_nomnu(double z, double Om0, double Ode0, double Ok0,
+    double Or0, double w0, double wz):
+    cdef double opz = 1.0 + z
+    cdef Odescl = opz**(3. * (1. + w0 - wz)) * exp(-3. * wz * z)
+    return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 +
+               Ode0 * Odescl, -0.5)
+
+# With massive neutrinos
+def w0wzcdm_inv_efunc(double z, double Om0, double Ode0, double Ok0,
+    double Ogamma0, double NeffPerNu, int nmasslessnu, list nu_y, double w0,
+    double wz):
+
+    cdef double opz = 1.0 + z
+    cdef double Or0 = Ogamma0 * (1.0 + nufunc(opz, NeffPerNu, nmasslessnu, nu_y))
+    cdef Odescl = opz**(3. * (1. + w0 - wz)) * exp(-3. * wz * z)
+    return pow((((opz * Or0 + Om0) * opz) + Ok0) * opz**2 + Ode0 * Odescl, -0.5)
+
+######## Neutrino relative density function
+# Scalar equivalent to FLRW.nu_relative_density in core.py
+# Please see that for further discussion.
+# This should only be called with massive neutrinos (i.e., nu_y is not empty)
+# Briefly, this is just a numerical fitting function to the true relationship,
+# which is too expensive to want to evaluate directly. The
+# constants which appear are:
+#  p = 1.83 -> numerical fitting constant from Komatsu et al.
+#  1/p = 0.54644... -> same constant
+#  k = 0.3173 -> another fitting constant
+#  7/8 (4/11)^(4/3) = 0.2271...
-> fermion/boson constant for neutrino +# contribution -- see any cosmology book +# The Komatsu reference is: Komatsu et al. 2011, ApJS 192, 18 +cdef nufunc(double opz, double NeffPerNu, int nmasslessnu, list nu_y): + cdef int N = len(nu_y) + cdef double k = 0.3173 / opz + cdef double rel_mass_sum = nmasslessnu + cdef unsigned int i + for i in range(N): + rel_mass_sum += pow(1.0 + (k * nu_y[i])**1.83, 0.54644808743) + return 0.22710731766 * NeffPerNu * rel_mass_sum diff --git a/astropy/cosmology/setup_package.py b/astropy/cosmology/setup_package.py new file mode 100644 index 0000000..3cd9f7c --- /dev/null +++ b/astropy/cosmology/setup_package.py @@ -0,0 +1,5 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + + +def requires_2to3(): + return False diff --git a/astropy/cosmology/tests/__init__.py b/astropy/cosmology/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/astropy/cosmology/tests/test_cosmology.py b/astropy/cosmology/tests/test_cosmology.py new file mode 100644 index 0000000..58bbbf5 --- /dev/null +++ b/astropy/cosmology/tests/test_cosmology.py @@ -0,0 +1,1567 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +from io import StringIO + +import pytest +import numpy as np + +from .. import core, funcs +from ...tests.helper import quantity_allclose as allclose +from ...utils.compat import NUMPY_LT_1_14 +from ... import units as u + +try: + import scipy # pylint: disable=W0611 +except ImportError: + HAS_SCIPY = False +else: + HAS_SCIPY = True + + +def test_init(): + """ Tests to make sure the code refuses inputs it is supposed to""" + with pytest.raises(ValueError): + cosmo = core.FlatLambdaCDM(H0=70, Om0=-0.27) + with pytest.raises(ValueError): + cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Neff=-1) + with pytest.raises(ValueError): + cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, + Tcmb0=u.Quantity([0.0, 2], u.K)) + with pytest.raises(ValueError): + h0bad = u.Quantity([70, 100], u.km / u.s / u.Mpc) + cosmo = core.FlatLambdaCDM(H0=h0bad, Om0=0.27) + with pytest.raises(ValueError): + cosmo = core.FlatLambdaCDM(H0=70, Om0=0.2, Tcmb0=3, m_nu=0.5) + with pytest.raises(ValueError): + bad_mnu = u.Quantity([-0.3, 0.2, 0.1], u.eV) + cosmo = core.FlatLambdaCDM(H0=70, Om0=0.2, Tcmb0=3, m_nu=bad_mnu) + with pytest.raises(ValueError): + bad_mnu = u.Quantity([0.15, 0.2, 0.1], u.eV) + cosmo = core.FlatLambdaCDM(H0=70, Om0=0.2, Tcmb0=3, Neff=2, m_nu=bad_mnu) + with pytest.raises(ValueError): + bad_mnu = u.Quantity([-0.3, 0.2], u.eV) # 2, expecting 3 + cosmo = core.FlatLambdaCDM(H0=70, Om0=0.2, Tcmb0=3, m_nu=bad_mnu) + with pytest.raises(ValueError): + cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Ob0=-0.04) + with pytest.raises(ValueError): + cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Ob0=0.4) + with pytest.raises(ValueError): + cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27) + cosmo.Ob(1) + with pytest.raises(ValueError): + cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27) + cosmo.Odm(1) + with pytest.raises(TypeError): + core.default_cosmology.validate(4) + + +def test_basic(): + cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=2.0, Neff=3.04, + Ob0=0.05) + assert allclose(cosmo.Om0, 0.27) + assert allclose(cosmo.Ode0, 0.729975, rtol=1e-4) + assert allclose(cosmo.Ob0, 0.05) + assert allclose(cosmo.Odm0, 0.27 - 0.05) + # This next test will fail if astropy.const starts returning non-mks + # units by default; see the comment at the top of core.py + assert 
allclose(cosmo.Ogamma0, 1.463285e-5, rtol=1e-4) + assert allclose(cosmo.Onu0, 1.01026e-5, rtol=1e-4) + assert allclose(cosmo.Ok0, 0.0) + assert allclose(cosmo.Om0 + cosmo.Ode0 + cosmo.Ogamma0 + cosmo.Onu0, + 1.0, rtol=1e-6) + assert allclose(cosmo.Om(1) + cosmo.Ode(1) + cosmo.Ogamma(1) + + cosmo.Onu(1), 1.0, rtol=1e-6) + assert allclose(cosmo.Tcmb0, 2.0 * u.K) + assert allclose(cosmo.Tnu0, 1.4275317 * u.K, rtol=1e-5) + assert allclose(cosmo.Neff, 3.04) + assert allclose(cosmo.h, 0.7) + assert allclose(cosmo.H0, 70.0 * u.km / u.s / u.Mpc) + + # Make sure setting them as quantities gives the same results + H0 = u.Quantity(70, u.km / (u.s * u.Mpc)) + T = u.Quantity(2.0, u.K) + cosmo = core.FlatLambdaCDM(H0=H0, Om0=0.27, Tcmb0=T, Neff=3.04, Ob0=0.05) + assert allclose(cosmo.Om0, 0.27) + assert allclose(cosmo.Ode0, 0.729975, rtol=1e-4) + assert allclose(cosmo.Ob0, 0.05) + assert allclose(cosmo.Odm0, 0.27 - 0.05) + assert allclose(cosmo.Ogamma0, 1.463285e-5, rtol=1e-4) + assert allclose(cosmo.Onu0, 1.01026e-5, rtol=1e-4) + assert allclose(cosmo.Ok0, 0.0) + assert allclose(cosmo.Om0 + cosmo.Ode0 + cosmo.Ogamma0 + cosmo.Onu0, + 1.0, rtol=1e-6) + assert allclose(cosmo.Om(1) + cosmo.Ode(1) + cosmo.Ogamma(1) + + cosmo.Onu(1), 1.0, rtol=1e-6) + assert allclose(cosmo.Tcmb0, 2.0 * u.K) + assert allclose(cosmo.Tnu0, 1.4275317 * u.K, rtol=1e-5) + assert allclose(cosmo.Neff, 3.04) + assert allclose(cosmo.h, 0.7) + assert allclose(cosmo.H0, 70.0 * u.km / u.s / u.Mpc) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_units(): + """ Test if the right units are being returned""" + + cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=2.0) + assert cosmo.comoving_distance(1.0).unit == u.Mpc + assert cosmo._comoving_distance_z1z2(1.0, 2.0).unit == u.Mpc + assert cosmo.comoving_transverse_distance(1.0).unit == u.Mpc + assert cosmo._comoving_transverse_distance_z1z2(1.0, 2.0).unit == u.Mpc + assert cosmo.angular_diameter_distance(1.0).unit == u.Mpc + assert cosmo.angular_diameter_distance_z1z2(1.0, 2.0).unit == u.Mpc + assert cosmo.luminosity_distance(1.0).unit == u.Mpc + assert cosmo.lookback_time(1.0).unit == u.Gyr + assert cosmo.lookback_distance(1.0).unit == u.Mpc + assert cosmo.H0.unit == u.km / u.Mpc / u.s + assert cosmo.H(1.0).unit == u.km / u.Mpc / u.s + assert cosmo.Tcmb0.unit == u.K + assert cosmo.Tcmb(1.0).unit == u.K + assert cosmo.Tcmb([0.0, 1.0]).unit == u.K + assert cosmo.Tnu0.unit == u.K + assert cosmo.Tnu(1.0).unit == u.K + assert cosmo.Tnu([0.0, 1.0]).unit == u.K + assert cosmo.arcsec_per_kpc_comoving(1.0).unit == u.arcsec / u.kpc + assert cosmo.arcsec_per_kpc_proper(1.0).unit == u.arcsec / u.kpc + assert cosmo.kpc_comoving_per_arcmin(1.0).unit == u.kpc / u.arcmin + assert cosmo.kpc_proper_per_arcmin(1.0).unit == u.kpc / u.arcmin + assert cosmo.critical_density(1.0).unit == u.g / u.cm ** 3 + assert cosmo.comoving_volume(1.0).unit == u.Mpc ** 3 + assert cosmo.age(1.0).unit == u.Gyr + assert cosmo.distmod(1.0).unit == u.mag + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_distance_broadcast(): + """ Test array shape broadcasting for functions with single + redshift inputs""" + + cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, + m_nu=u.Quantity([0.0, 0.1, 0.011], u.eV)) + z = np.linspace(0.1, 1, 6) + z_reshape2d = z.reshape(2, 3) + z_reshape3d = z.reshape(3, 2, 1) + # Things with units + methods = ['comoving_distance', 'luminosity_distance', + 'comoving_transverse_distance', 'angular_diameter_distance', + 'distmod', 'lookback_time', 'age', 'comoving_volume', + 'differential_comoving_volume', 
'kpc_comoving_per_arcmin']
+    for method in methods:
+        g = getattr(cosmo, method)
+        value_flat = g(z)
+        assert value_flat.shape == z.shape
+        value_2d = g(z_reshape2d)
+        assert value_2d.shape == z_reshape2d.shape
+        value_3d = g(z_reshape3d)
+        assert value_3d.shape == z_reshape3d.shape
+        assert value_flat.unit == value_2d.unit
+        assert value_flat.unit == value_3d.unit
+        assert allclose(value_flat, value_2d.flatten())
+        assert allclose(value_flat, value_3d.flatten())
+
+    # Also test unitless ones
+    methods = ['absorption_distance', 'Om', 'Ode', 'Ok', 'H',
+               'w', 'de_density_scale', 'Onu', 'Ogamma',
+               'nu_relative_density']
+    for method in methods:
+        g = getattr(cosmo, method)
+        value_flat = g(z)
+        assert value_flat.shape == z.shape
+        value_2d = g(z_reshape2d)
+        assert value_2d.shape == z_reshape2d.shape
+        value_3d = g(z_reshape3d)
+        assert value_3d.shape == z_reshape3d.shape
+        assert allclose(value_flat, value_2d.flatten())
+        assert allclose(value_flat, value_3d.flatten())
+
+    # Test some dark energy models
+    methods = ['Om', 'Ode', 'w', 'de_density_scale']
+    for tcosmo in [core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.5),
+                   core.wCDM(H0=70, Om0=0.27, Ode0=0.5, w0=-1.2),
+                   core.w0waCDM(H0=70, Om0=0.27, Ode0=0.5, w0=-1.2, wa=-0.2),
+                   core.wpwaCDM(H0=70, Om0=0.27, Ode0=0.5,
+                                wp=-1.2, wa=-0.2, zp=0.9),
+                   core.w0wzCDM(H0=70, Om0=0.27, Ode0=0.5, w0=-1.2, wz=0.1)]:
+        for method in methods:
+            g = getattr(tcosmo, method)
+            value_flat = g(z)
+            assert value_flat.shape == z.shape
+            value_2d = g(z_reshape2d)
+            assert value_2d.shape == z_reshape2d.shape
+            value_3d = g(z_reshape3d)
+            assert value_3d.shape == z_reshape3d.shape
+            assert allclose(value_flat, value_2d.flatten())
+            assert allclose(value_flat, value_3d.flatten())
+
+
+@pytest.mark.skipif('not HAS_SCIPY')
+def test_clone():
+    """ Test clone operation"""
+
+    cosmo = core.FlatLambdaCDM(H0=70 * u.km / u.s / u.Mpc, Om0=0.27,
+                               Tcmb0=3.0 * u.K)
+    z = np.linspace(0.1, 3, 15)
+
+    # First, test with no changes, which should return same object
+    newclone = cosmo.clone()
+    assert newclone is cosmo
+
+    # Now change H0
+    # Note that H0 affects Ode0 because it changes Ogamma0
+    newclone = cosmo.clone(H0=60 * u.km / u.s / u.Mpc)
+    assert newclone is not cosmo
+    assert newclone.__class__ == cosmo.__class__
+    assert newclone.name == cosmo.name
+    assert not allclose(newclone.H0.value, cosmo.H0.value)
+    assert allclose(newclone.H0, 60.0 * u.km / u.s / u.Mpc)
+    assert allclose(newclone.Om0, cosmo.Om0)
+    assert allclose(newclone.Ok0, cosmo.Ok0)
+    assert not allclose(newclone.Ogamma0, cosmo.Ogamma0)
+    assert not allclose(newclone.Onu0, cosmo.Onu0)
+    assert allclose(newclone.Tcmb0, cosmo.Tcmb0)
+    assert allclose(newclone.m_nu, cosmo.m_nu)
+    assert allclose(newclone.Neff, cosmo.Neff)
+
+    # Compare modified version with directly instantiated one
+    cmp = core.FlatLambdaCDM(H0=60 * u.km / u.s / u.Mpc, Om0=0.27,
+                             Tcmb0=3.0 * u.K)
+    assert newclone.__class__ == cmp.__class__
+    assert newclone.name == cmp.name
+    assert allclose(newclone.H0, cmp.H0)
+    assert allclose(newclone.Om0, cmp.Om0)
+    assert allclose(newclone.Ode0, cmp.Ode0)
+    assert allclose(newclone.Ok0, cmp.Ok0)
+    assert allclose(newclone.Ogamma0, cmp.Ogamma0)
+    assert allclose(newclone.Onu0, cmp.Onu0)
+    assert allclose(newclone.Tcmb0, cmp.Tcmb0)
+    assert allclose(newclone.m_nu, cmp.m_nu)
+    assert allclose(newclone.Neff, cmp.Neff)
+    assert allclose(newclone.Om(z), cmp.Om(z))
+    assert allclose(newclone.H(z), cmp.H(z))
+    assert allclose(newclone.luminosity_distance(z),
+                    cmp.luminosity_distance(z))
+
+    # Now try changing
multiple things + newclone = cosmo.clone(name="New name", H0=65 * u.km / u.s / u.Mpc, + Tcmb0=2.8 * u.K) + assert newclone.__class__ == cosmo.__class__ + assert not newclone.name == cosmo.name + assert not allclose(newclone.H0.value, cosmo.H0.value) + assert allclose(newclone.H0, 65.0 * u.km / u.s / u.Mpc) + assert allclose(newclone.Om0, cosmo.Om0) + assert allclose(newclone.Ok0, cosmo.Ok0) + assert not allclose(newclone.Ogamma0, cosmo.Ogamma0) + assert not allclose(newclone.Onu0, cosmo.Onu0) + assert not allclose(newclone.Tcmb0.value, cosmo.Tcmb0.value) + assert allclose(newclone.Tcmb0, 2.8 * u.K) + assert allclose(newclone.m_nu, cosmo.m_nu) + assert allclose(newclone.Neff, cosmo.Neff) + + # And direct comparison + cmp = core.FlatLambdaCDM(name="New name", H0=65 * u.km / u.s / u.Mpc, + Om0=0.27, Tcmb0=2.8 * u.K) + assert newclone.__class__ == cmp.__class__ + assert newclone.name == cmp.name + assert allclose(newclone.H0, cmp.H0) + assert allclose(newclone.Om0, cmp.Om0) + assert allclose(newclone.Ode0, cmp.Ode0) + assert allclose(newclone.Ok0, cmp.Ok0) + assert allclose(newclone.Ogamma0, cmp.Ogamma0) + assert allclose(newclone.Onu0, cmp.Onu0) + assert allclose(newclone.Tcmb0, cmp.Tcmb0) + assert allclose(newclone.m_nu, cmp.m_nu) + assert allclose(newclone.Neff, cmp.Neff) + assert allclose(newclone.Om(z), cmp.Om(z)) + assert allclose(newclone.H(z), cmp.H(z)) + assert allclose(newclone.luminosity_distance(z), + cmp.luminosity_distance(z)) + + # Try a dark energy class, make sure it can handle w params + cosmo = core.w0waCDM(name="test w0wa", H0=70 * u.km / u.s / u.Mpc, + Om0=0.27, Ode0=0.5, wa=0.1, Tcmb0=4.0 * u.K) + newclone = cosmo.clone(w0=-1.1, wa=0.2) + assert newclone.__class__ == cosmo.__class__ + assert newclone.name == cosmo.name + assert allclose(newclone.H0, cosmo.H0) + assert allclose(newclone.Om0, cosmo.Om0) + assert allclose(newclone.Ode0, cosmo.Ode0) + assert allclose(newclone.Ok0, cosmo.Ok0) + assert not allclose(newclone.w0, cosmo.w0) + assert allclose(newclone.w0, -1.1) + assert not allclose(newclone.wa, cosmo.wa) + assert allclose(newclone.wa, 0.2) + + # Now test exception if user passes non-parameter + with pytest.raises(AttributeError): + newclone = cosmo.clone(not_an_arg=4) + + +def test_xtfuncs(): + """ Test of absorption and lookback integrand""" + cosmo = core.LambdaCDM(70, 0.3, 0.5, Tcmb0=2.725) + z = np.array([2.0, 3.2]) + assert allclose(cosmo.lookback_time_integrand(3), 0.052218976654969378, + rtol=1e-4) + assert allclose(cosmo.lookback_time_integrand(z), + [0.10333179, 0.04644541], rtol=1e-4) + assert allclose(cosmo.abs_distance_integrand(3), 3.3420145059180402, + rtol=1e-4) + assert allclose(cosmo.abs_distance_integrand(z), + [2.7899584, 3.44104758], rtol=1e-4) + + +def test_repr(): + """ Test string representation of built in classes""" + cosmo = core.LambdaCDM(70, 0.3, 0.5, Tcmb0=2.725) + expected = ('LambdaCDM(H0=70 km / (Mpc s), Om0=0.3, ' + 'Ode0=0.5, Tcmb0=2.725 K, Neff=3.04, m_nu=[{}] eV, ' + 'Ob0=None)').format(' 0. 0. 0.' if NUMPY_LT_1_14 else + '0. 0. 
0.') + assert str(cosmo) == expected + + cosmo = core.LambdaCDM(70, 0.3, 0.5, Tcmb0=2.725, m_nu=u.Quantity(0.01, u.eV)) + expected = ('LambdaCDM(H0=70 km / (Mpc s), Om0=0.3, Ode0=0.5, ' + 'Tcmb0=2.725 K, Neff=3.04, m_nu=[{}] eV, ' + 'Ob0=None)').format(' 0.01 0.01 0.01' if NUMPY_LT_1_14 else + '0.01 0.01 0.01') + assert str(cosmo) == expected + + cosmo = core.FlatLambdaCDM(50.0, 0.27, Tcmb0=3, Ob0=0.05) + expected = ('FlatLambdaCDM(H0=50 km / (Mpc s), Om0=0.27, ' + 'Tcmb0=3 K, Neff=3.04, m_nu=[{}] eV, Ob0=0.05)').format( + ' 0. 0. 0.' if NUMPY_LT_1_14 else '0. 0. 0.') + assert str(cosmo) == expected + + cosmo = core.wCDM(60.0, 0.27, 0.6, Tcmb0=2.725, w0=-0.8, name='test1') + expected = ('wCDM(name="test1", H0=60 km / (Mpc s), Om0=0.27, ' + 'Ode0=0.6, w0=-0.8, Tcmb0=2.725 K, Neff=3.04, ' + 'm_nu=[{}] eV, Ob0=None)').format( + ' 0. 0. 0.' if NUMPY_LT_1_14 else '0. 0. 0.') + assert str(cosmo) == expected + + cosmo = core.FlatwCDM(65.0, 0.27, w0=-0.6, name='test2') + expected = ('FlatwCDM(name="test2", H0=65 km / (Mpc s), Om0=0.27, ' + 'w0=-0.6, Tcmb0=0 K, Neff=3.04, m_nu=None, Ob0=None)') + assert str(cosmo) == expected + + cosmo = core.w0waCDM(60.0, 0.25, 0.4, w0=-0.6, Tcmb0=2.725, wa=0.1, name='test3') + expected = ('w0waCDM(name="test3", H0=60 km / (Mpc s), Om0=0.25, ' + 'Ode0=0.4, w0=-0.6, wa=0.1, Tcmb0=2.725 K, Neff=3.04, ' + 'm_nu=[{}] eV, Ob0=None)').format( + ' 0. 0. 0.' if NUMPY_LT_1_14 else '0. 0. 0.') + assert str(cosmo) == expected + + cosmo = core.Flatw0waCDM(55.0, 0.35, w0=-0.9, wa=-0.2, name='test4', + Ob0=0.0456789) + expected = ('Flatw0waCDM(name="test4", H0=55 km / (Mpc s), Om0=0.35, ' + 'w0=-0.9, Tcmb0=0 K, Neff=3.04, m_nu=None, ' + 'Ob0=0.0457)') + assert str(cosmo) == expected + + cosmo = core.wpwaCDM(50.0, 0.3, 0.3, wp=-0.9, wa=-0.2, + zp=0.3, name='test5') + expected = ('wpwaCDM(name="test5", H0=50 km / (Mpc s), Om0=0.3, ' + 'Ode0=0.3, wp=-0.9, wa=-0.2, zp=0.3, Tcmb0=0 K, ' + 'Neff=3.04, m_nu=None, Ob0=None)') + assert str(cosmo) == expected + + cosmo = core.w0wzCDM(55.0, 0.4, 0.8, w0=-1.05, wz=-0.2, Tcmb0=2.725, + m_nu=u.Quantity([0.001, 0.01, 0.015], u.eV)) + expected = ('w0wzCDM(H0=55 km / (Mpc s), Om0=0.4, Ode0=0.8, w0=-1.05, ' + 'wz=-0.2 Tcmb0=2.725 K, Neff=3.04, ' + 'm_nu=[{}] eV, Ob0=None)').format( + ' 0.001 0.01 0.015' if NUMPY_LT_1_14 else + '0.001 0.01 0.015') + assert str(cosmo) == expected + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_flat_z1(): + """ Test a flat cosmology at z=1 against several other on-line + calculators. 
+ """ + cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=0.0) + z = 1 + + # Test values were taken from the following web cosmology + # calculators on 27th Feb 2012: + + # Wright: http://www.astro.ucla.edu/~wright/CosmoCalc.html + # (http://adsabs.harvard.edu/abs/2006PASP..118.1711W) + # Kempner: http://www.kempner.net/cosmic.php + # iCosmos: http://www.icosmos.co.uk/index.html + + # The order of values below is Wright, Kempner, iCosmos' + assert allclose(cosmo.comoving_distance(z), + [3364.5, 3364.8, 3364.7988] * u.Mpc, rtol=1e-4) + assert allclose(cosmo.angular_diameter_distance(z), + [1682.3, 1682.4, 1682.3994] * u.Mpc, rtol=1e-4) + assert allclose(cosmo.luminosity_distance(z), + [6729.2, 6729.6, 6729.5976] * u.Mpc, rtol=1e-4) + assert allclose(cosmo.lookback_time(z), + [7.841, 7.84178, 7.843] * u.Gyr, rtol=1e-3) + assert allclose(cosmo.lookback_distance(z), + [2404.0, 2404.24, 2404.4] * u.Mpc, rtol=1e-3) + + +def test_zeroing(): + """ Tests if setting params to 0s always respects that""" + # Make sure Ode = 0 behaves that way + cosmo = core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.0) + assert allclose(cosmo.Ode([0, 1, 2, 3]), [0, 0, 0, 0]) + assert allclose(cosmo.Ode(1), 0) + # Ogamma0 and Onu + cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=0.0) + assert allclose(cosmo.Ogamma(1.5), [0, 0, 0, 0]) + assert allclose(cosmo.Ogamma([0, 1, 2, 3]), [0, 0, 0, 0]) + assert allclose(cosmo.Onu(1.5), [0, 0, 0, 0]) + assert allclose(cosmo.Onu([0, 1, 2, 3]), [0, 0, 0, 0]) + # Obaryon + cosmo = core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.73, Ob0=0.0) + assert allclose(cosmo.Ob([0, 1, 2, 3]), [0, 0, 0, 0]) + + +# This class is to test whether the routines work correctly +# if one only overloads w(z) +class test_cos_sub(core.FLRW): + def __init__(self): + core.FLRW.__init__(self, 70.0, 0.27, 0.73, Tcmb0=0.0, + name="test_cos") + self._w0 = -0.9 + + def w(self, z): + return self._w0 * np.ones_like(z) + +# Similar, but with neutrinos + + +class test_cos_subnu(core.FLRW): + def __init__(self): + core.FLRW.__init__(self, 70.0, 0.27, 0.73, Tcmb0=3.0, + m_nu=0.1 * u.eV, name="test_cos_nu") + self._w0 = -0.8 + + def w(self, z): + return self._w0 * np.ones_like(z) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_de_subclass(): + # This is the comparison object + z = [0.2, 0.4, 0.6, 0.9] + cosmo = core.wCDM(H0=70, Om0=0.27, Ode0=0.73, w0=-0.9, Tcmb0=0.0) + # Values taken from Ned Wrights advanced cosmo calculator, Aug 17 2012 + assert allclose(cosmo.luminosity_distance(z), + [975.5, 2158.2, 3507.3, 5773.1] * u.Mpc, rtol=1e-3) + # Now try the subclass that only gives w(z) + cosmo = test_cos_sub() + assert allclose(cosmo.luminosity_distance(z), + [975.5, 2158.2, 3507.3, 5773.1] * u.Mpc, rtol=1e-3) + # Test efunc + assert allclose(cosmo.efunc(1.0), 1.7489240754, rtol=1e-5) + assert allclose(cosmo.efunc([0.5, 1.0]), + [1.31744953, 1.7489240754], rtol=1e-5) + assert allclose(cosmo.inv_efunc([0.5, 1.0]), + [0.75904236, 0.57178011], rtol=1e-5) + # Test de_density_scale + assert allclose(cosmo.de_density_scale(1.0), 1.23114444, rtol=1e-4) + assert allclose(cosmo.de_density_scale([0.5, 1.0]), + [1.12934694, 1.23114444], rtol=1e-4) + + # Add neutrinos for efunc, inv_efunc + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_varyde_lumdist_mathematica(): + """Tests a few varying dark energy EOS models against a mathematica + computation""" + + # w0wa models + z = np.array([0.2, 0.4, 0.9, 1.2]) + cosmo = core.w0waCDM(H0=70, Om0=0.2, Ode0=0.8, w0=-1.1, wa=0.2, Tcmb0=0.0) + assert allclose(cosmo.w0, -1.1) + assert 
+
+    assert allclose(cosmo.luminosity_distance(z),
+                    [1004.0, 2268.62, 6265.76, 9061.84] * u.Mpc, rtol=1e-4)
+    assert allclose(cosmo.de_density_scale(0.0), 1.0, rtol=1e-5)
+    assert allclose(cosmo.de_density_scale([0.0, 0.5, 1.5]),
+                    [1.0, 0.9246310669529021, 0.9184087000251957])
+
+    cosmo = core.w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=0.0, Tcmb0=0.0)
+    assert allclose(cosmo.luminosity_distance(z),
+                    [971.667, 2141.67, 5685.96, 8107.41] * u.Mpc, rtol=1e-4)
+    cosmo = core.w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=-0.5,
+                         Tcmb0=0.0)
+    assert allclose(cosmo.luminosity_distance(z),
+                    [974.087, 2157.08, 5783.92, 8274.08] * u.Mpc, rtol=1e-4)
+
+    # wpwa models
+    cosmo = core.wpwaCDM(H0=70, Om0=0.2, Ode0=0.8, wp=-1.1, wa=0.2, zp=0.5,
+                         Tcmb0=0.0)
+    assert allclose(cosmo.wp, -1.1)
+    assert allclose(cosmo.wa, 0.2)
+    assert allclose(cosmo.zp, 0.5)
+    assert allclose(cosmo.luminosity_distance(z),
+                    [1010.81, 2294.45, 6369.45, 9218.95] * u.Mpc, rtol=1e-4)
+
+    cosmo = core.wpwaCDM(H0=70, Om0=0.2, Ode0=0.8, wp=-1.1, wa=0.2, zp=0.9,
+                         Tcmb0=0.0)
+    assert allclose(cosmo.wp, -1.1)
+    assert allclose(cosmo.wa, 0.2)
+    assert allclose(cosmo.zp, 0.9)
+    assert allclose(cosmo.luminosity_distance(z),
+                    [1013.68, 2305.3, 6412.37, 9283.33] * u.Mpc, rtol=1e-4)
+
+
+@pytest.mark.skipif('not HAS_SCIPY')
+def test_matter():
+    # Test non-relativistic matter evolution
+    tcos = core.FlatLambdaCDM(70.0, 0.3, Ob0=0.045)
+    assert allclose(tcos.Om0, 0.3)
+    assert allclose(tcos.H0, 70.0 * u.km / u.s / u.Mpc)
+    assert allclose(tcos.Om(0), 0.3)
+    assert allclose(tcos.Ob(0), 0.045)
+    z = np.array([0.0, 0.5, 1.0, 2.0])
+    assert allclose(tcos.Om(z), [0.3, 0.59124088, 0.77419355, 0.92045455],
+                    rtol=1e-4)
+    assert allclose(tcos.Ob(z),
+                    [0.045, 0.08868613, 0.11612903, 0.13806818], rtol=1e-4)
+    assert allclose(tcos.Odm(z), [0.255, 0.50255474, 0.65806452, 0.78238636],
+                    rtol=1e-4)
+    # Consistency of dark and baryonic matter evolution with all
+    # non-relativistic matter
+    assert allclose(tcos.Ob(z) + tcos.Odm(z), tcos.Om(z))
+
+
+@pytest.mark.skipif('not HAS_SCIPY')
+def test_ocurv():
+    # Test Ok evolution
+    # Flat, boring case
+    tcos = core.FlatLambdaCDM(70.0, 0.3)
+    assert allclose(tcos.Ok0, 0.0)
+    assert allclose(tcos.Ok(0), 0.0)
+    z = np.array([0.0, 0.5, 1.0, 2.0])
+    assert allclose(tcos.Ok(z), [0.0, 0.0, 0.0, 0.0],
+                    rtol=1e-6)
+
+    # Not flat
+    tcos = core.LambdaCDM(70.0, 0.3, 0.5, Tcmb0=u.Quantity(0.0, u.K))
+    assert allclose(tcos.Ok0, 0.2)
+    assert allclose(tcos.Ok(0), 0.2)
+    assert allclose(tcos.Ok(z), [0.2, 0.22929936, 0.21621622, 0.17307692],
+                    rtol=1e-4)
+
+    # Test the sum; note that Ogamma/Onu are 0
+    assert allclose(tcos.Ok(z) + tcos.Om(z) + tcos.Ode(z),
+                    [1.0, 1.0, 1.0, 1.0], rtol=1e-5)
+
+
+@pytest.mark.skipif('not HAS_SCIPY')
+def test_ode():
+    # Test Ode evolution, turn off neutrinos, cmb
+    tcos = core.FlatLambdaCDM(70.0, 0.3, Tcmb0=0)
+    assert allclose(tcos.Ode0, 0.7)
+    assert allclose(tcos.Ode(0), 0.7)
+    z = np.array([0.0, 0.5, 1.0, 2.0])
+    assert allclose(tcos.Ode(z), [0.7, 0.408759, 0.2258065, 0.07954545],
+                    rtol=1e-5)
+
+
+@pytest.mark.skipif('not HAS_SCIPY')
+def test_ogamma():
+    """Tests the effects of changing the temperature of the CMB"""
+
+    # Tested against Ned Wright's advanced cosmology calculator,
+    # Sep 7 2012. The accuracy of our comparison is limited by
+    # how many digits it outputs, which limits our test to about
+    # 0.2% accuracy. The NWACC does not allow one
+    # to change the number of neutrino species, fixing that at 3.
+ # Also, inspection of the NWACC code shows it uses inaccurate + # constants at the 0.2% level (specifically, a_B), + # so we shouldn't expect to match it that well. The integral is + # also done rather crudely. Therefore, we should not expect + # the NWACC to be accurate to better than about 0.5%, which is + # unfortunate, but reflects a problem with it rather than this code. + # More accurate tests below using Mathematica + z = np.array([1.0, 10.0, 500.0, 1000.0]) + cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=0, Neff=3) + assert allclose(cosmo.angular_diameter_distance(z), + [1651.9, 858.2, 26.855, 13.642] * u.Mpc, rtol=5e-4) + cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725, Neff=3) + assert allclose(cosmo.angular_diameter_distance(z), + [1651.8, 857.9, 26.767, 13.582] * u.Mpc, rtol=5e-4) + cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=4.0, Neff=3) + assert allclose(cosmo.angular_diameter_distance(z), + [1651.4, 856.6, 26.489, 13.405] * u.Mpc, rtol=5e-4) + + # Next compare with doing the integral numerically in Mathematica, + # which allows more precision in the test. It is at least as + # good as 0.01%, possibly better + cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=0, Neff=3.04) + assert allclose(cosmo.angular_diameter_distance(z), + [1651.91, 858.205, 26.8586, 13.6469] * u.Mpc, rtol=1e-5) + cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725, Neff=3.04) + assert allclose(cosmo.angular_diameter_distance(z), + [1651.76, 857.817, 26.7688, 13.5841] * u.Mpc, rtol=1e-5) + cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=4.0, Neff=3.04) + assert allclose(cosmo.angular_diameter_distance(z), + [1651.21, 856.411, 26.4845, 13.4028] * u.Mpc, rtol=1e-5) + + # Just to be really sure, we also do a version where the integral + # is analytic, which is a Ode = 0 flat universe. In this case + # Integrate(1/E(x),{x,0,z}) = 2 ( sqrt((1+Or z)/(1+z)) - 1 )/(Or - 1) + # Recall that c/H0 * Integrate(1/E) is FLRW.comoving_distance. 
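+    # (To see the closed form: with Ode0 = 0 and Om0 + Or0 = 1, E(z)**2
+    #  collapses to (1 + z)**3 * (1 + Or0 * z), and the derivative of
+    #  sqrt((1 + Or0 * z) / (1 + z)) with respect to z works out to
+    #  (Or0 - 1) / (2 * E(z)), so the integral of 1/E follows directly.)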
+ Ogamma0h2 = 4 * 5.670373e-8 / 299792458.0 ** 3 * 2.725 ** 4 / 1.87837e-26 + Onu0h2 = Ogamma0h2 * 7.0 / 8.0 * (4.0 / 11.0) ** (4.0 / 3.0) * 3.04 + Or0 = (Ogamma0h2 + Onu0h2) / 0.7 ** 2 + Om0 = 1.0 - Or0 + hubdis = (299792.458 / 70.0) * u.Mpc + cosmo = core.FlatLambdaCDM(H0=70, Om0=Om0, Tcmb0=2.725, Neff=3.04) + targvals = 2.0 * hubdis * \ + (np.sqrt((1.0 + Or0 * z) / (1.0 + z)) - 1.0) / (Or0 - 1.0) + assert allclose(cosmo.comoving_distance(z), targvals, rtol=1e-5) + + # And integers for z + assert allclose(cosmo.comoving_distance(z.astype(np.int)), + targvals, rtol=1e-5) + + # Try Tcmb0 = 4 + Or0 *= (4.0 / 2.725) ** 4 + Om0 = 1.0 - Or0 + cosmo = core.FlatLambdaCDM(H0=70, Om0=Om0, Tcmb0=4.0, Neff=3.04) + targvals = 2.0 * hubdis * \ + (np.sqrt((1.0 + Or0 * z) / (1.0 + z)) - 1.0) / (Or0 - 1.0) + assert allclose(cosmo.comoving_distance(z), targvals, rtol=1e-5) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_tcmb(): + cosmo = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=2.5) + assert allclose(cosmo.Tcmb0, 2.5 * u.K) + assert allclose(cosmo.Tcmb(2), 7.5 * u.K) + z = [0.0, 1.0, 2.0, 3.0, 9.0] + assert allclose(cosmo.Tcmb(z), + [2.5, 5.0, 7.5, 10.0, 25.0] * u.K, rtol=1e-6) + # Make sure it's the same for integers + z = [0, 1, 2, 3, 9] + assert allclose(cosmo.Tcmb(z), + [2.5, 5.0, 7.5, 10.0, 25.0] * u.K, rtol=1e-6) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_tnu(): + cosmo = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=3.0) + assert allclose(cosmo.Tnu0, 2.1412975665108247 * u.K, rtol=1e-6) + assert allclose(cosmo.Tnu(2), 6.423892699532474 * u.K, rtol=1e-6) + z = [0.0, 1.0, 2.0, 3.0] + expected = [2.14129757, 4.28259513, 6.4238927, 8.56519027] * u.K + assert allclose(cosmo.Tnu(z), expected, rtol=1e-6) + + # Test for integers + z = [0, 1, 2, 3] + assert allclose(cosmo.Tnu(z), expected, rtol=1e-6) + + +def test_efunc_vs_invefunc(): + """ Test that efunc and inv_efunc give inverse values""" + + # Note that all of the subclasses here don't need + # scipy because they don't need to call de_density_scale + # The test following this tests the case where that is needed. 
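+    # (The built-in classes implement efunc and inv_efunc independently
+    #  -- see core.py and the scalar Cython routines in
+    #  scalar_inv_efuncs.pyx -- rather than defining one as the
+    #  reciprocal of the other, so checking efunc(z) * inv_efunc(z) == 1
+    #  guards against the two implementations drifting apart.)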
+ + z0 = 0.5 + z = np.array([0.5, 1.0, 2.0, 5.0]) + + # Below are the 'standard' included cosmologies + # We do the non-standard case in test_efunc_vs_invefunc_flrw, + # since it requires scipy + cosmo = core.LambdaCDM(70, 0.3, 0.5) + assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0)) + assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z)) + cosmo = core.LambdaCDM(70, 0.3, 0.5, m_nu=u.Quantity(0.01, u.eV)) + assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0)) + assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z)) + cosmo = core.FlatLambdaCDM(50.0, 0.27) + assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0)) + assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z)) + cosmo = core.wCDM(60.0, 0.27, 0.6, w0=-0.8) + assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0)) + assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z)) + cosmo = core.FlatwCDM(65.0, 0.27, w0=-0.6) + assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0)) + assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z)) + cosmo = core.w0waCDM(60.0, 0.25, 0.4, w0=-0.6, wa=0.1) + assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0)) + assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z)) + cosmo = core.Flatw0waCDM(55.0, 0.35, w0=-0.9, wa=-0.2) + assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0)) + assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z)) + cosmo = core.wpwaCDM(50.0, 0.3, 0.3, wp=-0.9, wa=-0.2, zp=0.3) + assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0)) + assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z)) + cosmo = core.w0wzCDM(55.0, 0.4, 0.8, w0=-1.05, wz=-0.2) + assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0)) + assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z)) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_efunc_vs_invefunc_flrw(): + """ Test that efunc and inv_efunc give inverse values""" + z0 = 0.5 + z = np.array([0.5, 1.0, 2.0, 5.0]) + + # FLRW is abstract, so requires test_cos_sub defined earlier + # This requires scipy, unlike the built-ins, because it + # calls de_density_scale, which has an integral in it + cosmo = test_cos_sub() + assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0)) + assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z)) + # Add neutrinos + cosmo = test_cos_subnu() + assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0)) + assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z)) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_kpc_methods(): + cosmo = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0) + assert allclose(cosmo.arcsec_per_kpc_comoving(3), + 0.0317179167 * u.arcsec / u.kpc) + assert allclose(cosmo.arcsec_per_kpc_proper(3), + 0.1268716668 * u.arcsec / u.kpc) + assert allclose(cosmo.kpc_comoving_per_arcmin(3), + 1891.6753126 * u.kpc / u.arcmin) + assert allclose(cosmo.kpc_proper_per_arcmin(3), + 472.918828 * u.kpc / u.arcmin) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_comoving_volume(): + + c_flat = core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.73, Tcmb0=0.0) + c_open = core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.0, Tcmb0=0.0) + c_closed = core.LambdaCDM(H0=70, Om0=2, Ode0=0.0, Tcmb0=0.0) + + # test against ned wright's calculator (cubic Gpc) + redshifts = np.array([0.5, 1, 2, 3, 5, 9]) + wright_flat = np.array([29.123, 159.529, 630.427, 1178.531, 2181.485, + 3654.802]) * u.Gpc**3 + wright_open = np.array([20.501, 99.019, 380.278, 747.049, 1558.363, + 3123.814]) * u.Gpc**3 + wright_closed = np.array([12.619, 44.708, 114.904, 173.709, 258.82, + 358.992]) * 
u.Gpc**3 + # The wright calculator isn't very accurate, so we use a rather + # modest precision + assert allclose(c_flat.comoving_volume(redshifts), wright_flat, + rtol=1e-2) + assert allclose(c_open.comoving_volume(redshifts), + wright_open, rtol=1e-2) + assert allclose(c_closed.comoving_volume(redshifts), + wright_closed, rtol=1e-2) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_differential_comoving_volume(): + from scipy.integrate import quad + + c_flat = core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.73, Tcmb0=0.0) + c_open = core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.0, Tcmb0=0.0) + c_closed = core.LambdaCDM(H0=70, Om0=2, Ode0=0.0, Tcmb0=0.0) + + # test that integration of differential_comoving_volume() + # yields same as comoving_volume() + redshifts = np.array([0.5, 1, 2, 3, 5, 9]) + wright_flat = np.array([29.123, 159.529, 630.427, 1178.531, 2181.485, + 3654.802]) * u.Gpc**3 + wright_open = np.array([20.501, 99.019, 380.278, 747.049, 1558.363, + 3123.814]) * u.Gpc**3 + wright_closed = np.array([12.619, 44.708, 114.904, 173.709, 258.82, + 358.992]) * u.Gpc**3 + # The wright calculator isn't very accurate, so we use a rather + # modest precision. + ftemp = lambda x: c_flat.differential_comoving_volume(x).value + otemp = lambda x: c_open.differential_comoving_volume(x).value + ctemp = lambda x: c_closed.differential_comoving_volume(x).value + # Multiply by solid_angle (4 * pi) + assert allclose(np.array([4.0 * np.pi * quad(ftemp, 0, redshift)[0] + for redshift in redshifts]) * u.Mpc**3, + wright_flat, rtol=1e-2) + assert allclose(np.array([4.0 * np.pi * quad(otemp, 0, redshift)[0] + for redshift in redshifts]) * u.Mpc**3, + wright_open, rtol=1e-2) + assert allclose(np.array([4.0 * np.pi * quad(ctemp, 0, redshift)[0] + for redshift in redshifts]) * u.Mpc**3, + wright_closed, rtol=1e-2) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_flat_open_closed_icosmo(): + """ Test against the tabulated values generated from icosmo.org + with three example cosmologies (flat, open and closed). 
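+    Each tabulated block below lists redshift, comoving transverse
+    distance, angular diameter distance and luminosity distance, all
+    in Mpc.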
+ """ + + cosmo_flat = """\ +# from icosmo (icosmo.org) +# Om 0.3 w -1 h 0.7 Ol 0.7 +# z comoving_transvers_dist angular_diameter_dist luminosity_dist + 0.0000000 0.0000000 0.0000000 0.0000000 + 0.16250000 669.77536 576.15085 778.61386 + 0.32500000 1285.5964 970.26143 1703.4152 + 0.50000000 1888.6254 1259.0836 2832.9381 + 0.66250000 2395.5489 1440.9317 3982.6000 + 0.82500000 2855.5732 1564.6976 5211.4210 + 1.0000000 3303.8288 1651.9144 6607.6577 + 1.1625000 3681.1867 1702.2829 7960.5663 + 1.3250000 4025.5229 1731.4077 9359.3408 + 1.5000000 4363.8558 1745.5423 10909.640 + 1.6625000 4651.4830 1747.0359 12384.573 + 1.8250000 4916.5970 1740.3883 13889.387 + 2.0000000 5179.8621 1726.6207 15539.586 + 2.1625000 5406.0204 1709.4136 17096.540 + 2.3250000 5616.5075 1689.1752 18674.888 + 2.5000000 5827.5418 1665.0120 20396.396 + 2.6625000 6010.4886 1641.0890 22013.414 + 2.8250000 6182.1688 1616.2533 23646.796 + 3.0000000 6355.6855 1588.9214 25422.742 + 3.1625000 6507.2491 1563.3031 27086.425 + 3.3250000 6650.4520 1537.6768 28763.205 + 3.5000000 6796.1499 1510.2555 30582.674 + 3.6625000 6924.2096 1485.0852 32284.127 + 3.8250000 7045.8876 1460.2876 33996.408 + 4.0000000 7170.3664 1434.0733 35851.832 + 4.1625000 7280.3423 1410.2358 37584.767 + 4.3250000 7385.3277 1386.9160 39326.870 + 4.5000000 7493.2222 1362.4040 41212.722 + 4.6625000 7588.9589 1340.2135 42972.480 +""" + + cosmo_open = """\ +# from icosmo (icosmo.org) +# Om 0.3 w -1 h 0.7 Ol 0.1 +# z comoving_transvers_dist angular_diameter_dist luminosity_dist + 0.0000000 0.0000000 0.0000000 0.0000000 + 0.16250000 643.08185 553.18868 747.58265 + 0.32500000 1200.9858 906.40441 1591.3062 + 0.50000000 1731.6262 1154.4175 2597.4393 + 0.66250000 2174.3252 1307.8648 3614.8157 + 0.82500000 2578.7616 1413.0201 4706.2399 + 1.0000000 2979.3460 1489.6730 5958.6920 + 1.1625000 3324.2002 1537.2024 7188.5829 + 1.3250000 3646.8432 1568.5347 8478.9104 + 1.5000000 3972.8407 1589.1363 9932.1017 + 1.6625000 4258.1131 1599.2913 11337.226 + 1.8250000 4528.5346 1603.0211 12793.110 + 2.0000000 4804.9314 1601.6438 14414.794 + 2.1625000 5049.2007 1596.5852 15968.097 + 2.3250000 5282.6693 1588.7727 17564.875 + 2.5000000 5523.0914 1578.0261 19330.820 + 2.6625000 5736.9813 1566.4113 21011.694 + 2.8250000 5942.5803 1553.6158 22730.370 + 3.0000000 6155.4289 1538.8572 24621.716 + 3.1625000 6345.6997 1524.4924 26413.975 + 3.3250000 6529.3655 1509.6799 28239.506 + 3.5000000 6720.2676 1493.3928 30241.204 + 3.6625000 6891.5474 1478.0799 32131.840 + 3.8250000 7057.4213 1462.6780 34052.058 + 4.0000000 7230.3723 1446.0745 36151.862 + 4.1625000 7385.9998 1430.7021 38130.224 + 4.3250000 7537.1112 1415.4199 40135.117 + 4.5000000 7695.0718 1399.1040 42322.895 + 4.6625000 7837.5510 1384.1150 44380.133 +""" + + cosmo_closed = """\ +# from icosmo (icosmo.org) +# Om 2 w -1 h 0.7 Ol 0.1 +# z comoving_transvers_dist angular_diameter_dist luminosity_dist + 0.0000000 0.0000000 0.0000000 0.0000000 + 0.16250000 601.80160 517.67879 699.59436 + 0.32500000 1057.9502 798.45297 1401.7840 + 0.50000000 1438.2161 958.81076 2157.3242 + 0.66250000 1718.6778 1033.7912 2857.3019 + 0.82500000 1948.2400 1067.5288 3555.5381 + 1.0000000 2152.7954 1076.3977 4305.5908 + 1.1625000 2312.3427 1069.2914 5000.4410 + 1.3250000 2448.9755 1053.3228 5693.8681 + 1.5000000 2575.6795 1030.2718 6439.1988 + 1.6625000 2677.9671 1005.8092 7130.0873 + 1.8250000 2768.1157 979.86398 7819.9270 + 2.0000000 2853.9222 951.30739 8561.7665 + 2.1625000 2924.8116 924.84161 9249.7167 + 2.3250000 2988.5333 898.80701 9936.8732 + 2.5000000 3050.3065 
871.51614 10676.073 + 2.6625000 3102.1909 847.01459 11361.774 + 2.8250000 3149.5043 823.39982 12046.854 + 3.0000000 3195.9966 798.99915 12783.986 + 3.1625000 3235.5334 777.30533 13467.908 + 3.3250000 3271.9832 756.52790 14151.327 + 3.5000000 3308.1758 735.15017 14886.791 + 3.6625000 3339.2521 716.19347 15569.263 + 3.8250000 3368.1489 698.06195 16251.319 + 4.0000000 3397.0803 679.41605 16985.401 + 4.1625000 3422.1142 662.87926 17666.664 + 4.3250000 3445.5542 647.05243 18347.576 + 4.5000000 3469.1805 630.76008 19080.493 + 4.6625000 3489.7534 616.29199 19760.729 +""" + + redshifts, dm, da, dl = np.loadtxt(StringIO(cosmo_flat), unpack=1) + dm = dm * u.Mpc + da = da * u.Mpc + dl = dl * u.Mpc + cosmo = core.LambdaCDM(H0=70, Om0=0.3, Ode0=0.70, Tcmb0=0.0) + assert allclose(cosmo.comoving_transverse_distance(redshifts), dm) + assert allclose(cosmo.angular_diameter_distance(redshifts), da) + assert allclose(cosmo.luminosity_distance(redshifts), dl) + + redshifts, dm, da, dl = np.loadtxt(StringIO(cosmo_open), unpack=1) + dm = dm * u.Mpc + da = da * u.Mpc + dl = dl * u.Mpc + cosmo = core.LambdaCDM(H0=70, Om0=0.3, Ode0=0.1, Tcmb0=0.0) + assert allclose(cosmo.comoving_transverse_distance(redshifts), dm) + assert allclose(cosmo.angular_diameter_distance(redshifts), da) + assert allclose(cosmo.luminosity_distance(redshifts), dl) + + redshifts, dm, da, dl = np.loadtxt(StringIO(cosmo_closed), unpack=1) + dm = dm * u.Mpc + da = da * u.Mpc + dl = dl * u.Mpc + cosmo = core.LambdaCDM(H0=70, Om0=2, Ode0=0.1, Tcmb0=0.0) + assert allclose(cosmo.comoving_transverse_distance(redshifts), dm) + assert allclose(cosmo.angular_diameter_distance(redshifts), da) + assert allclose(cosmo.luminosity_distance(redshifts), dl) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_integral(): + # Test integer vs. floating point inputs + cosmo = core.LambdaCDM(H0=73.2, Om0=0.3, Ode0=0.50) + assert allclose(cosmo.comoving_distance(3), + cosmo.comoving_distance(3.0), rtol=1e-7) + assert allclose(cosmo.comoving_distance([1, 2, 3, 5]), + cosmo.comoving_distance([1.0, 2.0, 3.0, 5.0]), + rtol=1e-7) + assert allclose(cosmo.efunc(6), cosmo.efunc(6.0), rtol=1e-7) + assert allclose(cosmo.efunc([1, 2, 6]), + cosmo.efunc([1.0, 2.0, 6.0]), rtol=1e-7) + assert allclose(cosmo.inv_efunc([1, 2, 6]), + cosmo.inv_efunc([1.0, 2.0, 6.0]), rtol=1e-7) + + +def test_wz(): + cosmo = core.LambdaCDM(H0=70, Om0=0.3, Ode0=0.70) + assert allclose(cosmo.w(1.0), -1.) 
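+    # For reference, the parametrizations checked below are, with
+    # a = 1/(1+z):
+    #   wCDM:    w(z) = w0
+    #   w0wzCDM: w(z) = w0 + wz * z
+    #   w0waCDM: w(z) = w0 + wa * (1 - a)   (the CPL form)
+    #   wpwaCDM: w(z) = wp + wa * (ap - a), with ap = 1/(1 + zp)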
+    assert allclose(cosmo.w([0.1, 0.2, 0.5, 1.5, 2.5, 11.5]),
+                    [-1., -1, -1, -1, -1, -1])
+
+    cosmo = core.wCDM(H0=70, Om0=0.3, Ode0=0.70, w0=-0.5)
+    assert allclose(cosmo.w(1.0), -0.5)
+    assert allclose(cosmo.w([0.1, 0.2, 0.5, 1.5, 2.5, 11.5]),
+                    [-0.5, -0.5, -0.5, -0.5, -0.5, -0.5])
+    assert allclose(cosmo.w0, -0.5)
+
+    cosmo = core.w0wzCDM(H0=70, Om0=0.3, Ode0=0.70, w0=-1, wz=0.5)
+    assert allclose(cosmo.w(1.0), -0.5)
+    assert allclose(cosmo.w([0.0, 0.5, 1.0, 1.5, 2.3]),
+                    [-1.0, -0.75, -0.5, -0.25, 0.15])
+    assert allclose(cosmo.w0, -1.0)
+    assert allclose(cosmo.wz, 0.5)
+
+    cosmo = core.w0waCDM(H0=70, Om0=0.3, Ode0=0.70, w0=-1, wa=-0.5)
+    assert allclose(cosmo.w0, -1.0)
+    assert allclose(cosmo.wa, -0.5)
+    assert allclose(cosmo.w(1.0), -1.25)
+    assert allclose(cosmo.w([0.0, 0.5, 1.0, 1.5, 2.3]),
+                    [-1, -1.16666667, -1.25, -1.3, -1.34848485])
+
+    cosmo = core.wpwaCDM(H0=70, Om0=0.3, Ode0=0.70, wp=-0.9,
+                         wa=0.2, zp=0.5)
+    assert allclose(cosmo.wp, -0.9)
+    assert allclose(cosmo.wa, 0.2)
+    assert allclose(cosmo.zp, 0.5)
+    assert allclose(cosmo.w(0.5), -0.9)
+    assert allclose(cosmo.w([0.1, 0.2, 0.5, 1.5, 2.5, 11.5]),
+                    [-0.94848485, -0.93333333, -0.9, -0.84666667,
+                     -0.82380952, -0.78266667])
+
+
+@pytest.mark.skipif('not HAS_SCIPY')
+def test_de_densityscale():
+    cosmo = core.LambdaCDM(H0=70, Om0=0.3, Ode0=0.70)
+    z = np.array([0.1, 0.2, 0.5, 1.5, 2.5])
+    assert allclose(cosmo.de_density_scale(z),
+                    [1.0, 1.0, 1.0, 1.0, 1.0])
+    # Integer check
+    assert allclose(cosmo.de_density_scale(3),
+                    cosmo.de_density_scale(3.0), rtol=1e-7)
+    assert allclose(cosmo.de_density_scale([1, 2, 3]),
+                    cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)
+
+    cosmo = core.wCDM(H0=70, Om0=0.3, Ode0=0.60, w0=-0.5)
+    assert allclose(cosmo.de_density_scale(z),
+                    [1.15369, 1.31453, 1.83712, 3.95285, 6.5479],
+                    rtol=1e-4)
+    assert allclose(cosmo.de_density_scale(3),
+                    cosmo.de_density_scale(3.0), rtol=1e-7)
+    assert allclose(cosmo.de_density_scale([1, 2, 3]),
+                    cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)
+
+    cosmo = core.w0wzCDM(H0=70, Om0=0.3, Ode0=0.50, w0=-1, wz=0.5)
+    assert allclose(cosmo.de_density_scale(z),
+                    [0.746048, 0.5635595, 0.25712378, 0.026664129,
+                     0.0035916468], rtol=1e-4)
+    assert allclose(cosmo.de_density_scale(3),
+                    cosmo.de_density_scale(3.0), rtol=1e-7)
+    assert allclose(cosmo.de_density_scale([1, 2, 3]),
+                    cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)
+
+    cosmo = core.w0waCDM(H0=70, Om0=0.3, Ode0=0.70, w0=-1, wa=-0.5)
+    assert allclose(cosmo.de_density_scale(z),
+                    [0.9934201, 0.9767912, 0.897450,
+                     0.622236, 0.4458753], rtol=1e-4)
+    assert allclose(cosmo.de_density_scale(3),
+                    cosmo.de_density_scale(3.0), rtol=1e-7)
+    assert allclose(cosmo.de_density_scale([1, 2, 3]),
+                    cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)
+
+    cosmo = core.wpwaCDM(H0=70, Om0=0.3, Ode0=0.70, wp=-0.9,
+                         wa=0.2, zp=0.5)
+    assert allclose(cosmo.de_density_scale(z),
+                    [1.012246048, 1.0280102, 1.087439,
+                     1.324988, 1.565746], rtol=1e-4)
+    assert allclose(cosmo.de_density_scale(3),
+                    cosmo.de_density_scale(3.0), rtol=1e-7)
+    assert allclose(cosmo.de_density_scale([1, 2, 3]),
+                    cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)
+
+
+@pytest.mark.skipif('not HAS_SCIPY')
+def test_age():
+    # WMAP7 but with Omega_relativistic = 0
+    tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
+    assert allclose(tcos.hubble_time, 13.889094057856937 * u.Gyr)
+    assert allclose(tcos.age(4), 1.5823603508870991 * u.Gyr)
+    assert allclose(tcos.age([1., 5.]),
+                    [5.97113193, 1.20553129] * u.Gyr)
+    assert allclose(tcos.age([1, 5]), [5.97113193, 1.20553129] * u.Gyr)
+
+    # Add relativistic species
+    tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=3.0)
+    assert allclose(tcos.age(4), 1.5773003779230699 * u.Gyr)
+    assert allclose(tcos.age([1, 5]), [5.96344942, 1.20093077] * u.Gyr)
+
+    # And massive neutrinos
+    tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=3.0,
+                              m_nu=0.1 * u.eV)
+    assert allclose(tcos.age(4), 1.5546485439853412 * u.Gyr)
+    assert allclose(tcos.age([1, 5]), [5.88448152, 1.18383759] * u.Gyr)
+
+
+@pytest.mark.skipif('not HAS_SCIPY')
+def test_distmod():
+    # WMAP7 but with Omega_relativistic = 0
+    tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
+    assert allclose(tcos.hubble_distance, 4258.415596590909 * u.Mpc)
+    assert allclose(tcos.distmod([1, 5]),
+                    [44.124857, 48.40167258] * u.mag)
+    assert allclose(tcos.distmod([1., 5.]),
+                    [44.124857, 48.40167258] * u.mag)
+
+
+@pytest.mark.skipif('not HAS_SCIPY')
+def test_neg_distmod():
+    # Cosmology with negative luminosity distances (perfectly okay,
+    # if obscure)
+    tcos = core.LambdaCDM(70, 0.2, 1.3, Tcmb0=0)
+    assert allclose(tcos.luminosity_distance([50, 100]),
+                    [16612.44047622, -46890.79092244] * u.Mpc)
+    assert allclose(tcos.distmod([50, 100]),
+                    [46.102167189, 48.355437790944] * u.mag)
+
+
+@pytest.mark.skipif('not HAS_SCIPY')
+def test_critical_density():
+    # WMAP7 but with Omega_relativistic = 0
+    # These tests will fail if astropy.const starts returning non-mks
+    # units by default; see the comment at the top of core.py
+    tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
+    assert allclose(tcos.critical_density0,
+                    9.309668456020899e-30 * u.g / u.cm**3)
+    assert allclose(tcos.critical_density0,
+                    tcos.critical_density(0))
+    assert allclose(tcos.critical_density([1, 5]),
+                    [2.70352772e-29, 5.53739080e-28] * u.g / u.cm**3)
+    assert allclose(tcos.critical_density([1., 5.]),
+                    [2.70352772e-29, 5.53739080e-28] * u.g / u.cm**3)
+
+
+@pytest.mark.skipif('not HAS_SCIPY')
+def test_comoving_distance_z1z2():
+    tcos = core.LambdaCDM(100, 0.3, 0.8, Tcmb0=0.0)
+    with pytest.raises(ValueError):  # test diff size z1, z2 fail
+        tcos._comoving_distance_z1z2((1, 2), (3, 4, 5))
+    # Comoving distances are invertible
+    assert allclose(tcos._comoving_distance_z1z2(1, 2),
+                    -tcos._comoving_distance_z1z2(2, 1))
+
+    z1 = 0, 0, 2, 0.5, 1
+    z2 = 2, 1, 1, 2.5, 1.1
+    results = (3767.90579253,
+               2386.25591391,
+               -1381.64987862,
+               2893.11776663,
+               174.1524683) * u.Mpc
+
+    assert allclose(tcos._comoving_distance_z1z2(z1, z2),
+                    results)
+
+
+@pytest.mark.skipif('not HAS_SCIPY')
+def test_comoving_transverse_distance_z1z2():
+    tcos = core.FlatLambdaCDM(100, 0.3, Tcmb0=0.0)
+    with pytest.raises(ValueError):  # test diff size z1, z2 fail
+        tcos._comoving_transverse_distance_z1z2((1, 2), (3, 4, 5))
+    # Tests that should actually work, target values computed with
+    # http://www.astro.multivax.de:8000/phillip/angsiz_prog/README.HTML
+    # Kayser, Helbig, and Schramm (Astron.Astrophys. 318 (1997) 680-686)
+    assert allclose(tcos._comoving_transverse_distance_z1z2(1, 2),
+                    1313.2232194828466 * u.Mpc)
+
+    # In a flat universe comoving distance and comoving transverse
+    # distance are identical
+    z1 = 0, 0, 2, 0.5, 1
+    z2 = 2, 1, 1, 2.5, 1.1
+
+    assert allclose(tcos._comoving_distance_z1z2(z1, z2),
+                    tcos._comoving_transverse_distance_z1z2(z1, z2))
+
+    # Test non-flat cases to avoid simply testing
+    # comoving_distance_z1z2. Test array, array case.
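+    # (For reference, per Hogg 1999: with D_H = c/H0 and D_C the
+    #  line-of-sight comoving distance, the transverse distance is
+    #  D_M = D_H/sqrt(Ok0) * sinh(sqrt(Ok0) * D_C/D_H) for Ok0 > 0 and
+    #  D_M = D_H/sqrt(|Ok0|) * sin(sqrt(|Ok0|) * D_C/D_H) for Ok0 < 0.)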
+ tcos = core.LambdaCDM(100, 0.3, 0.5, Tcmb0=0.0) + results = (3535.931375645655, + 2226.430046551708, + -1208.6817970036532, + 2595.567367601969, + 151.36592003406884) * u.Mpc + + assert allclose(tcos._comoving_transverse_distance_z1z2(z1, z2), + results) + + # Test positive curvature with scalar, array combination. + tcos = core.LambdaCDM(100, 1.0, 0.2, Tcmb0=0.0) + z1 = 0.1 + z2 = 0, 0.1, 0.2, 0.5, 1.1, 2 + results = (-281.31602666724865, + 0., + 248.58093707820436, + 843.9331377460543, + 1618.6104987686672, + 2287.5626543279927) * u.Mpc + + assert allclose(tcos._comoving_transverse_distance_z1z2(z1, z2), + results) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_angular_diameter_distance_z1z2(): + tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0) + with pytest.raises(ValueError): # test diff size z1, z2 fail + tcos.angular_diameter_distance_z1z2([1, 2], [3, 4, 5]) + # Tests that should actually work + assert allclose(tcos.angular_diameter_distance_z1z2(1, 2), + 646.22968662822018 * u.Mpc) + + z1 = 0, 0, 2, 0.5, 1 + z2 = 2, 1, 1, 2.5, 1.1 + results = (1760.0628637762106, + 1670.7497657219858, + -969.34452994, + 1159.0970895962193, + 115.72768186186921) * u.Mpc + + assert allclose(tcos.angular_diameter_distance_z1z2(z1, z2), + results) + + z1 = 0.1 + z2 = 0.1, 0.2, 0.5, 1.1, 2 + results = (0., + 332.09893173, + 986.35635069, + 1508.37010062, + 1621.07937976) * u.Mpc + assert allclose(tcos.angular_diameter_distance_z1z2(0.1, z2), + results) + + # Non-flat (positive Ok0) test + tcos = core.LambdaCDM(H0=70.4, Om0=0.2, Ode0=0.5, Tcmb0=0.0) + assert allclose(tcos.angular_diameter_distance_z1z2(1, 2), + 620.1175337852428 * u.Mpc) + # Non-flat (negative Ok0) test + tcos = core.LambdaCDM(H0=100, Om0=2, Ode0=1, Tcmb0=0.0) + assert allclose(tcos.angular_diameter_distance_z1z2(1, 2), + 228.42914659246014 * u.Mpc) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_absorption_distance(): + tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0) + assert allclose(tcos.absorption_distance([1, 3]), + [1.72576635, 7.98685853]) + assert allclose(tcos.absorption_distance([1., 3.]), + [1.72576635, 7.98685853]) + assert allclose(tcos.absorption_distance(3), 7.98685853) + assert allclose(tcos.absorption_distance(3.), 7.98685853) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_massivenu_basic(): + # Test no neutrinos case + tcos = core.FlatLambdaCDM(70.4, 0.272, Neff=4.05, + Tcmb0=2.725 * u.K, m_nu=u.Quantity(0, u.eV)) + assert allclose(tcos.Neff, 4.05) + assert not tcos.has_massive_nu + mnu = tcos.m_nu + assert len(mnu) == 4 + assert mnu.unit == u.eV + assert allclose(mnu, [0.0, 0.0, 0.0, 0.0] * u.eV) + assert allclose(tcos.nu_relative_density(1.), 0.22710731766 * 4.05, + rtol=1e-6) + assert allclose(tcos.nu_relative_density(1), 0.22710731766 * 4.05, + rtol=1e-6) + + # Alternative no neutrinos case + tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0 * u.K, + m_nu=u.Quantity(0.4, u.eV)) + assert not tcos.has_massive_nu + assert tcos.m_nu is None + + # Test basic setting, retrieval of values + tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=2.725 * u.K, + m_nu=u.Quantity([0.0, 0.01, 0.02], u.eV)) + assert tcos.has_massive_nu + mnu = tcos.m_nu + assert len(mnu) == 3 + assert mnu.unit == u.eV + assert allclose(mnu, [0.0, 0.01, 0.02] * u.eV) + + # All massive neutrinos case + tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=2.725, + m_nu=u.Quantity(0.1, u.eV), Neff=3.1) + assert allclose(tcos.Neff, 3.1) + assert tcos.has_massive_nu + mnu = tcos.m_nu + assert len(mnu) == 3 + assert mnu.unit == u.eV + assert allclose(mnu, 
[0.1, 0.1, 0.1] * u.eV) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_distances(): + # Test distance calculations for various special case + # scenarios (no relativistic species, normal, massive neutrinos) + # These do not come from external codes -- they are just internal + # checks to make sure nothing changes if we muck with the distance + # calculators + + z = np.array([1.0, 2.0, 3.0, 4.0]) + + # The pattern here is: no relativistic species, the relativistic + # species with massless neutrinos, then massive neutrinos + cos = core.LambdaCDM(75.0, 0.25, 0.5, Tcmb0=0.0) + assert allclose(cos.comoving_distance(z), + [2953.93001902, 4616.7134253, 5685.07765971, + 6440.80611897] * u.Mpc, rtol=1e-4) + cos = core.LambdaCDM(75.0, 0.25, 0.6, Tcmb0=3.0, Neff=3, + m_nu=u.Quantity(0.0, u.eV)) + assert allclose(cos.comoving_distance(z), + [3037.12620424, 4776.86236327, 5889.55164479, + 6671.85418235] * u.Mpc, rtol=1e-4) + cos = core.LambdaCDM(75.0, 0.3, 0.4, Tcmb0=3.0, Neff=3, + m_nu=u.Quantity(10.0, u.eV)) + assert allclose(cos.comoving_distance(z), + [2471.80626824, 3567.1902565, 4207.15995626, + 4638.20476018] * u.Mpc, rtol=1e-4) + # Flat + cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=0.0) + assert allclose(cos.comoving_distance(z), + [3180.83488552, 5060.82054204, 6253.6721173, + 7083.5374303] * u.Mpc, rtol=1e-4) + cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3, + m_nu=u.Quantity(0.0, u.eV)) + assert allclose(cos.comoving_distance(z), + [3180.42662867, 5059.60529655, 6251.62766102, + 7080.71698117] * u.Mpc, rtol=1e-4) + cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3, + m_nu=u.Quantity(10.0, u.eV)) + assert allclose(cos.comoving_distance(z), + [2337.54183142, 3371.91131264, 3988.40711188, + 4409.09346922] * u.Mpc, rtol=1e-4) + # Add w + cos = core.FlatwCDM(75.0, 0.25, w0=-1.05, Tcmb0=0.0) + assert allclose(cos.comoving_distance(z), + [3216.8296894, 5117.2097601, 6317.05995437, + 7149.68648536] * u.Mpc, rtol=1e-4) + cos = core.FlatwCDM(75.0, 0.25, w0=-0.95, Tcmb0=3.0, Neff=3, + m_nu=u.Quantity(0.0, u.eV)) + assert allclose(cos.comoving_distance(z), + [3143.56537758, 5000.32196494, 6184.11444601, + 7009.80166062] * u.Mpc, rtol=1e-4) + cos = core.FlatwCDM(75.0, 0.25, w0=-0.9, Tcmb0=3.0, Neff=3, + m_nu=u.Quantity(10.0, u.eV)) + assert allclose(cos.comoving_distance(z), + [2337.76035371, 3372.1971387, 3988.71362289, + 4409.40817174] * u.Mpc, rtol=1e-4) + # Non-flat w + cos = core.wCDM(75.0, 0.25, 0.4, w0=-0.9, Tcmb0=0.0) + assert allclose(cos.comoving_distance(z), + [2849.6163356, 4428.71661565, 5450.97862778, + 6179.37072324] * u.Mpc, rtol=1e-4) + cos = core.wCDM(75.0, 0.25, 0.4, w0=-1.1, Tcmb0=3.0, Neff=3, + m_nu=u.Quantity(0.0, u.eV)) + assert allclose(cos.comoving_distance(z), + [2904.35580229, 4511.11471267, 5543.43643353, + 6275.9206788] * u.Mpc, rtol=1e-4) + cos = core.wCDM(75.0, 0.25, 0.4, w0=-0.9, Tcmb0=3.0, Neff=3, + m_nu=u.Quantity(10.0, u.eV)) + assert allclose(cos.comoving_distance(z), + [2473.32522734, 3581.54519631, 4232.41674426, + 4671.83818117] * u.Mpc, rtol=1e-4) + # w0wa + cos = core.w0waCDM(75.0, 0.3, 0.6, w0=-0.9, wa=0.1, Tcmb0=0.0) + assert allclose(cos.comoving_distance(z), + [2937.7807638, 4572.59950903, 5611.52821924, + 6339.8549956] * u.Mpc, rtol=1e-4) + cos = core.w0waCDM(75.0, 0.25, 0.5, w0=-0.9, wa=0.1, Tcmb0=3.0, Neff=3, + m_nu=u.Quantity(0.0, u.eV)) + assert allclose(cos.comoving_distance(z), + [2907.34722624, 4539.01723198, 5593.51611281, + 6342.3228444] * u.Mpc, rtol=1e-4) + cos = core.w0waCDM(75.0, 0.25, 0.5, w0=-0.9, wa=0.1, Tcmb0=3.0, Neff=3, + 
m_nu=u.Quantity(10.0, u.eV)) + assert allclose(cos.comoving_distance(z), + [2507.18336722, 3633.33231695, 4292.44746919, + 4736.35404638] * u.Mpc, rtol=1e-4) + # Flatw0wa + cos = core.Flatw0waCDM(75.0, 0.25, w0=-0.95, wa=0.15, Tcmb0=0.0) + assert allclose(cos.comoving_distance(z), + [3123.29892781, 4956.15204302, 6128.15563818, + 6948.26480378] * u.Mpc, rtol=1e-4) + cos = core.Flatw0waCDM(75.0, 0.25, w0=-0.95, wa=0.15, Tcmb0=3.0, Neff=3, + m_nu=u.Quantity(0.0, u.eV)) + assert allclose(cos.comoving_distance(z), + [3122.92671907, 4955.03768936, 6126.25719576, + 6945.61856513] * u.Mpc, rtol=1e-4) + cos = core.Flatw0waCDM(75.0, 0.25, w0=-0.95, wa=0.15, Tcmb0=3.0, Neff=3, + m_nu=u.Quantity(10.0, u.eV)) + assert allclose(cos.comoving_distance(z), + [2337.70072701, 3372.13719963, 3988.6571093, + 4409.35399673] * u.Mpc, rtol=1e-4) + # wpwa + cos = core.wpwaCDM(75.0, 0.3, 0.6, wp=-0.9, zp=0.5, wa=0.1, Tcmb0=0.0) + assert allclose(cos.comoving_distance(z), + [2954.68975298, 4599.83254834, 5643.04013201, + 6373.36147627] * u.Mpc, rtol=1e-4) + cos = core.wpwaCDM(75.0, 0.25, 0.5, wp=-0.9, zp=0.4, wa=0.1, + Tcmb0=3.0, Neff=3, m_nu=u.Quantity(0.0, u.eV)) + assert allclose(cos.comoving_distance(z), + [2919.00656215, 4558.0218123, 5615.73412391, + 6366.10224229] * u.Mpc, rtol=1e-4) + cos = core.wpwaCDM(75.0, 0.25, 0.5, wp=-0.9, zp=1.0, wa=0.1, Tcmb0=3.0, + Neff=4, m_nu=u.Quantity(5.0, u.eV)) + assert allclose(cos.comoving_distance(z), + [2629.48489827, 3874.13392319, 4614.31562397, + 5116.51184842] * u.Mpc, rtol=1e-4) + + # w0wz + cos = core.w0wzCDM(75.0, 0.3, 0.6, w0=-0.9, wz=0.1, Tcmb0=0.0) + assert allclose(cos.comoving_distance(z), + [3051.68786716, 4756.17714818, 5822.38084257, + 6562.70873734] * u.Mpc, rtol=1e-4) + cos = core.w0wzCDM(75.0, 0.25, 0.5, w0=-0.9, wz=0.1, + Tcmb0=3.0, Neff=3, m_nu=u.Quantity(0.0, u.eV)) + assert allclose(cos.comoving_distance(z), + [2997.8115653, 4686.45599916, 5764.54388557, + 6524.17408738] * u.Mpc, rtol=1e-4) + cos = core.w0wzCDM(75.0, 0.25, 0.5, w0=-0.9, wz=0.1, Tcmb0=3.0, + Neff=4, m_nu=u.Quantity(5.0, u.eV)) + assert allclose(cos.comoving_distance(z), + [2676.73467639, 3940.57967585, 4686.90810278, + 5191.54178243] * u.Mpc, rtol=1e-4) + + # Also test different numbers of massive neutrinos + # for FlatLambdaCDM to give the scalar nu density functions a + # work out + cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, + m_nu=u.Quantity([10.0, 0, 0], u.eV)) + assert allclose(cos.comoving_distance(z), + [2777.71589173, 4186.91111666, 5046.0300719, + 5636.10397302] * u.Mpc, rtol=1e-4) + cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, + m_nu=u.Quantity([10.0, 5, 0], u.eV)) + assert allclose(cos.comoving_distance(z), + [2636.48149391, 3913.14102091, 4684.59108974, + 5213.07557084] * u.Mpc, rtol=1e-4) + cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, + m_nu=u.Quantity([4.0, 5, 9], u.eV)) + assert allclose(cos.comoving_distance(z), + [2563.5093049, 3776.63362071, 4506.83448243, + 5006.50158829] * u.Mpc, rtol=1e-4) + cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=4.2, + m_nu=u.Quantity([1.0, 4.0, 5, 9], u.eV)) + assert allclose(cos.comoving_distance(z), + [2525.58017482, 3706.87633298, 4416.58398847, + 4901.96669755] * u.Mpc, rtol=1e-4) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_massivenu_density(): + # Testing neutrino density calculation + + # Simple test cosmology, where we compare rho_nu and rho_gamma + # against the exact formula (eq 24/25 of Komatsu et al. 2011) + # computed using Mathematica. 
The approximation we use for f(y) + # is only good to ~ 0.5% (with some redshift dependence), so that's + # what we test to. + ztest = np.array([0.0, 1.0, 2.0, 10.0, 1000.0]) + nuprefac = 7.0 / 8.0 * (4.0 / 11.0) ** (4.0 / 3.0) + # First try 3 massive neutrinos, all 100 eV -- note this is a universe + # seriously dominated by neutrinos! + tcos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3, + m_nu=u.Quantity(100.0, u.eV)) + assert tcos.has_massive_nu + assert tcos.Neff == 3 + nurel_exp = nuprefac * tcos.Neff * np.array([171969, 85984.5, 57323, + 15633.5, 171.801]) + assert allclose(tcos.nu_relative_density(ztest), nurel_exp, rtol=5e-3) + assert allclose(tcos.efunc([0.0, 1.0]), [1.0, 7.46144727668], rtol=5e-3) + + # Next, slightly less massive + tcos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3, + m_nu=u.Quantity(0.25, u.eV)) + nurel_exp = nuprefac * tcos.Neff * np.array([429.924, 214.964, 143.312, + 39.1005, 1.11086]) + assert allclose(tcos.nu_relative_density(ztest), nurel_exp, + rtol=5e-3) + + # For this one also test Onu directly + onu_exp = np.array([0.01890217, 0.05244681, 0.0638236, + 0.06999286, 0.1344951]) + assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3) + + # And fairly light + tcos = core.FlatLambdaCDM(80.0, 0.30, Tcmb0=3.0, Neff=3, + m_nu=u.Quantity(0.01, u.eV)) + + nurel_exp = nuprefac * tcos.Neff * np.array([17.2347, 8.67345, 5.84348, + 1.90671, 1.00021]) + assert allclose(tcos.nu_relative_density(ztest), nurel_exp, + rtol=5e-3) + onu_exp = np.array([0.00066599, 0.00172677, 0.0020732, + 0.00268404, 0.0978313]) + assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3) + assert allclose(tcos.efunc([1.0, 2.0]), [1.76225893, 2.97022048], + rtol=1e-4) + assert allclose(tcos.inv_efunc([1.0, 2.0]), [0.5674535, 0.33667534], + rtol=1e-4) + + # Now a mixture of neutrino masses, with non-integer Neff + tcos = core.FlatLambdaCDM(80.0, 0.30, Tcmb0=3.0, Neff=3.04, + m_nu=u.Quantity([0.0, 0.01, 0.25], u.eV)) + nurel_exp = nuprefac * tcos.Neff * \ + np.array([149.386233, 74.87915, 50.0518, + 14.002403, 1.03702333]) + assert allclose(tcos.nu_relative_density(ztest), nurel_exp, + rtol=5e-3) + onu_exp = np.array([0.00584959, 0.01493142, 0.01772291, + 0.01963451, 0.10227728]) + assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3) + + # Integer redshifts + ztest = ztest.astype(np.int) + assert allclose(tcos.nu_relative_density(ztest), nurel_exp, + rtol=5e-3) + assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3) + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_z_at_value(): + # These are tests of expected values, and hence have less precision + # than the roundtrip tests below (test_z_at_value_roundtrip); + # here we have to worry about the cosmological calculations + # giving slightly different values on different architectures, + # there we are checking internal consistency on the same architecture + # and so can be more demanding + z_at_value = funcs.z_at_value + cosmo = core.Planck13 + d = cosmo.luminosity_distance(3) + assert allclose(z_at_value(cosmo.luminosity_distance, d), 3, + rtol=1e-8) + assert allclose(z_at_value(cosmo.age, 2 * u.Gyr), 3.198122684356, + rtol=1e-6) + assert allclose(z_at_value(cosmo.luminosity_distance, 1e4 * u.Mpc), + 1.3685790653802761, rtol=1e-6) + assert allclose(z_at_value(cosmo.lookback_time, 7 * u.Gyr), + 0.7951983674601507, rtol=1e-6) + assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, + zmax=2), 0.68127769625288614, rtol=1e-6) + assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, + zmin=2.5), 
                    3.7914908028272083, rtol=1e-6)
+    assert allclose(z_at_value(cosmo.distmod, 46 * u.mag),
+                    1.9913891680278133, rtol=1e-6)
+
+    # test behaviour when the solution is outside z limits (should
+    # raise a CosmologyError)
+    with pytest.raises(core.CosmologyError):
+        z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, zmax=0.5)
+    with pytest.raises(core.CosmologyError):
+        z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, zmin=4.)
+
+
+@pytest.mark.skipif('not HAS_SCIPY')
+def test_z_at_value_roundtrip():
+    """
+    Calculate values from a known redshift, and then check that
+    z_at_value returns the right answer.
+    """
+    z = 0.5
+
+    # Skip Ok, w, de_density_scale because in the Planck13 cosmology
+    # they are redshift independent and hence uninvertible;
+    # *_distance_z1z2 methods take multiple arguments, so require
+    # special handling.
+    # clone isn't a redshift-dependent method
+    skip = ('Ok',
+            'angular_diameter_distance_z1z2',
+            'clone',
+            'de_density_scale', 'w')
+
+    import inspect
+    methods = inspect.getmembers(core.Planck13, predicate=inspect.ismethod)
+
+    for name, func in methods:
+        if name.startswith('_') or name in skip:
+            continue
+        print('Round-trip testing {0}'.format(name))
+        fval = func(z)
+        # we need zmax here to pick the right solution for
+        # angular_diameter_distance and related methods.
+        # Be slightly more generous with rtol than the default 1e-8
+        # used in z_at_value
+        assert allclose(z, funcs.z_at_value(func, fval, zmax=1.5),
+                        rtol=2e-8)
+
+    # Test distance functions between two redshifts
+    z2 = 2.0
+    func_z1z2 = [lambda z1: core.Planck13._comoving_distance_z1z2(z1, z2),
+                 lambda z1:
+                 core.Planck13._comoving_transverse_distance_z1z2(z1, z2),
+                 lambda z1:
+                 core.Planck13.angular_diameter_distance_z1z2(z1, z2)]
+    for func in func_z1z2:
+        fval = func(z)
+        assert allclose(z, funcs.z_at_value(func, fval, zmax=1.5),
+                        rtol=2e-8)
diff --git a/astropy/cosmology/tests/test_pickle.py b/astropy/cosmology/tests/test_pickle.py
new file mode 100644
index 0000000..d553179
--- /dev/null
+++ b/astropy/cosmology/tests/test_pickle.py
@@ -0,0 +1,19 @@
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+from __future__ import absolute_import, division, print_function, unicode_literals
+
+import pytest
+
+from ...tests.helper import pickle_protocol, check_pickling_recovery
+from ...extern.six.moves import zip
+from ... import cosmology as cosm
+
+originals = [cosm.FLRW]
+xfails = [False]
+
+
+@pytest.mark.parametrize(("original", "xfail"),
+                         zip(originals, xfails))
+def test_flrw(pickle_protocol, original, xfail):
+    if xfail:
+        pytest.xfail()
+    check_pickling_recovery(original, pickle_protocol)
diff --git a/astropy/cython_version.py b/astropy/cython_version.py
new file mode 100644
index 0000000..bf70d8c
--- /dev/null
+++ b/astropy/cython_version.py
@@ -0,0 +1,2 @@
+# Generated file; do not modify
+cython_version = '0.27.3'
diff --git a/astropy/extern/__init__.py b/astropy/extern/__init__.py
new file mode 100644
index 0000000..4c54f84
--- /dev/null
+++ b/astropy/extern/__init__.py
@@ -0,0 +1,10 @@
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+"""
+This package contains Python packages that are bundled with Astropy but are
+external to Astropy, and hence are developed in a separate source tree. Note
+that this package is distinct from the /cextern directory of the source code
+distribution, as that directory only contains C extension code.
+
+See the README.rst in this directory of the Astropy source repository for more
+details.
+""" diff --git a/astropy/extern/bundled/__init__.py b/astropy/extern/bundled/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/astropy/extern/bundled/six.py b/astropy/extern/bundled/six.py new file mode 100644 index 0000000..190c023 --- /dev/null +++ b/astropy/extern/bundled/six.py @@ -0,0 +1,868 @@ +"""Utilities for writing code that runs on Python 2 and 3""" + +# Copyright (c) 2010-2015 Benjamin Peterson +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from __future__ import absolute_import + +import functools +import itertools +import operator +import sys +import types + +__author__ = "Benjamin Peterson " +__version__ = "1.10.0" + + +# Useful for very coarse version differentiation. +PY2 = sys.version_info[0] == 2 +PY3 = sys.version_info[0] == 3 +PY34 = sys.version_info[0:2] >= (3, 4) + +if PY3: + string_types = str, + integer_types = int, + class_types = type, + text_type = str + binary_type = bytes + + MAXSIZE = sys.maxsize +else: + string_types = basestring, + integer_types = (int, long) + class_types = (type, types.ClassType) + text_type = unicode + binary_type = str + + if sys.platform.startswith("java"): + # Jython always uses 32 bits. + MAXSIZE = int((1 << 31) - 1) + else: + # It's possible to have sizeof(long) != sizeof(Py_ssize_t). + class X(object): + + def __len__(self): + return 1 << 31 + try: + len(X()) + except OverflowError: + # 32-bit + MAXSIZE = int((1 << 31) - 1) + else: + # 64-bit + MAXSIZE = int((1 << 63) - 1) + del X + + +def _add_doc(func, doc): + """Add documentation to a function.""" + func.__doc__ = doc + + +def _import_module(name): + """Import module, returning the module after the last dot.""" + __import__(name) + return sys.modules[name] + + +class _LazyDescr(object): + + def __init__(self, name): + self.name = name + + def __get__(self, obj, tp): + result = self._resolve() + setattr(obj, self.name, result) # Invokes __set__. + try: + # This is a bit ugly, but it avoids running this again by + # removing this descriptor. 
+ delattr(obj.__class__, self.name) + except AttributeError: + pass + return result + + +class MovedModule(_LazyDescr): + + def __init__(self, name, old, new=None): + super(MovedModule, self).__init__(name) + if PY3: + if new is None: + new = name + self.mod = new + else: + self.mod = old + + def _resolve(self): + return _import_module(self.mod) + + def __getattr__(self, attr): + _module = self._resolve() + value = getattr(_module, attr) + setattr(self, attr, value) + return value + + +class _LazyModule(types.ModuleType): + + def __init__(self, name): + super(_LazyModule, self).__init__(name) + self.__doc__ = self.__class__.__doc__ + + def __dir__(self): + attrs = ["__doc__", "__name__"] + attrs += [attr.name for attr in self._moved_attributes] + return attrs + + # Subclasses should override this + _moved_attributes = [] + + +class MovedAttribute(_LazyDescr): + + def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): + super(MovedAttribute, self).__init__(name) + if PY3: + if new_mod is None: + new_mod = name + self.mod = new_mod + if new_attr is None: + if old_attr is None: + new_attr = name + else: + new_attr = old_attr + self.attr = new_attr + else: + self.mod = old_mod + if old_attr is None: + old_attr = name + self.attr = old_attr + + def _resolve(self): + module = _import_module(self.mod) + return getattr(module, self.attr) + + +class _SixMetaPathImporter(object): + + """ + A meta path importer to import six.moves and its submodules. + + This class implements a PEP302 finder and loader. It should be compatible + with Python 2.5 and all existing versions of Python3 + """ + + def __init__(self, six_module_name): + self.name = six_module_name + self.known_modules = {} + + def _add_module(self, mod, *fullnames): + for fullname in fullnames: + self.known_modules[self.name + "." + fullname] = mod + + def _get_module(self, fullname): + return self.known_modules[self.name + "." + fullname] + + def find_module(self, fullname, path=None): + if fullname in self.known_modules: + return self + return None + + def __get_module(self, fullname): + try: + return self.known_modules[fullname] + except KeyError: + raise ImportError("This loader does not know module " + fullname) + + def load_module(self, fullname): + try: + # in case of a reload + return sys.modules[fullname] + except KeyError: + pass + mod = self.__get_module(fullname) + if isinstance(mod, MovedModule): + mod = mod._resolve() + else: + mod.__loader__ = self + sys.modules[fullname] = mod + return mod + + def is_package(self, fullname): + """ + Return true, if the named module is a package. 
+ + We need this method to get correct spec objects with + Python 3.4 (see PEP451) + """ + return hasattr(self.__get_module(fullname), "__path__") + + def get_code(self, fullname): + """Return None + + Required, if is_package is implemented""" + self.__get_module(fullname) # eventually raises ImportError + return None + get_source = get_code # same as get_code + +_importer = _SixMetaPathImporter(__name__) + + +class _MovedItems(_LazyModule): + + """Lazy loading of moved objects""" + __path__ = [] # mark as package + + +_moved_attributes = [ + MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), + MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), + MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), + MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), + MovedAttribute("intern", "__builtin__", "sys"), + MovedAttribute("map", "itertools", "builtins", "imap", "map"), + MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"), + MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"), + MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), + MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"), + MovedAttribute("reduce", "__builtin__", "functools"), + MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), + MovedAttribute("StringIO", "StringIO", "io"), + MovedAttribute("UserDict", "UserDict", "collections"), + MovedAttribute("UserList", "UserList", "collections"), + MovedAttribute("UserString", "UserString", "collections"), + MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), + MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), + MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), + MovedModule("builtins", "__builtin__"), + MovedModule("configparser", "ConfigParser"), + MovedModule("copyreg", "copy_reg"), + MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), + MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"), + MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), + MovedModule("http_cookies", "Cookie", "http.cookies"), + MovedModule("html_entities", "htmlentitydefs", "html.entities"), + MovedModule("html_parser", "HTMLParser", "html.parser"), + MovedModule("http_client", "httplib", "http.client"), + MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), + MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), + MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), + MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), + MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), + MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), + MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), + MovedModule("cPickle", "cPickle", "pickle"), + MovedModule("queue", "Queue"), + MovedModule("reprlib", "repr"), + MovedModule("socketserver", "SocketServer"), + MovedModule("_thread", "thread", "_thread"), + MovedModule("tkinter", "Tkinter"), + MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), + MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), + MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), + MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), + MovedModule("tkinter_tix", "Tix", "tkinter.tix"), + MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"), + 
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), + MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), + MovedModule("tkinter_colorchooser", "tkColorChooser", + "tkinter.colorchooser"), + MovedModule("tkinter_commondialog", "tkCommonDialog", + "tkinter.commondialog"), + MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), + MovedModule("tkinter_font", "tkFont", "tkinter.font"), + MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), + MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", + "tkinter.simpledialog"), + MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), + MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), + MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), + MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), + MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), + MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"), +] +# Add windows specific modules. +if sys.platform == "win32": + _moved_attributes += [ + MovedModule("winreg", "_winreg"), + ] + +for attr in _moved_attributes: + setattr(_MovedItems, attr.name, attr) + if isinstance(attr, MovedModule): + _importer._add_module(attr, "moves." + attr.name) +del attr + +_MovedItems._moved_attributes = _moved_attributes + +moves = _MovedItems(__name__ + ".moves") +_importer._add_module(moves, "moves") + + +class Module_six_moves_urllib_parse(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_parse""" + + +_urllib_parse_moved_attributes = [ + MovedAttribute("ParseResult", "urlparse", "urllib.parse"), + MovedAttribute("SplitResult", "urlparse", "urllib.parse"), + MovedAttribute("parse_qs", "urlparse", "urllib.parse"), + MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), + MovedAttribute("urldefrag", "urlparse", "urllib.parse"), + MovedAttribute("urljoin", "urlparse", "urllib.parse"), + MovedAttribute("urlparse", "urlparse", "urllib.parse"), + MovedAttribute("urlsplit", "urlparse", "urllib.parse"), + MovedAttribute("urlunparse", "urlparse", "urllib.parse"), + MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), + MovedAttribute("quote", "urllib", "urllib.parse"), + MovedAttribute("quote_plus", "urllib", "urllib.parse"), + MovedAttribute("unquote", "urllib", "urllib.parse"), + MovedAttribute("unquote_plus", "urllib", "urllib.parse"), + MovedAttribute("urlencode", "urllib", "urllib.parse"), + MovedAttribute("splitquery", "urllib", "urllib.parse"), + MovedAttribute("splittag", "urllib", "urllib.parse"), + MovedAttribute("splituser", "urllib", "urllib.parse"), + MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), + MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), + MovedAttribute("uses_params", "urlparse", "urllib.parse"), + MovedAttribute("uses_query", "urlparse", "urllib.parse"), + MovedAttribute("uses_relative", "urlparse", "urllib.parse"), +] +for attr in _urllib_parse_moved_attributes: + setattr(Module_six_moves_urllib_parse, attr.name, attr) +del attr + +Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes + +_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), + "moves.urllib_parse", "moves.urllib.parse") + + +class Module_six_moves_urllib_error(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_error""" + + +_urllib_error_moved_attributes = [ + MovedAttribute("URLError", "urllib2", "urllib.error"), + 
MovedAttribute("HTTPError", "urllib2", "urllib.error"), + MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), +] +for attr in _urllib_error_moved_attributes: + setattr(Module_six_moves_urllib_error, attr.name, attr) +del attr + +Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes + +_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), + "moves.urllib_error", "moves.urllib.error") + + +class Module_six_moves_urllib_request(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_request""" + + +_urllib_request_moved_attributes = [ + MovedAttribute("urlopen", "urllib2", "urllib.request"), + MovedAttribute("install_opener", "urllib2", "urllib.request"), + MovedAttribute("build_opener", "urllib2", "urllib.request"), + MovedAttribute("pathname2url", "urllib", "urllib.request"), + MovedAttribute("url2pathname", "urllib", "urllib.request"), + MovedAttribute("getproxies", "urllib", "urllib.request"), + MovedAttribute("Request", "urllib2", "urllib.request"), + MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), + MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"), + MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), + MovedAttribute("BaseHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), + MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), + MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), + MovedAttribute("FileHandler", "urllib2", "urllib.request"), + MovedAttribute("FTPHandler", "urllib2", "urllib.request"), + MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), + MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), + MovedAttribute("urlretrieve", "urllib", "urllib.request"), + MovedAttribute("urlcleanup", "urllib", "urllib.request"), + MovedAttribute("URLopener", "urllib", "urllib.request"), + MovedAttribute("FancyURLopener", "urllib", "urllib.request"), + MovedAttribute("proxy_bypass", "urllib", "urllib.request"), +] +for attr in _urllib_request_moved_attributes: + setattr(Module_six_moves_urllib_request, attr.name, attr) +del attr + +Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes + +_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), + "moves.urllib_request", "moves.urllib.request") + + +class Module_six_moves_urllib_response(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_response""" + + +_urllib_response_moved_attributes = [ + MovedAttribute("addbase", "urllib", "urllib.response"), + MovedAttribute("addclosehook", "urllib", "urllib.response"), + MovedAttribute("addinfo", "urllib", "urllib.response"), + MovedAttribute("addinfourl", "urllib", 
"urllib.response"), +] +for attr in _urllib_response_moved_attributes: + setattr(Module_six_moves_urllib_response, attr.name, attr) +del attr + +Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes + +_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), + "moves.urllib_response", "moves.urllib.response") + + +class Module_six_moves_urllib_robotparser(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_robotparser""" + + +_urllib_robotparser_moved_attributes = [ + MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), +] +for attr in _urllib_robotparser_moved_attributes: + setattr(Module_six_moves_urllib_robotparser, attr.name, attr) +del attr + +Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes + +_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), + "moves.urllib_robotparser", "moves.urllib.robotparser") + + +class Module_six_moves_urllib(types.ModuleType): + + """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" + __path__ = [] # mark as package + parse = _importer._get_module("moves.urllib_parse") + error = _importer._get_module("moves.urllib_error") + request = _importer._get_module("moves.urllib_request") + response = _importer._get_module("moves.urllib_response") + robotparser = _importer._get_module("moves.urllib_robotparser") + + def __dir__(self): + return ['parse', 'error', 'request', 'response', 'robotparser'] + +_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), + "moves.urllib") + + +def add_move(move): + """Add an item to six.moves.""" + setattr(_MovedItems, move.name, move) + + +def remove_move(name): + """Remove item from six.moves.""" + try: + delattr(_MovedItems, name) + except AttributeError: + try: + del moves.__dict__[name] + except KeyError: + raise AttributeError("no such move, %r" % (name,)) + + +if PY3: + _meth_func = "__func__" + _meth_self = "__self__" + + _func_closure = "__closure__" + _func_code = "__code__" + _func_defaults = "__defaults__" + _func_globals = "__globals__" +else: + _meth_func = "im_func" + _meth_self = "im_self" + + _func_closure = "func_closure" + _func_code = "func_code" + _func_defaults = "func_defaults" + _func_globals = "func_globals" + + +try: + advance_iterator = next +except NameError: + def advance_iterator(it): + return it.next() +next = advance_iterator + + +try: + callable = callable +except NameError: + def callable(obj): + return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) + + +if PY3: + def get_unbound_function(unbound): + return unbound + + create_bound_method = types.MethodType + + def create_unbound_method(func, cls): + return func + + Iterator = object +else: + def get_unbound_function(unbound): + return unbound.im_func + + def create_bound_method(func, obj): + return types.MethodType(func, obj, obj.__class__) + + def create_unbound_method(func, cls): + return types.MethodType(func, None, cls) + + class Iterator(object): + + def next(self): + return type(self).__next__(self) + + callable = callable +_add_doc(get_unbound_function, + """Get the function out of a possibly unbound function""") + + +get_method_function = operator.attrgetter(_meth_func) +get_method_self = operator.attrgetter(_meth_self) +get_function_closure = operator.attrgetter(_func_closure) +get_function_code = operator.attrgetter(_func_code) +get_function_defaults = 
operator.attrgetter(_func_defaults) +get_function_globals = operator.attrgetter(_func_globals) + + +if PY3: + def iterkeys(d, **kw): + return iter(d.keys(**kw)) + + def itervalues(d, **kw): + return iter(d.values(**kw)) + + def iteritems(d, **kw): + return iter(d.items(**kw)) + + def iterlists(d, **kw): + return iter(d.lists(**kw)) + + viewkeys = operator.methodcaller("keys") + + viewvalues = operator.methodcaller("values") + + viewitems = operator.methodcaller("items") +else: + def iterkeys(d, **kw): + return d.iterkeys(**kw) + + def itervalues(d, **kw): + return d.itervalues(**kw) + + def iteritems(d, **kw): + return d.iteritems(**kw) + + def iterlists(d, **kw): + return d.iterlists(**kw) + + viewkeys = operator.methodcaller("viewkeys") + + viewvalues = operator.methodcaller("viewvalues") + + viewitems = operator.methodcaller("viewitems") + +_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") +_add_doc(itervalues, "Return an iterator over the values of a dictionary.") +_add_doc(iteritems, + "Return an iterator over the (key, value) pairs of a dictionary.") +_add_doc(iterlists, + "Return an iterator over the (key, [values]) pairs of a dictionary.") + + +if PY3: + def b(s): + return s.encode("latin-1") + + def u(s): + return s + unichr = chr + import struct + int2byte = struct.Struct(">B").pack + del struct + byte2int = operator.itemgetter(0) + indexbytes = operator.getitem + iterbytes = iter + import io + StringIO = io.StringIO + BytesIO = io.BytesIO + _assertCountEqual = "assertCountEqual" + if sys.version_info[1] <= 1: + _assertRaisesRegex = "assertRaisesRegexp" + _assertRegex = "assertRegexpMatches" + else: + _assertRaisesRegex = "assertRaisesRegex" + _assertRegex = "assertRegex" +else: + def b(s): + return s + # Workaround for standalone backslash + + def u(s): + return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") + unichr = unichr + int2byte = chr + + def byte2int(bs): + return ord(bs[0]) + + def indexbytes(buf, i): + return ord(buf[i]) + iterbytes = functools.partial(itertools.imap, ord) + import StringIO + StringIO = BytesIO = StringIO.StringIO + _assertCountEqual = "assertItemsEqual" + _assertRaisesRegex = "assertRaisesRegexp" + _assertRegex = "assertRegexpMatches" +_add_doc(b, """Byte literal""") +_add_doc(u, """Text literal""") + + +def assertCountEqual(self, *args, **kwargs): + return getattr(self, _assertCountEqual)(*args, **kwargs) + + +def assertRaisesRegex(self, *args, **kwargs): + return getattr(self, _assertRaisesRegex)(*args, **kwargs) + + +def assertRegex(self, *args, **kwargs): + return getattr(self, _assertRegex)(*args, **kwargs) + + +if PY3: + exec_ = getattr(moves.builtins, "exec") + + def reraise(tp, value, tb=None): + if value is None: + value = tp() + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value + +else: + def exec_(_code_, _globs_=None, _locs_=None): + """Execute code in a namespace.""" + if _globs_ is None: + frame = sys._getframe(1) + _globs_ = frame.f_globals + if _locs_ is None: + _locs_ = frame.f_locals + del frame + elif _locs_ is None: + _locs_ = _globs_ + exec("""exec _code_ in _globs_, _locs_""") + + exec_("""def reraise(tp, value, tb=None): + raise tp, value, tb +""") + + +if sys.version_info[:2] == (3, 2): + exec_("""def raise_from(value, from_value): + if from_value is None: + raise value + raise value from from_value +""") +elif sys.version_info[:2] > (3, 2): + exec_("""def raise_from(value, from_value): + raise value from from_value +""") +else: + def raise_from(value, from_value): + 
raise value + + +print_ = getattr(moves.builtins, "print", None) +if print_ is None: + def print_(*args, **kwargs): + """The new-style print function for Python 2.4 and 2.5.""" + fp = kwargs.pop("file", sys.stdout) + if fp is None: + return + + def write(data): + if not isinstance(data, basestring): + data = str(data) + # If the file has an encoding, encode unicode with it. + if (isinstance(fp, file) and + isinstance(data, unicode) and + fp.encoding is not None): + errors = getattr(fp, "errors", None) + if errors is None: + errors = "strict" + data = data.encode(fp.encoding, errors) + fp.write(data) + want_unicode = False + sep = kwargs.pop("sep", None) + if sep is not None: + if isinstance(sep, unicode): + want_unicode = True + elif not isinstance(sep, str): + raise TypeError("sep must be None or a string") + end = kwargs.pop("end", None) + if end is not None: + if isinstance(end, unicode): + want_unicode = True + elif not isinstance(end, str): + raise TypeError("end must be None or a string") + if kwargs: + raise TypeError("invalid keyword arguments to print()") + if not want_unicode: + for arg in args: + if isinstance(arg, unicode): + want_unicode = True + break + if want_unicode: + newline = unicode("\n") + space = unicode(" ") + else: + newline = "\n" + space = " " + if sep is None: + sep = space + if end is None: + end = newline + for i, arg in enumerate(args): + if i: + write(sep) + write(arg) + write(end) +if sys.version_info[:2] < (3, 3): + _print = print_ + + def print_(*args, **kwargs): + fp = kwargs.get("file", sys.stdout) + flush = kwargs.pop("flush", False) + _print(*args, **kwargs) + if flush and fp is not None: + fp.flush() + +_add_doc(reraise, """Reraise an exception.""") + +if sys.version_info[0:2] < (3, 4): + def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, + updated=functools.WRAPPER_UPDATES): + def wrapper(f): + f = functools.wraps(wrapped, assigned, updated)(f) + f.__wrapped__ = wrapped + return f + return wrapper +else: + wraps = functools.wraps + + +def with_metaclass(meta, *bases): + """Create a base class with a metaclass.""" + # This requires a bit of explanation: the basic idea is to make a dummy + # metaclass for one level of class instantiation that replaces itself with + # the actual metaclass. + class metaclass(meta): + + def __new__(cls, name, this_bases, d): + return meta(name, bases, d) + return type.__new__(metaclass, 'temporary_class', (), {}) + + +def add_metaclass(metaclass): + """Class decorator for creating a class with a metaclass.""" + def wrapper(cls): + orig_vars = cls.__dict__.copy() + slots = orig_vars.get('__slots__') + if slots is not None: + if isinstance(slots, str): + slots = [slots] + for slots_var in slots: + orig_vars.pop(slots_var) + orig_vars.pop('__dict__', None) + orig_vars.pop('__weakref__', None) + return metaclass(cls.__name__, cls.__bases__, orig_vars) + return wrapper + + +def python_2_unicode_compatible(klass): + """ + A decorator that defines __unicode__ and __str__ methods under Python 2. + Under Python 3 it does nothing. + + To support Python 2 and 3 with a single code base, define a __str__ method + returning text and apply this decorator to the class. + """ + if PY2: + if '__str__' not in klass.__dict__: + raise ValueError("@python_2_unicode_compatible cannot be applied " + "to %s because it doesn't define __str__()." % + klass.__name__) + klass.__unicode__ = klass.__str__ + klass.__str__ = lambda self: self.__unicode__().encode('utf-8') + return klass + + +# Complete the moves implementation. 
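A brief usage sketch of the compatibility helpers defined above (illustrative only, not part of the bundled module; the Meta and Point names are made up). with_metaclass builds a dummy metaclass for one level of instantiation, so the same class statement works on Python 2 and 3, and python_2_unicode_compatible maps a text-returning __str__ to __unicode__ on Python 2:

    from astropy.extern import six

    class Meta(type):
        pass

    # The temporary metaclass replaces itself with Meta when the
    # class statement is executed.
    class Base(six.with_metaclass(Meta, object)):
        pass

    @six.python_2_unicode_compatible
    class Point(object):
        def __init__(self, x, y):
            self.x, self.y = x, y

        def __str__(self):
            # Return text; on Python 2 the decorator turns this into
            # __unicode__ and adds a byte-returning __str__.
            return u'Point({0}, {1})'.format(self.x, self.y)

    assert type(Base) is Meta
    assert str(Point(1, 2)) == 'Point(1, 2)'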
+# This code is at the end of this module to speed up module loading. +# Turn this module into a package. +__path__ = [] # required for PEP 302 and PEP 451 +__package__ = __name__ # see PEP 366 @ReservedAssignment +if globals().get("__spec__") is not None: + __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable +# Remove other six meta path importers, since they cause problems. This can +# happen if six is removed from sys.modules and then reloaded. (Setuptools does +# this for some reason.) +if sys.meta_path: + for i, importer in enumerate(sys.meta_path): + # Here's some real nastiness: Another "instance" of the six module might + # be floating around. Therefore, we can't use isinstance() to check for + # the six meta path importer, since the other six instance will have + # inserted an importer with different class. + if (type(importer).__name__ == "_SixMetaPathImporter" and + importer.name == __name__): + del sys.meta_path[i] + break + del i, importer +# Finally, add the importer to the meta path import hook. +sys.meta_path.append(_importer) diff --git a/astropy/extern/configobj/__init__.py b/astropy/extern/configobj/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/astropy/extern/configobj/configobj.py b/astropy/extern/configobj/configobj.py new file mode 100755 index 0000000..59b7fe3 --- /dev/null +++ b/astropy/extern/configobj/configobj.py @@ -0,0 +1,2485 @@ +# configobj.py +# A config file reader/writer that supports nested sections in config files. +# Copyright (C) 2005-2014: +# (name) : (email) +# Michael Foord: fuzzyman AT voidspace DOT org DOT uk +# Nicola Larosa: nico AT tekNico DOT net +# Rob Dennis: rdennis AT gmail DOT com +# Eli Courtwright: eli AT courtwright DOT org + +# This software is licensed under the terms of the BSD license. +# http://opensource.org/licenses/BSD-3-Clause + +# ConfigObj 5 - main repository for documentation and issue tracking: +# https://github.com/DiffSK/configobj + +import os +import re +import sys +import collections + +from codecs import BOM_UTF8, BOM_UTF16, BOM_UTF16_BE, BOM_UTF16_LE + +from ...extern import six +from ...extern.six.moves import range, zip, map +# from __future__ import __version__ + +# imported lazily to avoid startup performance hit if it isn't used +compiler = None + +# A dictionary mapping BOM to +# the encoding to decode with, and what to set the +# encoding attribute to. +BOMS = { + BOM_UTF8: ('utf_8', None), + BOM_UTF16_BE: ('utf16_be', 'utf_16'), + BOM_UTF16_LE: ('utf16_le', 'utf_16'), + BOM_UTF16: ('utf_16', 'utf_16'), + } +# All legal variants of the BOM codecs. +# TODO: the list of aliases is not meant to be exhaustive, is there a +# better way ? +BOM_LIST = { + 'utf_16': 'utf_16', + 'u16': 'utf_16', + 'utf16': 'utf_16', + 'utf-16': 'utf_16', + 'utf16_be': 'utf16_be', + 'utf_16_be': 'utf16_be', + 'utf-16be': 'utf16_be', + 'utf16_le': 'utf16_le', + 'utf_16_le': 'utf16_le', + 'utf-16le': 'utf16_le', + 'utf_8': 'utf_8', + 'u8': 'utf_8', + 'utf': 'utf_8', + 'utf8': 'utf_8', + 'utf-8': 'utf_8', + } + +# Map of encodings to the BOM to write. 
+BOM_SET = {
+    'utf_8': BOM_UTF8,
+    'utf_16': BOM_UTF16,
+    'utf16_be': BOM_UTF16_BE,
+    'utf16_le': BOM_UTF16_LE,
+    None: BOM_UTF8
+    }
+
+
+def match_utf8(encoding):
+    return BOM_LIST.get(encoding.lower()) == 'utf_8'
+
+
+# Quote strings used for writing values
+squot = "'%s'"
+dquot = '"%s"'
+noquot = "%s"
+wspace_plus = ' \r\n\v\t\'"'
+tsquot = '"""%s"""'
+tdquot = "'''%s'''"
+
+# Sentinel for use in getattr calls to replace hasattr
+MISSING = object()
+
+__all__ = (
+    'DEFAULT_INDENT_TYPE',
+    'DEFAULT_INTERPOLATION',
+    'ConfigObjError',
+    'NestingError',
+    'ParseError',
+    'DuplicateError',
+    'ConfigspecError',
+    'ConfigObj',
+    'SimpleVal',
+    'InterpolationError',
+    'InterpolationLoopError',
+    'MissingInterpolationOption',
+    'RepeatSectionError',
+    'ReloadError',
+    'UnreprError',
+    'UnknownType',
+    'flatten_errors',
+    'get_extra_values'
+)
+
+DEFAULT_INTERPOLATION = 'configparser'
+DEFAULT_INDENT_TYPE = '    '
+MAX_INTERPOL_DEPTH = 10
+
+OPTION_DEFAULTS = {
+    'interpolation': True,
+    'raise_errors': False,
+    'list_values': True,
+    'create_empty': False,
+    'file_error': False,
+    'configspec': None,
+    'stringify': True,
+    # option may be set to one of ('', ' ', '\t')
+    'indent_type': None,
+    'encoding': None,
+    'default_encoding': None,
+    'unrepr': False,
+    'write_empty_values': False,
+}
+
+# this could be replaced if six is used for compatibility, or there are no
+# more assertions about items being a string
+
+
+def getObj(s):
+    global compiler
+    if compiler is None:
+        import compiler
+    s = "a=" + s
+    p = compiler.parse(s)
+    return p.getChildren()[1].getChildren()[0].getChildren()[1]
+
+
+class UnknownType(Exception):
+    pass
+
+
+class Builder(object):
+
+    def build(self, o):
+        m = getattr(self, 'build_' + o.__class__.__name__, None)
+        if m is None:
+            raise UnknownType(o.__class__.__name__)
+        return m(o)
+
+    def build_List(self, o):
+        return list(map(self.build, o.getChildren()))
+
+    def build_Const(self, o):
+        return o.value
+
+    def build_Dict(self, o):
+        d = {}
+        i = iter(map(self.build, o.getChildren()))
+        for el in i:
+            d[el] = next(i)
+        return d
+
+    def build_Tuple(self, o):
+        return tuple(self.build_List(o))
+
+    def build_Name(self, o):
+        if o.name == 'None':
+            return None
+        if o.name == 'True':
+            return True
+        if o.name == 'False':
+            return False
+
+        # An undefined Name
+        raise UnknownType('Undefined Name')
+
+    def build_Add(self, o):
+        real, imag = list(map(self.build_Const, o.getChildren()))
+        try:
+            real = float(real)
+        except TypeError:
+            raise UnknownType('Add')
+        if not isinstance(imag, complex) or imag.real != 0.0:
+            raise UnknownType('Add')
+        return real+imag
+
+    def build_Getattr(self, o):
+        parent = self.build(o.expr)
+        return getattr(parent, o.attrname)
+
+    def build_UnarySub(self, o):
+        return -self.build_Const(o.getChildren()[0])
+
+    def build_UnaryAdd(self, o):
+        return self.build_Const(o.getChildren()[0])
+
+
+_builder = Builder()
+
+
+def unrepr(s):
+    if not s:
+        return s
+
+    # this is supposed to be safe
+    import ast
+    return ast.literal_eval(s)
+
+
+class ConfigObjError(SyntaxError):
+    """
+    This is the base class for all errors that ConfigObj raises.
+    It is a subclass of SyntaxError.
+    """
+    def __init__(self, message='', line_number=None, line=''):
+        self.line = line
+        self.line_number = line_number
+        SyntaxError.__init__(self, message)
+
+
+class NestingError(ConfigObjError):
+    """
+    This error indicates a level of nesting that doesn't match.
+    """
+
+
+class ParseError(ConfigObjError):
+    """
+    This error indicates that a line is badly written.
+    It is neither a valid ``key = value`` line,
+    nor a valid section marker line.
+    """
+
+
+class ReloadError(IOError):
+    """
+    A 'reload' operation failed.
+    This exception is a subclass of ``IOError``.
+    """
+    def __init__(self):
+        IOError.__init__(self, 'reload failed, filename is not set.')
+
+
+class DuplicateError(ConfigObjError):
+    """
+    The keyword or section specified already exists.
+    """
+
+
+class ConfigspecError(ConfigObjError):
+    """
+    An error occurred whilst parsing a configspec.
+    """
+
+
+class InterpolationError(ConfigObjError):
+    """Base class for the two interpolation errors."""
+
+
+class InterpolationLoopError(InterpolationError):
+    """Maximum interpolation depth exceeded in string interpolation."""
+
+    def __init__(self, option):
+        InterpolationError.__init__(
+            self,
+            'interpolation loop detected in value "%s".' % option)
+
+
+class RepeatSectionError(ConfigObjError):
+    """
+    This error indicates additional sections in a section with a
+    ``__many__`` (repeated) section.
+    """
+
+
+class MissingInterpolationOption(InterpolationError):
+    """A value specified for interpolation was missing."""
+    def __init__(self, option):
+        msg = 'missing option "%s" in interpolation.' % option
+        InterpolationError.__init__(self, msg)
+
+
+class UnreprError(ConfigObjError):
+    """An error parsing in unrepr mode."""
+
+
+
+class InterpolationEngine(object):
+    """
+    A helper class to help perform string interpolation.
+
+    This class is an abstract base class; its descendants perform
+    the actual work.
+    """
+
+    # compiled regexp to use in self.interpolate()
+    _KEYCRE = re.compile(r"%\(([^)]*)\)s")
+    _cookie = '%'
+
+    def __init__(self, section):
+        # the Section instance that "owns" this engine
+        self.section = section
+
+
+    def interpolate(self, key, value):
+        # short-cut
+        if not self._cookie in value:
+            return value
+
+        def recursive_interpolate(key, value, section, backtrail):
+            """The function that does the actual work.
+
+            ``value``: the string we're trying to interpolate.
+            ``section``: the section in which that string was found
+            ``backtrail``: a dict to keep track of where we've been,
+            to detect and prevent infinite recursion loops
+
+            This is similar to a depth-first-search algorithm.
+            """
+            # Have we been here already?
+ if (key, section.name) in backtrail: + # Yes - infinite loop detected + raise InterpolationLoopError(key) + # Place a marker on our backtrail so we won't come back here again + backtrail[(key, section.name)] = 1 + + # Now start the actual work + match = self._KEYCRE.search(value) + while match: + # The actual parsing of the match is implementation-dependent, + # so delegate to our helper function + k, v, s = self._parse_match(match) + if k is None: + # That's the signal that no further interpolation is needed + replacement = v + else: + # Further interpolation may be needed to obtain final value + replacement = recursive_interpolate(k, v, s, backtrail) + # Replace the matched string with its final value + start, end = match.span() + value = ''.join((value[:start], replacement, value[end:])) + new_search_start = start + len(replacement) + # Pick up the next interpolation key, if any, for next time + # through the while loop + match = self._KEYCRE.search(value, new_search_start) + + # Now safe to come back here again; remove marker from backtrail + del backtrail[(key, section.name)] + + return value + + # Back in interpolate(), all we have to do is kick off the recursive + # function with appropriate starting values + value = recursive_interpolate(key, value, self.section, {}) + return value + + + def _fetch(self, key): + """Helper function to fetch values from owning section. + + Returns a 2-tuple: the value, and the section where it was found. + """ + # switch off interpolation before we try and fetch anything ! + save_interp = self.section.main.interpolation + self.section.main.interpolation = False + + # Start at section that "owns" this InterpolationEngine + current_section = self.section + while True: + # try the current section first + val = current_section.get(key) + if val is not None and not isinstance(val, Section): + break + # try "DEFAULT" next + val = current_section.get('DEFAULT', {}).get(key) + if val is not None and not isinstance(val, Section): + break + # move up to parent and try again + # top-level's parent is itself + if current_section.parent is current_section: + # reached top level, time to give up + break + current_section = current_section.parent + + # restore interpolation to previous value before returning + self.section.main.interpolation = save_interp + if val is None: + raise MissingInterpolationOption(key) + return val, current_section + + + def _parse_match(self, match): + """Implementation-dependent helper function. + + Will be passed a match object corresponding to the interpolation + key we just found (e.g., "%(foo)s" or "$foo"). Should look up that + key in the appropriate config file section (using the ``_fetch()`` + helper function) and return a 3-tuple: (key, value, section) + + ``key`` is the name of the key we're looking for + ``value`` is the value found for that key + ``section`` is a reference to the section where it was found + + ``key`` and ``section`` should be None if no further + interpolation should be performed on the resulting value + (e.g., if we interpolated "$$" and returned "$"). 
+ """ + raise NotImplementedError() + + + +class ConfigParserInterpolation(InterpolationEngine): + """Behaves like ConfigParser.""" + _cookie = '%' + _KEYCRE = re.compile(r"%\(([^)]*)\)s") + + def _parse_match(self, match): + key = match.group(1) + value, section = self._fetch(key) + return key, value, section + + + +class TemplateInterpolation(InterpolationEngine): + """Behaves like string.Template.""" + _cookie = '$' + _delimiter = '$' + _KEYCRE = re.compile(r""" + \$(?: + (?P\$) | # Two $ signs + (?P[_a-z][_a-z0-9]*) | # $name format + {(?P[^}]*)} # ${name} format + ) + """, re.IGNORECASE | re.VERBOSE) + + def _parse_match(self, match): + # Valid name (in or out of braces): fetch value from section + key = match.group('named') or match.group('braced') + if key is not None: + value, section = self._fetch(key) + return key, value, section + # Escaped delimiter (e.g., $$): return single delimiter + if match.group('escaped') is not None: + # Return None for key and section to indicate it's time to stop + return None, self._delimiter, None + # Anything else: ignore completely, just return it unchanged + return None, match.group(), None + + +interpolation_engines = { + 'configparser': ConfigParserInterpolation, + 'template': TemplateInterpolation, +} + + +def __newobj__(cls, *args): + # Hack for pickle + return cls.__new__(cls, *args) + +class Section(dict): + """ + A dictionary-like object that represents a section in a config file. + + It does string interpolation if the 'interpolation' attribute + of the 'main' object is set to True. + + Interpolation is tried first from this object, then from the 'DEFAULT' + section of this object, next from the parent and its 'DEFAULT' section, + and so on until the main object is reached. + + A Section will behave like an ordered dictionary - following the + order of the ``scalars`` and ``sections`` attributes. + You can use this to change the order of members. + + Iteration follows the order: scalars, then sections. + """ + + + def __setstate__(self, state): + dict.update(self, state[0]) + self.__dict__.update(state[1]) + + def __reduce__(self): + state = (dict(self), self.__dict__) + return (__newobj__, (self.__class__,), state) + + + def __init__(self, parent, depth, main, indict=None, name=None): + """ + * parent is the section above + * depth is the depth level of this section + * main is the main ConfigObj + * indict is a dictionary to initialise the section with + """ + if indict is None: + indict = {} + dict.__init__(self) + # used for nesting level *and* interpolation + self.parent = parent + # used for the interpolation attribute + self.main = main + # level of nesting depth of this Section + self.depth = depth + # purely for information + self.name = name + # + self._initialise() + # we do this explicitly so that __setitem__ is used properly + # (rather than just passing to ``dict.__init__``) + for entry, value in indict.items(): + self[entry] = value + + + def _initialise(self): + # the sequence of scalar values in this Section + self.scalars = [] + # the sequence of sections in this Section + self.sections = [] + # for comments :-) + self.comments = {} + self.inline_comments = {} + # the configspec + self.configspec = None + # for defaults + self.defaults = [] + self.default_values = {} + self.extra_values = [] + self._created = False + + + def _interpolate(self, key, value): + try: + # do we already have an interpolation engine? 
+ engine = self._interpolation_engine + except AttributeError: + # not yet: first time running _interpolate(), so pick the engine + name = self.main.interpolation + if name == True: # note that "if name:" would be incorrect here + # backwards-compatibility: interpolation=True means use default + name = DEFAULT_INTERPOLATION + name = name.lower() # so that "Template", "template", etc. all work + class_ = interpolation_engines.get(name, None) + if class_ is None: + # invalid value for self.main.interpolation + self.main.interpolation = False + return value + else: + # save reference to engine so we don't have to do this again + engine = self._interpolation_engine = class_(self) + # let the engine do the actual work + return engine.interpolate(key, value) + + + def __getitem__(self, key): + """Fetch the item and do string interpolation.""" + val = dict.__getitem__(self, key) + if self.main.interpolation: + if isinstance(val, six.string_types): + return self._interpolate(key, val) + if isinstance(val, list): + def _check(entry): + if isinstance(entry, six.string_types): + return self._interpolate(key, entry) + return entry + new = [_check(entry) for entry in val] + if new != val: + return new + return val + + + def __setitem__(self, key, value, unrepr=False): + """ + Correctly set a value. + + Making dictionary values Section instances. + (We have to special case 'Section' instances - which are also dicts) + + Keys must be strings. + Values need only be strings (or lists of strings) if + ``main.stringify`` is set. + + ``unrepr`` must be set when setting a value to a dictionary, without + creating a new sub-section. + """ + if not isinstance(key, six.string_types): + raise ValueError('The key "%s" is not a string.' % key) + + # add the comment + if key not in self.comments: + self.comments[key] = [] + self.inline_comments[key] = '' + # remove the entry from defaults + if key in self.defaults: + self.defaults.remove(key) + # + if isinstance(value, Section): + if key not in self: + self.sections.append(key) + dict.__setitem__(self, key, value) + elif isinstance(value, collections.Mapping) and not unrepr: + # First create the new depth level, + # then create the section + if key not in self: + self.sections.append(key) + new_depth = self.depth + 1 + dict.__setitem__( + self, + key, + Section( + self, + new_depth, + self.main, + indict=value, + name=key)) + else: + if key not in self: + self.scalars.append(key) + if not self.main.stringify: + if isinstance(value, six.string_types): + pass + elif isinstance(value, (list, tuple)): + for entry in value: + if not isinstance(entry, six.string_types): + raise TypeError('Value is not a string "%s".' % entry) + else: + raise TypeError('Value is not a string "%s".' % value) + dict.__setitem__(self, key, value) + + + def __delitem__(self, key): + """Remove items from the sequence when deleting.""" + dict. __delitem__(self, key) + if key in self.scalars: + self.scalars.remove(key) + else: + self.sections.remove(key) + del self.comments[key] + del self.inline_comments[key] + + + def get(self, key, default=None): + """A version of ``get`` that doesn't bypass string interpolation.""" + try: + return self[key] + except KeyError: + return default + + + def update(self, indict): + """ + A version of update that uses our ``__setitem__``. + """ + for entry in indict: + self[entry] = indict[entry] + + + def pop(self, key, default=MISSING): + """ + 'D.pop(k[,d]) -> v, remove specified key and return the corresponding value. 
+ If key is not found, d is returned if given, otherwise KeyError is raised' + """ + try: + val = self[key] + except KeyError: + if default is MISSING: + raise + val = default + else: + del self[key] + return val + + + def popitem(self): + """Pops the first (key,val)""" + sequence = (self.scalars + self.sections) + if not sequence: + raise KeyError(": 'popitem(): dictionary is empty'") + key = sequence[0] + val = self[key] + del self[key] + return key, val + + + def clear(self): + """ + A version of clear that also affects scalars/sections + Also clears comments and configspec. + + Leaves other attributes alone : + depth/main/parent are not affected + """ + dict.clear(self) + self.scalars = [] + self.sections = [] + self.comments = {} + self.inline_comments = {} + self.configspec = None + self.defaults = [] + self.extra_values = [] + + + def setdefault(self, key, default=None): + """A version of setdefault that sets sequence if appropriate.""" + try: + return self[key] + except KeyError: + self[key] = default + return self[key] + + + def items(self): + """D.items() -> list of D's (key, value) pairs, as 2-tuples""" + return list(zip((self.scalars + self.sections), list(self.values()))) + + + def keys(self): + """D.keys() -> list of D's keys""" + return (self.scalars + self.sections) + + + def values(self): + """D.values() -> list of D's values""" + return [self[key] for key in (self.scalars + self.sections)] + + + def iteritems(self): + """D.iteritems() -> an iterator over the (key, value) items of D""" + return iter(list(self.items())) + + + def iterkeys(self): + """D.iterkeys() -> an iterator over the keys of D""" + return iter((self.scalars + self.sections)) + + __iter__ = iterkeys + + + def itervalues(self): + """D.itervalues() -> an iterator over the values of D""" + return iter(list(self.values())) + + + def __repr__(self): + """x.__repr__() <==> repr(x)""" + def _getval(key): + try: + return self[key] + except MissingInterpolationOption: + return dict.__getitem__(self, key) + return '{%s}' % ', '.join([('%s: %s' % (repr(key), repr(_getval(key)))) + for key in (self.scalars + self.sections)]) + + __str__ = __repr__ + __str__.__doc__ = "x.__str__() <==> str(x)" + + + # Extra methods - not in a normal dictionary + + def dict(self): + """ + Return a deepcopy of self as a dictionary. + + All members that are ``Section`` instances are recursively turned to + ordinary dictionaries - by calling their ``dict`` method. + + >>> n = a.dict() + >>> n == a + 1 + >>> n is a + 0 + """ + newdict = {} + for entry in self: + this_entry = self[entry] + if isinstance(this_entry, Section): + this_entry = this_entry.dict() + elif isinstance(this_entry, list): + # create a copy rather than a reference + this_entry = list(this_entry) + elif isinstance(this_entry, tuple): + # create a copy rather than a reference + this_entry = tuple(this_entry) + newdict[entry] = this_entry + return newdict + + + def merge(self, indict): + """ + A recursive update - useful for merging config files. + + >>> a = '''[section1] + ... option1 = True + ... [[subsection]] + ... more_options = False + ... # end of file'''.splitlines() + >>> b = '''# File is user.ini + ... [section1] + ... option1 = False + ... 
# end of file'''.splitlines()
+        >>> c1 = ConfigObj(b)
+        >>> c2 = ConfigObj(a)
+        >>> c2.merge(c1)
+        >>> c2
+        ConfigObj({'section1': {'option1': 'False', 'subsection': {'more_options': 'False'}}})
+        """
+        for key, val in list(indict.items()):
+            if (key in self and isinstance(self[key], collections.Mapping) and
+                    isinstance(val, collections.Mapping)):
+                self[key].merge(val)
+            else:
+                self[key] = val
+
+
+    def rename(self, oldkey, newkey):
+        """
+        Change a keyname to another, without changing position in sequence.
+
+        Implemented so that transformations can be made on keys,
+        as well as on values. (used by encode and decode)
+
+        Also renames comments.
+        """
+        if oldkey in self.scalars:
+            the_list = self.scalars
+        elif oldkey in self.sections:
+            the_list = self.sections
+        else:
+            raise KeyError('Key "%s" not found.' % oldkey)
+        pos = the_list.index(oldkey)
+        #
+        val = self[oldkey]
+        dict.__delitem__(self, oldkey)
+        dict.__setitem__(self, newkey, val)
+        the_list.remove(oldkey)
+        the_list.insert(pos, newkey)
+        comm = self.comments[oldkey]
+        inline_comment = self.inline_comments[oldkey]
+        del self.comments[oldkey]
+        del self.inline_comments[oldkey]
+        self.comments[newkey] = comm
+        self.inline_comments[newkey] = inline_comment
+
+
+    def walk(self, function, raise_errors=True,
+             call_on_sections=False, **keywargs):
+        """
+        Walk every member and call a function on the keyword and value.
+
+        Return a dictionary of the return values.
+
+        If the function raises an exception, raise the error
+        unless ``raise_errors=False``, in which case set the return value to
+        ``False``.
+
+        Any unrecognised keyword arguments you pass to ``walk`` will be
+        passed on to the function you pass in.
+
+        Note: if ``call_on_sections`` is ``True`` then, on encountering a
+        subsection, the function is *first* called for the *whole* subsection
+        and then ``walk`` recurses into its members. This means your function
+        must be able to handle strings, dictionaries and lists. This allows
+        you to change the keys of subsections, as well as those of ordinary
+        members. The return value when called on the whole subsection has to
+        be discarded.
+
+        See the encode and decode methods for examples, including functions.
+
+        .. admonition:: caution
+
+            You can use ``walk`` to transform the names of members of a section
+            but you mustn't add or delete members.
+
+        >>> config = '''[XXXXsection]
+        ... XXXXkey = XXXXvalue'''.splitlines()
+        >>> cfg = ConfigObj(config)
+        >>> cfg
+        ConfigObj({'XXXXsection': {'XXXXkey': 'XXXXvalue'}})
+        >>> def transform(section, key):
+        ...     val = section[key]
+        ...     newkey = key.replace('XXXX', 'CLIENT1')
+        ...     section.rename(key, newkey)
+        ...     if isinstance(val, (tuple, list, dict)):
+        ...         pass
+        ...     else:
+        ...         val = val.replace('XXXX', 'CLIENT1')
+        ...         
section[newkey] = val + >>> cfg.walk(transform, call_on_sections=True) + {'CLIENT1section': {'CLIENT1key': None}} + >>> cfg + ConfigObj({'CLIENT1section': {'CLIENT1key': 'CLIENT1value'}}) + """ + out = {} + # scalars first + for i in range(len(self.scalars)): + entry = self.scalars[i] + try: + val = function(self, entry, **keywargs) + # bound again in case name has changed + entry = self.scalars[i] + out[entry] = val + except Exception: + if raise_errors: + raise + else: + entry = self.scalars[i] + out[entry] = False + # then sections + for i in range(len(self.sections)): + entry = self.sections[i] + if call_on_sections: + try: + function(self, entry, **keywargs) + except Exception: + if raise_errors: + raise + else: + entry = self.sections[i] + out[entry] = False + # bound again in case name has changed + entry = self.sections[i] + # previous result is discarded + out[entry] = self[entry].walk( + function, + raise_errors=raise_errors, + call_on_sections=call_on_sections, + **keywargs) + return out + + + def as_bool(self, key): + """ + Accepts a key as input. The corresponding value must be a string or + the objects (``True`` or 1) or (``False`` or 0). We allow 0 and 1 to + retain compatibility with Python 2.2. + + If the string is one of ``True``, ``On``, ``Yes``, or ``1`` it returns + ``True``. + + If the string is one of ``False``, ``Off``, ``No``, or ``0`` it returns + ``False``. + + ``as_bool`` is not case sensitive. + + Any other input will raise a ``ValueError``. + + >>> a = ConfigObj() + >>> a['a'] = 'fish' + >>> a.as_bool('a') + Traceback (most recent call last): + ValueError: Value "fish" is neither True nor False + >>> a['b'] = 'True' + >>> a.as_bool('b') + 1 + >>> a['b'] = 'off' + >>> a.as_bool('b') + 0 + """ + val = self[key] + if val == True: + return True + elif val == False: + return False + else: + try: + if not isinstance(val, six.string_types): + # TODO: Why do we raise a KeyError here? + raise KeyError() + else: + return self.main._bools[val.lower()] + except KeyError: + raise ValueError('Value "%s" is neither True nor False' % val) + + + def as_int(self, key): + """ + A convenience method which coerces the specified value to an integer. + + If the value is an invalid literal for ``int``, a ``ValueError`` will + be raised. + + >>> a = ConfigObj() + >>> a['a'] = 'fish' + >>> a.as_int('a') + Traceback (most recent call last): + ValueError: invalid literal for int() with base 10: 'fish' + >>> a['b'] = '1' + >>> a.as_int('b') + 1 + >>> a['b'] = '3.2' + >>> a.as_int('b') + Traceback (most recent call last): + ValueError: invalid literal for int() with base 10: '3.2' + """ + return int(self[key]) + + + def as_float(self, key): + """ + A convenience method which coerces the specified value to a float. + + If the value is an invalid literal for ``float``, a ``ValueError`` will + be raised. + + >>> a = ConfigObj() + >>> a['a'] = 'fish' + >>> a.as_float('a') #doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ValueError: invalid literal for float(): fish + >>> a['b'] = '1' + >>> a.as_float('b') + 1.0 + >>> a['b'] = '3.2' + >>> a.as_float('b') #doctest: +ELLIPSIS + 3.2... + """ + return float(self[key]) + + + def as_list(self, key): + """ + A convenience method which fetches the specified value, guaranteeing + that it is a list. 
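+
+        For example (a sketch with a hypothetical 'hosts' key), this lets
+        callers iterate without special-casing single values::
+
+            for host in cfg.as_list('hosts'):
+                print(host)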
+ + >>> a = ConfigObj() + >>> a['a'] = 1 + >>> a.as_list('a') + [1] + >>> a['a'] = (1,) + >>> a.as_list('a') + [1] + >>> a['a'] = [1] + >>> a.as_list('a') + [1] + """ + result = self[key] + if isinstance(result, (tuple, list)): + return list(result) + return [result] + + + def restore_default(self, key): + """ + Restore (and return) default value for the specified key. + + This method will only work for a ConfigObj that was created + with a configspec and has been validated. + + If there is no default value for this key, ``KeyError`` is raised. + """ + default = self.default_values[key] + dict.__setitem__(self, key, default) + if key not in self.defaults: + self.defaults.append(key) + return default + + + def restore_defaults(self): + """ + Recursively restore default values to all members + that have them. + + This method will only work for a ConfigObj that was created + with a configspec and has been validated. + + It doesn't delete or modify entries without default values. + """ + for key in self.default_values: + self.restore_default(key) + + for section in self.sections: + self[section].restore_defaults() + + +class ConfigObj(Section): + """An object to read, create, and write config files.""" + + _keyword = re.compile(r'''^ # line start + (\s*) # indentation + ( # keyword + (?:".*?")| # double quotes + (?:'.*?')| # single quotes + (?:[^'"=].*?) # no quotes + ) + \s*=\s* # divider + (.*) # value (including list values and comments) + $ # line end + ''', + re.VERBOSE) + + _sectionmarker = re.compile(r'''^ + (\s*) # 1: indentation + ((?:\[\s*)+) # 2: section marker open + ( # 3: section name open + (?:"\s*\S.*?\s*")| # at least one non-space with double quotes + (?:'\s*\S.*?\s*')| # at least one non-space with single quotes + (?:[^'"\s].*?) # at least one non-space unquoted + ) # section name close + ((?:\s*\])+) # 4: section marker close + \s*(\#.*)? # 5: optional comment + $''', + re.VERBOSE) + + # this regexp pulls list values out as a single string + # or single values and comments + # FIXME: this regex adds a '' to the end of comma terminated lists + # workaround in ``_handle_value`` + _valueexp = re.compile(r'''^ + (?: + (?: + ( + (?: + (?: + (?:".*?")| # double quotes + (?:'.*?')| # single quotes + (?:[^'",\#][^,\#]*?) # unquoted + ) + \s*,\s* # comma + )* # match all list items ending in a comma (if any) + ) + ( + (?:".*?")| # double quotes + (?:'.*?')| # single quotes + (?:[^'",\#\s][^,]*?)| # unquoted + (?:(? 
1: + msg = "Parsing failed with several errors.\nFirst error %s" % info + error = ConfigObjError(msg) + else: + error = self._errors[0] + # set the errors attribute; it's a list of tuples: + # (error_type, message, line_number) + error.errors = self._errors + # set the config attribute + error.config = self + raise error + # delete private attributes + del self._errors + + if configspec is None: + self.configspec = None + else: + self._handle_configspec(configspec) + + + def _initialise(self, options=None): + if options is None: + options = OPTION_DEFAULTS + + # initialise a few variables + self.filename = None + self._errors = [] + self.raise_errors = options['raise_errors'] + self.interpolation = options['interpolation'] + self.list_values = options['list_values'] + self.create_empty = options['create_empty'] + self.file_error = options['file_error'] + self.stringify = options['stringify'] + self.indent_type = options['indent_type'] + self.encoding = options['encoding'] + self.default_encoding = options['default_encoding'] + self.BOM = False + self.newlines = None + self.write_empty_values = options['write_empty_values'] + self.unrepr = options['unrepr'] + + self.initial_comment = [] + self.final_comment = [] + self.configspec = None + + if self._inspec: + self.list_values = False + + # Clear section attributes as well + Section._initialise(self) + + + def __repr__(self): + def _getval(key): + try: + return self[key] + except MissingInterpolationOption: + return dict.__getitem__(self, key) + return ('%s({%s})' % (self.__class__.__name__, + ', '.join([('%s: %s' % (repr(key), repr(_getval(key)))) + for key in (self.scalars + self.sections)]))) + + + def _handle_bom(self, infile): + """ + Handle any BOM, and decode if necessary. + + If an encoding is specified, that *must* be used - but the BOM should + still be removed (and the BOM attribute set). + + (If the encoding is wrongly specified, then a BOM for an alternative + encoding won't be discovered or removed.) + + If an encoding is not specified, UTF8 or UTF16 BOM will be detected and + removed. The BOM attribute will be set. UTF16 will be decoded to + unicode. + + NOTE: This method must not be called with an empty ``infile``. + + Specifying the *wrong* encoding is likely to cause a + ``UnicodeDecodeError``. + + ``infile`` must always be returned as a list of lines, but may be + passed in as a single string. + """ + + if ((self.encoding is not None) and + (self.encoding.lower() not in BOM_LIST)): + # No need to check for a BOM + # the encoding specified doesn't have one + # just decode + return self._decode(infile, self.encoding) + + if isinstance(infile, (list, tuple)): + line = infile[0] + else: + line = infile + + if isinstance(line, six.text_type): + # it's already decoded and there's no need to do anything + # else, just use the _decode utility method to handle + # listifying appropriately + return self._decode(infile, self.encoding) + + if self.encoding is not None: + # encoding explicitly supplied + # And it could have an associated BOM + # TODO: if encoding is just UTF16 - we ought to check for both + # TODO: big endian and little endian versions. 
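+            # (Comment-only sketch of the flow below: for an explicit
+            # 'utf_16' encoding each UTF16 BOM in ``BOMS`` is tried against
+            # the start of the input; for UTF8 the single ``BOM_UTF8`` is
+            # stripped before decoding.)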
+ enc = BOM_LIST[self.encoding.lower()] + if enc == 'utf_16': + # For UTF16 we try big endian and little endian + for BOM, (encoding, final_encoding) in list(BOMS.items()): + if not final_encoding: + # skip UTF8 + continue + if infile.startswith(BOM): + ### BOM discovered + ##self.BOM = True + # Don't need to remove BOM + return self._decode(infile, encoding) + + # If we get this far, will *probably* raise a DecodeError + # As it doesn't appear to start with a BOM + return self._decode(infile, self.encoding) + + # Must be UTF8 + BOM = BOM_SET[enc] + if not line.startswith(BOM): + return self._decode(infile, self.encoding) + + newline = line[len(BOM):] + + # BOM removed + if isinstance(infile, (list, tuple)): + infile[0] = newline + else: + infile = newline + self.BOM = True + return self._decode(infile, self.encoding) + + # No encoding specified - so we need to check for UTF8/UTF16 + for BOM, (encoding, final_encoding) in list(BOMS.items()): + if not isinstance(line, six.binary_type) or not line.startswith(BOM): + # didn't specify a BOM, or it's not a bytestring + continue + else: + # BOM discovered + self.encoding = final_encoding + if not final_encoding: + self.BOM = True + # UTF8 + # remove BOM + newline = line[len(BOM):] + if isinstance(infile, (list, tuple)): + infile[0] = newline + else: + infile = newline + # UTF-8 + if isinstance(infile, six.text_type): + return infile.splitlines(True) + elif isinstance(infile, six.binary_type): + return infile.decode('utf-8').splitlines(True) + else: + return self._decode(infile, 'utf-8') + # UTF16 - have to decode + return self._decode(infile, encoding) + + + if six.PY2 and isinstance(line, str): + # don't actually do any decoding, since we're on python 2 and + # returning a bytestring is fine + return self._decode(infile, None) + # No BOM discovered and no encoding specified, default to UTF-8 + if isinstance(infile, six.binary_type): + return infile.decode('utf-8').splitlines(True) + else: + return self._decode(infile, 'utf-8') + + + def _a_to_u(self, aString): + """Decode ASCII strings to unicode if a self.encoding is specified.""" + if isinstance(aString, six.binary_type) and self.encoding: + return aString.decode(self.encoding) + else: + return aString + + + def _decode(self, infile, encoding): + """ + Decode infile to unicode. Using the specified encoding. + + if is a string, it also needs converting to a list. + """ + if isinstance(infile, six.string_types): + return infile.splitlines(True) + if isinstance(infile, six.binary_type): + # NOTE: Could raise a ``UnicodeDecodeError`` + if encoding: + return infile.decode(encoding).splitlines(True) + else: + return infile.splitlines(True) + + if encoding: + for i, line in enumerate(infile): + if isinstance(line, six.binary_type): + # NOTE: The isinstance test here handles mixed lists of unicode/string + # NOTE: But the decode will break on any non-string values + # NOTE: Or could raise a ``UnicodeDecodeError`` + infile[i] = line.decode(encoding) + return infile + + + def _decode_element(self, line): + """Decode element to unicode if necessary.""" + if isinstance(line, six.binary_type) and self.default_encoding: + return line.decode(self.default_encoding) + else: + return line + + + # TODO: this may need to be modified + def _str(self, value): + """ + Used by ``stringify`` within validate, to turn non-string values + into strings. 
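+
+        For example (illustrative values only)::
+
+            self._str(3)       # -> '3'
+            self._str('3')     # -> '3' (strings pass through unchanged)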
+ """ + if not isinstance(value, six.string_types): + # intentially 'str' because it's just whatever the "normal" + # string type is for the python version we're dealing with + return str(value) + else: + return value + + + def _parse(self, infile): + """Actually parse the config file.""" + temp_list_values = self.list_values + if self.unrepr: + self.list_values = False + + comment_list = [] + done_start = False + this_section = self + maxline = len(infile) - 1 + cur_index = -1 + reset_comment = False + + while cur_index < maxline: + if reset_comment: + comment_list = [] + cur_index += 1 + line = infile[cur_index] + sline = line.strip() + # do we have anything on the line ? + if not sline or sline.startswith('#'): + reset_comment = False + comment_list.append(line) + continue + + if not done_start: + # preserve initial comment + self.initial_comment = comment_list + comment_list = [] + done_start = True + + reset_comment = True + # first we check if it's a section marker + mat = self._sectionmarker.match(line) + if mat is not None: + # is a section line + (indent, sect_open, sect_name, sect_close, comment) = mat.groups() + if indent and (self.indent_type is None): + self.indent_type = indent + cur_depth = sect_open.count('[') + if cur_depth != sect_close.count(']'): + self._handle_error("Cannot compute the section depth", + NestingError, infile, cur_index) + continue + + if cur_depth < this_section.depth: + # the new section is dropping back to a previous level + try: + parent = self._match_depth(this_section, + cur_depth).parent + except SyntaxError: + self._handle_error("Cannot compute nesting level", + NestingError, infile, cur_index) + continue + elif cur_depth == this_section.depth: + # the new section is a sibling of the current section + parent = this_section.parent + elif cur_depth == this_section.depth + 1: + # the new section is a child the current section + parent = this_section + else: + self._handle_error("Section too nested", + NestingError, infile, cur_index) + continue + + sect_name = self._unquote(sect_name) + if sect_name in parent: + self._handle_error('Duplicate section name', + DuplicateError, infile, cur_index) + continue + + # create the new section + this_section = Section( + parent, + cur_depth, + self, + name=sect_name) + parent[sect_name] = this_section + parent.inline_comments[sect_name] = comment + parent.comments[sect_name] = comment_list + continue + # + # it's not a section marker, + # so it should be a valid ``key = value`` line + mat = self._keyword.match(line) + if mat is None: + self._handle_error( + 'Invalid line ({0!r}) (matched as neither section nor keyword)'.format(line), + ParseError, infile, cur_index) + else: + # is a keyword value + # value will include any inline comment + (indent, key, value) = mat.groups() + if indent and (self.indent_type is None): + self.indent_type = indent + # check for a multiline value + if value[:3] in ['"""', "'''"]: + try: + value, comment, cur_index = self._multiline( + value, infile, cur_index, maxline) + except SyntaxError: + self._handle_error( + 'Parse error in multiline value', + ParseError, infile, cur_index) + continue + else: + if self.unrepr: + comment = '' + try: + value = unrepr(value) + except Exception as e: + if type(e) == UnknownType: + msg = 'Unknown name or type in value' + else: + msg = 'Parse error from unrepr-ing multiline value' + self._handle_error(msg, UnreprError, infile, + cur_index) + continue + else: + if self.unrepr: + comment = '' + try: + value = unrepr(value) + except Exception as e: + 
+                            if isinstance(e, UnknownType):
+                                msg = 'Unknown name or type in value'
+                            else:
+                                msg = 'Parse error from unrepr-ing value'
+                            self._handle_error(msg, UnreprError, infile,
+                                               cur_index)
+                            continue
+                    else:
+                        # extract comment and lists
+                        try:
+                            (value, comment) = self._handle_value(value)
+                        except SyntaxError:
+                            self._handle_error(
+                                'Parse error in value',
+                                ParseError, infile, cur_index)
+                            continue
+                #
+                key = self._unquote(key)
+                if key in this_section:
+                    self._handle_error(
+                        'Duplicate keyword name',
+                        DuplicateError, infile, cur_index)
+                    continue
+                # add the key.
+                # we set unrepr because if we have got this far we will never
+                # be creating a new section
+                this_section.__setitem__(key, value, unrepr=True)
+                this_section.inline_comments[key] = comment
+                this_section.comments[key] = comment_list
+                continue
+        #
+        if self.indent_type is None:
+            # no indentation used, set the type accordingly
+            self.indent_type = ''
+
+        # preserve the final comment
+        if not self and not self.initial_comment:
+            self.initial_comment = comment_list
+        elif not reset_comment:
+            self.final_comment = comment_list
+        self.list_values = temp_list_values
+
+
+    def _match_depth(self, sect, depth):
+        """
+        Given a section and a depth level, walk back through the section's
+        parents to see if the depth level matches a previous section.
+
+        Return a reference to the right section,
+        or raise a SyntaxError.
+        """
+        while depth < sect.depth:
+            if sect is sect.parent:
+                # we've reached the top level already
+                raise SyntaxError()
+            sect = sect.parent
+        if sect.depth == depth:
+            return sect
+        # shouldn't get here
+        raise SyntaxError()
+
+
+    def _handle_error(self, text, ErrorClass, infile, cur_index):
+        """
+        Handle an error according to the error settings.
+
+        Either raise the error or store it.
+        The error will have occurred at ``cur_index``.
+        """
+        line = infile[cur_index]
+        cur_index += 1
+        message = '{0} at line {1}.'.format(text, cur_index)
+        error = ErrorClass(message, cur_index, line)
+        if self.raise_errors:
+            # raise the error - parsing stops here
+            raise error
+        # store the error
+        # reraise when parsing has finished
+        self._errors.append(error)
+
+
+    def _unquote(self, value):
+        """Return an unquoted version of a value."""
+        if not value:
+            # should only happen during parsing of lists
+            raise SyntaxError
+        if (value[0] == value[-1]) and (value[0] in ('"', "'")):
+            value = value[1:-1]
+        return value
+
+
+    def _quote(self, value, multiline=True):
+        """
+        Return a safely quoted version of a value.
+
+        Raise a ConfigObjError if the value cannot be safely quoted.
+        If multiline is ``True`` (default) then use triple quotes
+        if necessary.
+
+        * Don't quote values that don't need it.
+        * Recursively quote members of a list and return a comma-joined list.
+        * Multiline is ``False`` for lists.
+        * Obey list syntax for empty and single member lists.
+
+        If ``list_values=False`` then the value is only quoted if it contains
+        a ``\\n`` (is multiline) or '#'.
+
+        If ``write_empty_values`` is set, and the value is an empty string, it
+        won't be quoted.
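+
+        For example (an illustration, not a doctest)::
+
+            self._quote(['a', 'b'])    # -> 'a, b'
+            self._quote([])            # -> ','      (empty list syntax)
+            self._quote(['a'])         # -> 'a,'     (single member list)
+            self._quote('a, b')        # -> '"a, b"' (comma forces quoting)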
+ """ + if multiline and self.write_empty_values and value == '': + # Only if multiline is set, so that it is used for values not + # keys, and not values that are part of a list + return '' + + if multiline and isinstance(value, (list, tuple)): + if not value: + return ',' + elif len(value) == 1: + return self._quote(value[0], multiline=False) + ',' + return ', '.join([self._quote(val, multiline=False) + for val in value]) + if not isinstance(value, six.string_types): + if self.stringify: + # intentially 'str' because it's just whatever the "normal" + # string type is for the python version we're dealing with + value = str(value) + else: + raise TypeError('Value "%s" is not a string.' % value) + + if not value: + return '""' + + no_lists_no_quotes = not self.list_values and '\n' not in value and '#' not in value + need_triple = multiline and ((("'" in value) and ('"' in value)) or ('\n' in value )) + hash_triple_quote = multiline and not need_triple and ("'" in value) and ('"' in value) and ('#' in value) + check_for_single = (no_lists_no_quotes or not need_triple) and not hash_triple_quote + + if check_for_single: + if not self.list_values: + # we don't quote if ``list_values=False`` + quot = noquot + # for normal values either single or double quotes will do + elif '\n' in value: + # will only happen if multiline is off - e.g. '\n' in key + raise ConfigObjError('Value "%s" cannot be safely quoted.' % value) + elif ((value[0] not in wspace_plus) and + (value[-1] not in wspace_plus) and + (',' not in value)): + quot = noquot + else: + quot = self._get_single_quote(value) + else: + # if value has '\n' or "'" *and* '"', it will need triple quotes + quot = self._get_triple_quote(value) + + if quot == noquot and '#' in value and self.list_values: + quot = self._get_single_quote(value) + + return quot % value + + + def _get_single_quote(self, value): + if ("'" in value) and ('"' in value): + raise ConfigObjError('Value "%s" cannot be safely quoted.' % value) + elif '"' in value: + quot = squot + else: + quot = dquot + return quot + + + def _get_triple_quote(self, value): + if (value.find('"""') != -1) and (value.find("'''") != -1): + raise ConfigObjError('Value "%s" cannot be safely quoted.' % value) + if value.find('"""') == -1: + quot = tdquot + else: + quot = tsquot + return quot + + + def _handle_value(self, value): + """ + Given a value string, unquote, remove comment, + handle lists. (including empty and single member lists) + """ + if self._inspec: + # Parsing a configspec so don't handle comments + return (value, '') + # do we look for lists in values ? 
+ if not self.list_values: + mat = self._nolistvalue.match(value) + if mat is None: + raise SyntaxError() + # NOTE: we don't unquote here + return mat.groups() + # + mat = self._valueexp.match(value) + if mat is None: + # the value is badly constructed, probably badly quoted, + # or an invalid list + raise SyntaxError() + (list_values, single, empty_list, comment) = mat.groups() + if (list_values == '') and (single is None): + # change this if you want to accept empty values + raise SyntaxError() + # NOTE: note there is no error handling from here if the regex + # is wrong: then incorrect values will slip through + if empty_list is not None: + # the single comma - meaning an empty list + return ([], comment) + if single is not None: + # handle empty values + if list_values and not single: + # FIXME: the '' is a workaround because our regex now matches + # '' at the end of a list if it has a trailing comma + single = None + else: + single = single or '""' + single = self._unquote(single) + if list_values == '': + # not a list value + return (single, comment) + the_list = self._listvalueexp.findall(list_values) + the_list = [self._unquote(val) for val in the_list] + if single is not None: + the_list += [single] + return (the_list, comment) + + + def _multiline(self, value, infile, cur_index, maxline): + """Extract the value, where we are in a multiline situation.""" + quot = value[:3] + newvalue = value[3:] + single_line = self._triple_quote[quot][0] + multi_line = self._triple_quote[quot][1] + mat = single_line.match(value) + if mat is not None: + retval = list(mat.groups()) + retval.append(cur_index) + return retval + elif newvalue.find(quot) != -1: + # somehow the triple quote is missing + raise SyntaxError() + # + while cur_index < maxline: + cur_index += 1 + newvalue += '\n' + line = infile[cur_index] + if line.find(quot) == -1: + newvalue += line + else: + # end of multiline, process it + break + else: + # we've got to the end of the config, oops... + raise SyntaxError() + mat = multi_line.match(line) + if mat is None: + # a badly formed line + raise SyntaxError() + (value, comment) = mat.groups() + return (newvalue + value, comment, cur_index) + + + def _handle_configspec(self, configspec): + """Parse the configspec.""" + # FIXME: Should we check that the configspec was created with the + # correct settings ? (i.e. ``list_values=False``) + if not isinstance(configspec, ConfigObj): + try: + configspec = ConfigObj(configspec, + raise_errors=True, + file_error=True, + _inspec=True) + except ConfigObjError as e: + # FIXME: Should these errors have a reference + # to the already parsed ConfigObj ? + raise ConfigspecError('Parsing configspec failed: %s' % e) + except IOError as e: + raise IOError('Reading configspec failed: %s' % e) + + self.configspec = configspec + + + + def _set_configspec(self, section, copy): + """ + Called by validate. 
Handles setting the configspec on subsections + including sections to be validated by __many__ + """ + configspec = section.configspec + many = configspec.get('__many__') + if isinstance(many, dict): + for entry in section.sections: + if entry not in configspec: + section[entry].configspec = many + + for entry in configspec.sections: + if entry == '__many__': + continue + if entry not in section: + section[entry] = {} + section[entry]._created = True + if copy: + # copy comments + section.comments[entry] = configspec.comments.get(entry, []) + section.inline_comments[entry] = configspec.inline_comments.get(entry, '') + + # Could be a scalar when we expect a section + if isinstance(section[entry], Section): + section[entry].configspec = configspec[entry] + + + def _write_line(self, indent_string, entry, this_entry, comment): + """Write an individual line, for the write method""" + # NOTE: the calls to self._quote here handles non-StringType values. + if not self.unrepr: + val = self._decode_element(self._quote(this_entry)) + else: + val = repr(this_entry) + return '%s%s%s%s%s' % (indent_string, + self._decode_element(self._quote(entry, multiline=False)), + self._a_to_u(' = '), + val, + self._decode_element(comment)) + + + def _write_marker(self, indent_string, depth, entry, comment): + """Write a section marker line""" + return '%s%s%s%s%s' % (indent_string, + self._a_to_u('[' * depth), + self._quote(self._decode_element(entry), multiline=False), + self._a_to_u(']' * depth), + self._decode_element(comment)) + + + def _handle_comment(self, comment): + """Deal with a comment.""" + if not comment: + return '' + start = self.indent_type + if not comment.startswith('#'): + start += self._a_to_u(' # ') + return (start + comment) + + + # Public methods + + def write(self, outfile=None, section=None): + """ + Write the current ConfigObj as a file + + tekNico: FIXME: use StringIO instead of real files + + >>> filename = a.filename + >>> a.filename = 'test.ini' + >>> a.write() + >>> a.filename = filename + >>> a == ConfigObj('test.ini', raise_errors=True) + 1 + >>> import os + >>> os.remove('test.ini') + """ + if self.indent_type is None: + # this can be true if initialised from a dictionary + self.indent_type = DEFAULT_INDENT_TYPE + + out = [] + cs = self._a_to_u('#') + csp = self._a_to_u('# ') + if section is None: + int_val = self.interpolation + self.interpolation = False + section = self + for line in self.initial_comment: + line = self._decode_element(line) + stripped_line = line.strip() + if stripped_line and not stripped_line.startswith(cs): + line = csp + line + out.append(line) + + indent_string = self.indent_type * section.depth + for entry in (section.scalars + section.sections): + if entry in section.defaults: + # don't write out default values + continue + for comment_line in section.comments[entry]: + comment_line = self._decode_element(comment_line.lstrip()) + if comment_line and not comment_line.startswith(cs): + comment_line = csp + comment_line + out.append(indent_string + comment_line) + this_entry = section[entry] + comment = self._handle_comment(section.inline_comments[entry]) + + if isinstance(this_entry, Section): + # a section + out.append(self._write_marker( + indent_string, + this_entry.depth, + entry, + comment)) + out.extend(self.write(section=this_entry)) + else: + out.append(self._write_line( + indent_string, + entry, + this_entry, + comment)) + + if section is self: + for line in self.final_comment: + line = self._decode_element(line) + stripped_line = line.strip() + if 
stripped_line and not stripped_line.startswith(cs): + line = csp + line + out.append(line) + self.interpolation = int_val + + if section is not self: + return out + + if (self.filename is None) and (outfile is None): + # output a list of lines + # might need to encode + # NOTE: This will *screw* UTF16, each line will start with the BOM + if self.encoding: + out = [l.encode(self.encoding) for l in out] + if (self.BOM and ((self.encoding is None) or + (BOM_LIST.get(self.encoding.lower()) == 'utf_8'))): + # Add the UTF8 BOM + if not out: + out.append('') + out[0] = BOM_UTF8 + out[0] + return out + + # Turn the list to a string, joined with correct newlines + newline = self.newlines or os.linesep + if (getattr(outfile, 'mode', None) is not None and outfile.mode == 'w' + and sys.platform == 'win32' and newline == '\r\n'): + # Windows specific hack to avoid writing '\r\r\n' + newline = '\n' + output = self._a_to_u(newline).join(out) + if not output.endswith(newline): + output += newline + + if isinstance(output, six.binary_type): + output_bytes = output + else: + output_bytes = output.encode(self.encoding or + self.default_encoding or + 'ascii') + + if self.BOM and ((self.encoding is None) or match_utf8(self.encoding)): + # Add the UTF8 BOM + output_bytes = BOM_UTF8 + output_bytes + + if outfile is not None: + outfile.write(output_bytes) + else: + with open(self.filename, 'wb') as h: + h.write(output_bytes) + + def validate(self, validator, preserve_errors=False, copy=False, + section=None): + """ + Test the ConfigObj against a configspec. + + It uses the ``validator`` object from *validate.py*. + + To run ``validate`` on the current ConfigObj, call: :: + + test = config.validate(validator) + + (Normally having previously passed in the configspec when the ConfigObj + was created - you can dynamically assign a dictionary of checks to the + ``configspec`` attribute of a section though). + + It returns ``True`` if everything passes, or a dictionary of + pass/fails (True/False). If every member of a subsection passes, it + will just have the value ``True``. (It also returns ``False`` if all + members fail). + + In addition, it converts the values from strings to their native + types if their checks pass (and ``stringify`` is set). + + If ``preserve_errors`` is ``True`` (``False`` is default) then instead + of a marking a fail with a ``False``, it will preserve the actual + exception object. This can contain info about the reason for failure. + For example the ``VdtValueTooSmallError`` indicates that the value + supplied was too small. If a value (or section) is missing it will + still be marked as ``False``. + + You must have the validate module to use ``preserve_errors=True``. + + You can then use the ``flatten_errors`` function to turn your nested + results dictionary into a flattened list of failures - useful for + displaying meaningful error messages. 
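+
+        A typical round trip, as a sketch (file names are illustrative)::
+
+            from validate import Validator
+            cfg = ConfigObj('app.ini', configspec='app.spec')
+            result = cfg.validate(Validator(), preserve_errors=True)
+            if result is not True:
+                for sections, key, error in flatten_errors(cfg, result):
+                    print(sections, key, error)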
+ """ + if section is None: + if self.configspec is None: + raise ValueError('No configspec supplied.') + if preserve_errors: + # We do this once to remove a top level dependency on the validate module + # Which makes importing configobj faster + from validate import VdtMissingValue + self._vdtMissingValue = VdtMissingValue + + section = self + + if copy: + section.initial_comment = section.configspec.initial_comment + section.final_comment = section.configspec.final_comment + section.encoding = section.configspec.encoding + section.BOM = section.configspec.BOM + section.newlines = section.configspec.newlines + section.indent_type = section.configspec.indent_type + + # + # section.default_values.clear() #?? + configspec = section.configspec + self._set_configspec(section, copy) + + + def validate_entry(entry, spec, val, missing, ret_true, ret_false): + section.default_values.pop(entry, None) + + try: + section.default_values[entry] = validator.get_default_value(configspec[entry]) + except (KeyError, AttributeError, validator.baseErrorClass): + # No default, bad default or validator has no 'get_default_value' + # (e.g. SimpleVal) + pass + + try: + check = validator.check(spec, + val, + missing=missing + ) + except validator.baseErrorClass as e: + if not preserve_errors or isinstance(e, self._vdtMissingValue): + out[entry] = False + else: + # preserve the error + out[entry] = e + ret_false = False + ret_true = False + else: + ret_false = False + out[entry] = True + if self.stringify or missing: + # if we are doing type conversion + # or the value is a supplied default + if not self.stringify: + if isinstance(check, (list, tuple)): + # preserve lists + check = [self._str(item) for item in check] + elif missing and check is None: + # convert the None from a default to a '' + check = '' + else: + check = self._str(check) + if (check != val) or missing: + section[entry] = check + if not copy and missing and entry not in section.defaults: + section.defaults.append(entry) + return ret_true, ret_false + + # + out = {} + ret_true = True + ret_false = True + + unvalidated = [k for k in section.scalars if k not in configspec] + incorrect_sections = [k for k in configspec.sections if k in section.scalars] + incorrect_scalars = [k for k in configspec.scalars if k in section.sections] + + for entry in configspec.scalars: + if entry in ('__many__', '___many___'): + # reserved names + continue + if (not entry in section.scalars) or (entry in section.defaults): + # missing entries + # or entries from defaults + missing = True + val = None + if copy and entry not in section.scalars: + # copy comments + section.comments[entry] = ( + configspec.comments.get(entry, [])) + section.inline_comments[entry] = ( + configspec.inline_comments.get(entry, '')) + # + else: + missing = False + val = section[entry] + + ret_true, ret_false = validate_entry(entry, configspec[entry], val, + missing, ret_true, ret_false) + + many = None + if '__many__' in configspec.scalars: + many = configspec['__many__'] + elif '___many___' in configspec.scalars: + many = configspec['___many___'] + + if many is not None: + for entry in unvalidated: + val = section[entry] + ret_true, ret_false = validate_entry(entry, many, val, False, + ret_true, ret_false) + unvalidated = [] + + for entry in incorrect_scalars: + ret_true = False + if not preserve_errors: + out[entry] = False + else: + ret_false = False + msg = 'Value %r was provided as a section' % entry + out[entry] = validator.baseErrorClass(msg) + for entry in incorrect_sections: + 
ret_true = False + if not preserve_errors: + out[entry] = False + else: + ret_false = False + msg = 'Section %r was provided as a single value' % entry + out[entry] = validator.baseErrorClass(msg) + + # Missing sections will have been created as empty ones when the + # configspec was read. + for entry in section.sections: + # FIXME: this means DEFAULT is not copied in copy mode + if section is self and entry == 'DEFAULT': + continue + if section[entry].configspec is None: + unvalidated.append(entry) + continue + if copy: + section.comments[entry] = configspec.comments.get(entry, []) + section.inline_comments[entry] = configspec.inline_comments.get(entry, '') + check = self.validate(validator, preserve_errors=preserve_errors, copy=copy, section=section[entry]) + out[entry] = check + if check == False: + ret_true = False + elif check == True: + ret_false = False + else: + ret_true = False + + section.extra_values = unvalidated + if preserve_errors and not section._created: + # If the section wasn't created (i.e. it wasn't missing) + # then we can't return False, we need to preserve errors + ret_false = False + # + if ret_false and preserve_errors and out: + # If we are preserving errors, but all + # the failures are from missing sections / values + # then we can return False. Otherwise there is a + # real failure that we need to preserve. + ret_false = not any(out.values()) + if ret_true: + return True + elif ret_false: + return False + return out + + + def reset(self): + """Clear ConfigObj instance and restore to 'freshly created' state.""" + self.clear() + self._initialise() + # FIXME: Should be done by '_initialise', but ConfigObj constructor (and reload) + # requires an empty dictionary + self.configspec = None + # Just to be sure ;-) + self._original_configspec = None + + + def reload(self): + """ + Reload a ConfigObj from file. + + This method raises a ``ReloadError`` if the ConfigObj doesn't have + a filename attribute pointing to a file. + """ + if not isinstance(self.filename, six.string_types): + raise ReloadError() + + filename = self.filename + current_options = {} + for entry in OPTION_DEFAULTS: + if entry == 'configspec': + continue + current_options[entry] = getattr(self, entry) + + configspec = self._original_configspec + current_options['configspec'] = configspec + + self.clear() + self._initialise(current_options) + self._load(filename, configspec) + + + +class SimpleVal(object): + """ + A simple validator. + Can be used to check that all members expected are present. + + To use it, provide a configspec with all your members in (the value given + will be ignored). Pass an instance of ``SimpleVal`` to the ``validate`` + method of your ``ConfigObj``. ``validate`` will return ``True`` if all + members are present, or a dictionary with True/False meaning + present/missing. (Whole missing sections will be replaced with ``False``) + """ + + def __init__(self): + self.baseErrorClass = ConfigObjError + + def check(self, check, member, missing=False): + """A dummy check method, always returns the value unchanged.""" + if missing: + raise self.baseErrorClass() + return member + + +def flatten_errors(cfg, res, levels=None, results=None): + """ + An example function that will turn a nested dictionary of results + (as returned by ``ConfigObj.validate``) into a flat list. + + ``cfg`` is the ConfigObj instance being checked, ``res`` is the results + dictionary returned by ``validate``. 
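+
+    For example, a failing 'port' key inside a [server] section would be
+    reported as ``(['server'], 'port', result)``, where ``result`` is
+    ``False`` or the preserved exception (names illustrative).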
+ + (This is a recursive function, so you shouldn't use the ``levels`` or + ``results`` arguments - they are used by the function.) + + Returns a list of keys that failed. Each member of the list is a tuple:: + + ([list of sections...], key, result) + + If ``validate`` was called with ``preserve_errors=False`` (the default) + then ``result`` will always be ``False``. + + *list of sections* is a flattened list of sections that the key was found + in. + + If the section was missing (or a section was expected and a scalar provided + - or vice-versa) then key will be ``None``. + + If the value (or section) was missing then ``result`` will be ``False``. + + If ``validate`` was called with ``preserve_errors=True`` and a value + was present, but failed the check, then ``result`` will be the exception + object returned. You can use this as a string that describes the failure. + + For example *The value "3" is of the wrong type*. + """ + if levels is None: + # first time called + levels = [] + results = [] + if res == True: + return sorted(results) + if res == False or isinstance(res, Exception): + results.append((levels[:], None, res)) + if levels: + levels.pop() + return sorted(results) + for (key, val) in list(res.items()): + if val == True: + continue + if isinstance(cfg.get(key), collections.Mapping): + # Go down one level + levels.append(key) + flatten_errors(cfg[key], val, levels, results) + continue + results.append((levels[:], key, val)) + # + # Go up one level + if levels: + levels.pop() + # + return sorted(results) + + +def get_extra_values(conf, _prepend=()): + """ + Find all the values and sections not in the configspec from a validated + ConfigObj. + + ``get_extra_values`` returns a list of tuples where each tuple represents + either an extra section, or an extra value. + + The tuples contain two values, a tuple representing the section the value + is in and the name of the extra values. For extra values in the top level + section the first member will be an empty tuple. For values in the 'foo' + section the first member will be ``('foo',)``. For members in the 'bar' + subsection of the 'foo' section the first member will be ``('foo', 'bar')``. + + NOTE: If you call ``get_extra_values`` on a ConfigObj instance that hasn't + been validated it will return an empty list. + """ + out = [] + + out.extend([(_prepend, name) for name in conf.extra_values]) + for name in conf.sections: + if name not in conf.extra_values: + out.extend(get_extra_values(conf[name], _prepend + (name,))) + return out + + +"""*A programming language is a medium of expression.* - Paul Graham""" diff --git a/astropy/extern/configobj/validate.py b/astropy/extern/configobj/validate.py new file mode 100755 index 0000000..95727f6 --- /dev/null +++ b/astropy/extern/configobj/validate.py @@ -0,0 +1,1473 @@ +# validate.py +# A Validator object +# Copyright (C) 2005-2014: +# (name) : (email) +# Michael Foord: fuzzyman AT voidspace DOT org DOT uk +# Mark Andrews: mark AT la-la DOT com +# Nicola Larosa: nico AT tekNico DOT net +# Rob Dennis: rdennis AT gmail DOT com +# Eli Courtwright: eli AT courtwright DOT org + +# This software is licensed under the terms of the BSD license. +# http://opensource.org/licenses/BSD-3-Clause + +# ConfigObj 5 - main repository for documentation and issue tracking: +# https://github.com/DiffSK/configobj + +""" + The Validator object is used to check that supplied values + conform to a specification. + + The value can be supplied as a string - e.g. from a config file. 
+ In this case the check will also *convert* the value to + the required type. This allows you to add validation + as a transparent layer to access data stored as strings. + The validation checks that the data is correct *and* + converts it to the expected type. + + Some standard checks are provided for basic data types. + Additional checks are easy to write. They can be + provided when the ``Validator`` is instantiated or + added afterwards. + + The standard functions work with the following basic data types : + + * integers + * floats + * booleans + * strings + * ip_addr + + plus lists of these datatypes + + Adding additional checks is done through coding simple functions. + + The full set of standard checks are : + + * 'integer': matches integer values (including negative) + Takes optional 'min' and 'max' arguments : :: + + integer() + integer(3, 9) # any value from 3 to 9 + integer(min=0) # any positive value + integer(max=9) + + * 'float': matches float values + Has the same parameters as the integer check. + + * 'boolean': matches boolean values - ``True`` or ``False`` + Acceptable string values for True are : + true, on, yes, 1 + Acceptable string values for False are : + false, off, no, 0 + + Any other value raises an error. + + * 'ip_addr': matches an Internet Protocol address, v.4, represented + by a dotted-quad string, i.e. '1.2.3.4'. + + * 'string': matches any string. + Takes optional keyword args 'min' and 'max' + to specify min and max lengths of the string. + + * 'list': matches any list. + Takes optional keyword args 'min', and 'max' to specify min and + max sizes of the list. (Always returns a list.) + + * 'tuple': matches any tuple. + Takes optional keyword args 'min', and 'max' to specify min and + max sizes of the tuple. (Always returns a tuple.) + + * 'int_list': Matches a list of integers. + Takes the same arguments as list. + + * 'float_list': Matches a list of floats. + Takes the same arguments as list. + + * 'bool_list': Matches a list of boolean values. + Takes the same arguments as list. + + * 'ip_addr_list': Matches a list of IP addresses. + Takes the same arguments as list. + + * 'string_list': Matches a list of strings. + Takes the same arguments as list. + + * 'mixed_list': Matches a list with different types in + specific positions. List size must match + the number of arguments. + + Each position can be one of : + 'integer', 'float', 'ip_addr', 'string', 'boolean' + + So to specify a list with two strings followed + by two integers, you write the check as : :: + + mixed_list('string', 'string', 'integer', 'integer') + + * 'pass': This check matches everything ! It never fails + and the value is unchanged. + + It is also the default if no check is specified. + + * 'option': This check matches any from a list of options. + You specify this check with : :: + + option('option 1', 'option 2', 'option 3') + + You can supply a default value (returned if no value is supplied) + using the default keyword argument. + + You specify a list argument for default using a list constructor syntax in + the check : :: + + checkname(arg1, arg2, default=list('val 1', 'val 2', 'val 3')) + + A badly formatted set of arguments will raise a ``VdtParamError``. 
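+
+    A minimal usage sketch of the checks above (values illustrative)::
+
+        vtor = Validator()
+        vtor.check('integer(0, 100)', '25')   # -> 25, converted to int
+        vtor.check('boolean', 'yes')          # -> True
+        vtor.check('string(min=3)', 'ab')     # raises VdtValueTooShortError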
+""" + +__version__ = '1.0.1' + + +__all__ = ( + '__version__', + 'dottedQuadToNum', + 'numToDottedQuad', + 'ValidateError', + 'VdtUnknownCheckError', + 'VdtParamError', + 'VdtTypeError', + 'VdtValueError', + 'VdtValueTooSmallError', + 'VdtValueTooBigError', + 'VdtValueTooShortError', + 'VdtValueTooLongError', + 'VdtMissingValue', + 'Validator', + 'is_integer', + 'is_float', + 'is_boolean', + 'is_list', + 'is_tuple', + 'is_ip_addr', + 'is_string', + 'is_int_list', + 'is_bool_list', + 'is_float_list', + 'is_string_list', + 'is_ip_addr_list', + 'is_mixed_list', + 'is_option', + '__docformat__', +) + + +import re +import sys +from pprint import pprint +from ...extern.six.moves import zip + +#TODO - #21 - six is part of the repo now, but we didn't switch over to it here +# this could be replaced if six is used for compatibility, or there are no +# more assertions about items being a string +if sys.version_info < (3,): + string_type = basestring +else: + string_type = str + # so tests that care about unicode on 2.x can specify unicode, and the same + # tests when run on 3.x won't complain about a undefined name "unicode" + # since all strings are unicode on 3.x we just want to pass it through + # unchanged + unicode = lambda x: x + # in python 3, all ints are equivalent to python 2 longs, and they'll + # never show "L" in the repr + long = int + +_list_arg = re.compile(r''' + (?: + ([a-zA-Z_][a-zA-Z0-9_]*)\s*=\s*list\( + ( + (?: + \s* + (?: + (?:".*?")| # double quotes + (?:'.*?')| # single quotes + (?:[^'",\s\)][^,\)]*?) # unquoted + ) + \s*,\s* + )* + (?: + (?:".*?")| # double quotes + (?:'.*?')| # single quotes + (?:[^'",\s\)][^,\)]*?) # unquoted + )? # last one + ) + \) + ) +''', re.VERBOSE | re.DOTALL) # two groups + +_list_members = re.compile(r''' + ( + (?:".*?")| # double quotes + (?:'.*?')| # single quotes + (?:[^'",\s=][^,=]*?) # unquoted + ) + (?: + (?:\s*,\s*)|(?:\s*$) # comma + ) +''', re.VERBOSE | re.DOTALL) # one group + +_paramstring = r''' + (?: + ( + (?: + [a-zA-Z_][a-zA-Z0-9_]*\s*=\s*list\( + (?: + \s* + (?: + (?:".*?")| # double quotes + (?:'.*?')| # single quotes + (?:[^'",\s\)][^,\)]*?) # unquoted + ) + \s*,\s* + )* + (?: + (?:".*?")| # double quotes + (?:'.*?')| # single quotes + (?:[^'",\s\)][^,\)]*?) # unquoted + )? # last one + \) + )| + (?: + (?:".*?")| # double quotes + (?:'.*?')| # single quotes + (?:[^'",\s=][^,=]*?)| # unquoted + (?: # keyword argument + [a-zA-Z_][a-zA-Z0-9_]*\s*=\s* + (?: + (?:".*?")| # double quotes + (?:'.*?')| # single quotes + (?:[^'",\s=][^,=]*?) # unquoted + ) + ) + ) + ) + (?: + (?:\s*,\s*)|(?:\s*$) # comma + ) + ) + ''' + +_matchstring = '^%s*' % _paramstring + +# Python pre 2.2.1 doesn't have bool +try: + bool +except NameError: + def bool(val): + """Simple boolean equivalent function. 
""" + if val: + return 1 + else: + return 0 + + +def dottedQuadToNum(ip): + """ + Convert decimal dotted quad string to long integer + + >>> int(dottedQuadToNum('1 ')) + 1 + >>> int(dottedQuadToNum(' 1.2')) + 16777218 + >>> int(dottedQuadToNum(' 1.2.3 ')) + 16908291 + >>> int(dottedQuadToNum('1.2.3.4')) + 16909060 + >>> dottedQuadToNum('255.255.255.255') + 4294967295 + >>> dottedQuadToNum('255.255.255.256') + Traceback (most recent call last): + ValueError: Not a good dotted-quad IP: 255.255.255.256 + """ + + # import here to avoid it when ip_addr values are not used + import socket, struct + + try: + return struct.unpack('!L', + socket.inet_aton(ip.strip()))[0] + except socket.error: + raise ValueError('Not a good dotted-quad IP: %s' % ip) + return + + +def numToDottedQuad(num): + """ + Convert int or long int to dotted quad string + + >>> numToDottedQuad(long(-1)) + Traceback (most recent call last): + ValueError: Not a good numeric IP: -1 + >>> numToDottedQuad(long(1)) + '0.0.0.1' + >>> numToDottedQuad(long(16777218)) + '1.0.0.2' + >>> numToDottedQuad(long(16908291)) + '1.2.0.3' + >>> numToDottedQuad(long(16909060)) + '1.2.3.4' + >>> numToDottedQuad(long(4294967295)) + '255.255.255.255' + >>> numToDottedQuad(long(4294967296)) + Traceback (most recent call last): + ValueError: Not a good numeric IP: 4294967296 + >>> numToDottedQuad(-1) + Traceback (most recent call last): + ValueError: Not a good numeric IP: -1 + >>> numToDottedQuad(1) + '0.0.0.1' + >>> numToDottedQuad(16777218) + '1.0.0.2' + >>> numToDottedQuad(16908291) + '1.2.0.3' + >>> numToDottedQuad(16909060) + '1.2.3.4' + >>> numToDottedQuad(4294967295) + '255.255.255.255' + >>> numToDottedQuad(4294967296) + Traceback (most recent call last): + ValueError: Not a good numeric IP: 4294967296 + + """ + + # import here to avoid it when ip_addr values are not used + import socket, struct + + # no need to intercept here, 4294967295L is fine + if num > long(4294967295) or num < 0: + raise ValueError('Not a good numeric IP: %s' % num) + try: + return socket.inet_ntoa( + struct.pack('!L', long(num))) + except (socket.error, struct.error, OverflowError): + raise ValueError('Not a good numeric IP: %s' % num) + + +class ValidateError(Exception): + """ + This error indicates that the check failed. + It can be the base class for more specific errors. + + Any check function that fails ought to raise this error. + (or a subclass) + + >>> raise ValidateError + Traceback (most recent call last): + ValidateError + """ + + +class VdtMissingValue(ValidateError): + """No value was supplied to a check that needed one.""" + + +class VdtUnknownCheckError(ValidateError): + """An unknown check function was requested""" + + def __init__(self, value): + """ + >>> raise VdtUnknownCheckError('yoda') + Traceback (most recent call last): + VdtUnknownCheckError: the check "yoda" is unknown. + """ + ValidateError.__init__(self, 'the check "%s" is unknown.' % (value,)) + + +class VdtParamError(SyntaxError): + """An incorrect parameter was passed""" + + def __init__(self, name, value): + """ + >>> raise VdtParamError('yoda', 'jedi') + Traceback (most recent call last): + VdtParamError: passed an incorrect value "jedi" for parameter "yoda". + """ + SyntaxError.__init__(self, 'passed an incorrect value "%s" for parameter "%s".' 
% (value, name)) + + +class VdtTypeError(ValidateError): + """The value supplied was of the wrong type""" + + def __init__(self, value): + """ + >>> raise VdtTypeError('jedi') + Traceback (most recent call last): + VdtTypeError: the value "jedi" is of the wrong type. + """ + ValidateError.__init__(self, 'the value "%s" is of the wrong type.' % (value,)) + + +class VdtValueError(ValidateError): + """The value supplied was of the correct type, but was not an allowed value.""" + + def __init__(self, value): + """ + >>> raise VdtValueError('jedi') + Traceback (most recent call last): + VdtValueError: the value "jedi" is unacceptable. + """ + ValidateError.__init__(self, 'the value "%s" is unacceptable.' % (value,)) + + +class VdtValueTooSmallError(VdtValueError): + """The value supplied was of the correct type, but was too small.""" + + def __init__(self, value): + """ + >>> raise VdtValueTooSmallError('0') + Traceback (most recent call last): + VdtValueTooSmallError: the value "0" is too small. + """ + ValidateError.__init__(self, 'the value "%s" is too small.' % (value,)) + + +class VdtValueTooBigError(VdtValueError): + """The value supplied was of the correct type, but was too big.""" + + def __init__(self, value): + """ + >>> raise VdtValueTooBigError('1') + Traceback (most recent call last): + VdtValueTooBigError: the value "1" is too big. + """ + ValidateError.__init__(self, 'the value "%s" is too big.' % (value,)) + + +class VdtValueTooShortError(VdtValueError): + """The value supplied was of the correct type, but was too short.""" + + def __init__(self, value): + """ + >>> raise VdtValueTooShortError('jed') + Traceback (most recent call last): + VdtValueTooShortError: the value "jed" is too short. + """ + ValidateError.__init__( + self, + 'the value "%s" is too short.' % (value,)) + + +class VdtValueTooLongError(VdtValueError): + """The value supplied was of the correct type, but was too long.""" + + def __init__(self, value): + """ + >>> raise VdtValueTooLongError('jedie') + Traceback (most recent call last): + VdtValueTooLongError: the value "jedie" is too long. + """ + ValidateError.__init__(self, 'the value "%s" is too long.' % (value,)) + + +class Validator(object): + """ + Validator is an object that allows you to register a set of 'checks'. + These checks take input and test that it conforms to the check. + + This can also involve converting the value from a string into + the correct datatype. + + The ``check`` method takes an input string which configures which + check is to be used and applies that check to a supplied value. + + An example input string would be: + 'int_range(param1, param2)' + + You would then provide something like: + + >>> def int_range_check(value, min, max): + ... # turn min and max from strings to integers + ... min = int(min) + ... max = int(max) + ... # check that value is of the correct type. + ... # possible valid inputs are integers or strings + ... # that represent integers + ... if not isinstance(value, (int, long, string_type)): + ... raise VdtTypeError(value) + ... elif isinstance(value, string_type): + ... # if we are given a string + ... # attempt to convert to an integer + ... try: + ... value = int(value) + ... except ValueError: + ... raise VdtValueError(value) + ... # check the value is between our constraints + ... if not min <= value: + ... raise VdtValueTooSmallError(value) + ... if not value <= max: + ... raise VdtValueTooBigError(value) + ... 
return value + + >>> fdict = {'int_range': int_range_check} + >>> vtr1 = Validator(fdict) + >>> vtr1.check('int_range(20, 40)', '30') + 30 + >>> vtr1.check('int_range(20, 40)', '60') + Traceback (most recent call last): + VdtValueTooBigError: the value "60" is too big. + + New functions can be added with : :: + + >>> vtr2 = Validator() + >>> vtr2.functions['int_range'] = int_range_check + + Or by passing in a dictionary of functions when Validator + is instantiated. + + Your functions *can* use keyword arguments, + but the first argument should always be 'value'. + + If the function doesn't take additional arguments, + the parentheses are optional in the check. + It can be written with either of : :: + + keyword = function_name + keyword = function_name() + + The first program to utilise Validator() was Michael Foord's + ConfigObj, an alternative to ConfigParser which supports lists and + can validate a config file using a config schema. + For more details on using Validator with ConfigObj see: + https://configobj.readthedocs.org/en/latest/configobj.html + """ + + # this regex does the initial parsing of the checks + _func_re = re.compile(r'(.+?)\((.*)\)', re.DOTALL) + + # this regex takes apart keyword arguments + _key_arg = re.compile(r'^([a-zA-Z_][a-zA-Z0-9_]*)\s*=\s*(.*)$', re.DOTALL) + + + # this regex finds keyword=list(....) type values + _list_arg = _list_arg + + # this regex takes individual values out of lists - in one pass + _list_members = _list_members + + # These regexes check a set of arguments for validity + # and then pull the members out + _paramfinder = re.compile(_paramstring, re.VERBOSE | re.DOTALL) + _matchfinder = re.compile(_matchstring, re.VERBOSE | re.DOTALL) + + + def __init__(self, functions=None): + """ + >>> vtri = Validator() + """ + self.functions = { + '': self._pass, + 'integer': is_integer, + 'float': is_float, + 'boolean': is_boolean, + 'ip_addr': is_ip_addr, + 'string': is_string, + 'list': is_list, + 'tuple': is_tuple, + 'int_list': is_int_list, + 'float_list': is_float_list, + 'bool_list': is_bool_list, + 'ip_addr_list': is_ip_addr_list, + 'string_list': is_string_list, + 'mixed_list': is_mixed_list, + 'pass': self._pass, + 'option': is_option, + 'force_list': force_list, + } + if functions is not None: + self.functions.update(functions) + # tekNico: for use by ConfigObj + self.baseErrorClass = ValidateError + self._cache = {} + + + def check(self, check, value, missing=False): + """ + Usage: check(check, value) + + Arguments: + check: string representing check to apply (including arguments) + value: object to be checked + Returns value, converted to correct type if necessary + + If the check fails, raises a ``ValidateError`` subclass. + + >>> vtor.check('yoda', '') + Traceback (most recent call last): + VdtUnknownCheckError: the check "yoda" is unknown. + >>> vtor.check('yoda()', '') + Traceback (most recent call last): + VdtUnknownCheckError: the check "yoda" is unknown. 
+
+        >>> vtor.check('string(default="")', '', missing=True)
+        ''
+        """
+        fun_name, fun_args, fun_kwargs, default = self._parse_with_caching(check)
+
+        if missing:
+            if default is None:
+                # no information needed here - to be handled by caller
+                raise VdtMissingValue()
+            value = self._handle_none(default)
+
+        if value is None:
+            return None
+
+        return self._check_value(value, fun_name, fun_args, fun_kwargs)
+
+
+    def _handle_none(self, value):
+        if value == 'None':
+            return None
+        elif value in ("'None'", '"None"'):
+            # Special case a quoted None
+            value = self._unquote(value)
+        return value
+
+
+    def _parse_with_caching(self, check):
+        if check in self._cache:
+            fun_name, fun_args, fun_kwargs, default = self._cache[check]
+            # We call list and dict below to work with *copies* of the data
+            # rather than the original (which are mutable of course)
+            fun_args = list(fun_args)
+            fun_kwargs = dict(fun_kwargs)
+        else:
+            fun_name, fun_args, fun_kwargs, default = self._parse_check(check)
+            fun_kwargs = dict([(str(key), value) for (key, value) in list(fun_kwargs.items())])
+            self._cache[check] = fun_name, list(fun_args), dict(fun_kwargs), default
+        return fun_name, fun_args, fun_kwargs, default
+
+
+    def _check_value(self, value, fun_name, fun_args, fun_kwargs):
+        try:
+            fun = self.functions[fun_name]
+        except KeyError:
+            raise VdtUnknownCheckError(fun_name)
+        else:
+            return fun(value, *fun_args, **fun_kwargs)
+
+
+    def _parse_check(self, check):
+        fun_match = self._func_re.match(check)
+        if fun_match:
+            fun_name = fun_match.group(1)
+            arg_string = fun_match.group(2)
+            arg_match = self._matchfinder.match(arg_string)
+            if arg_match is None:
+                # Bad syntax; note that VdtParamError takes (name, value)
+                raise VdtParamError('check', check)
+            fun_args = []
+            fun_kwargs = {}
+            # pull out args of group 2
+            for arg in self._paramfinder.findall(arg_string):
+                # args may need whitespace removing (before removing quotes)
+                arg = arg.strip()
+                listmatch = self._list_arg.match(arg)
+                if listmatch:
+                    key, val = self._list_handle(listmatch)
+                    fun_kwargs[key] = val
+                    continue
+                keymatch = self._key_arg.match(arg)
+                if keymatch:
+                    val = keymatch.group(2)
+                    if val not in ("'None'", '"None"'):
+                        # Special case a quoted None
+                        val = self._unquote(val)
+                    fun_kwargs[keymatch.group(1)] = val
+                    continue
+
+                fun_args.append(self._unquote(arg))
+        else:
+            # allows for function names without (args)
+            return check, (), {}, None
+
+        # Default must be deleted if the value is specified too,
+        # otherwise the check function will get a spurious "default" keyword arg
+        default = fun_kwargs.pop('default', None)
+        return fun_name, fun_args, fun_kwargs, default
+
+
+    def _unquote(self, val):
+        """Unquote a value if necessary."""
+        if (len(val) >= 2) and (val[0] in ("'", '"')) and (val[0] == val[-1]):
+            val = val[1:-1]
+        return val
+
+
+    def _list_handle(self, listmatch):
+        """Take apart a ``keyword=list('val', 'val')`` type string."""
+        out = []
+        name = listmatch.group(1)
+        args = listmatch.group(2)
+        for arg in self._list_members.findall(args):
+            out.append(self._unquote(arg))
+        return name, out
+
+
+    def _pass(self, value):
+        """
+        Dummy check that always passes
+
+        >>> vtor.check('', 0)
+        0
+        >>> vtor.check('', '0')
+        '0'
+        """
+        return value
+
+
+    def get_default_value(self, check):
+        """
+        Given a check, return the default value for the check
+        (converted to the right type).
+
+        If the check doesn't specify a default value then a
+        ``KeyError`` will be raised.
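+
+        For example (a brief sketch; these mirror the doctests in ``_test``
+        below) :
+
+        >>> vtor.get_default_value('integer(default=6)')
+        6
+        >>> vtor.get_default_value('float(default=6)')
+        6.0
+        >>> vtor.get_default_value('pass')
+        Traceback (most recent call last):
+        KeyError: 'Check "pass" has no default value.'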
+ """ + fun_name, fun_args, fun_kwargs, default = self._parse_with_caching(check) + if default is None: + raise KeyError('Check "%s" has no default value.' % check) + value = self._handle_none(default) + if value is None: + return value + return self._check_value(value, fun_name, fun_args, fun_kwargs) + + +def _is_num_param(names, values, to_float=False): + """ + Return numbers from inputs or raise VdtParamError. + + Lets ``None`` pass through. + Pass in keyword argument ``to_float=True`` to + use float for the conversion rather than int. + + >>> _is_num_param(('', ''), (0, 1.0)) + [0, 1] + >>> _is_num_param(('', ''), (0, 1.0), to_float=True) + [0.0, 1.0] + >>> _is_num_param(('a'), ('a')) + Traceback (most recent call last): + VdtParamError: passed an incorrect value "a" for parameter "a". + """ + fun = to_float and float or int + out_params = [] + for (name, val) in zip(names, values): + if val is None: + out_params.append(val) + elif isinstance(val, (int, long, float, string_type)): + try: + out_params.append(fun(val)) + except ValueError as e: + raise VdtParamError(name, val) + else: + raise VdtParamError(name, val) + return out_params + + +# built in checks +# you can override these by setting the appropriate name +# in Validator.functions +# note: if the params are specified wrongly in your input string, +# you will also raise errors. + +def is_integer(value, min=None, max=None): + """ + A check that tests that a given value is an integer (int, or long) + and optionally, between bounds. A negative value is accepted, while + a float will fail. + + If the value is a string, then the conversion is done - if possible. + Otherwise a VdtError is raised. + + >>> vtor.check('integer', '-1') + -1 + >>> vtor.check('integer', '0') + 0 + >>> vtor.check('integer', 9) + 9 + >>> vtor.check('integer', 'a') + Traceback (most recent call last): + VdtTypeError: the value "a" is of the wrong type. + >>> vtor.check('integer', '2.2') + Traceback (most recent call last): + VdtTypeError: the value "2.2" is of the wrong type. + >>> vtor.check('integer(10)', '20') + 20 + >>> vtor.check('integer(max=20)', '15') + 15 + >>> vtor.check('integer(10)', '9') + Traceback (most recent call last): + VdtValueTooSmallError: the value "9" is too small. + >>> vtor.check('integer(10)', 9) + Traceback (most recent call last): + VdtValueTooSmallError: the value "9" is too small. + >>> vtor.check('integer(max=20)', '35') + Traceback (most recent call last): + VdtValueTooBigError: the value "35" is too big. + >>> vtor.check('integer(max=20)', 35) + Traceback (most recent call last): + VdtValueTooBigError: the value "35" is too big. + >>> vtor.check('integer(0, 9)', False) + 0 + """ + (min_val, max_val) = _is_num_param(('min', 'max'), (min, max)) + if not isinstance(value, (int, long, string_type)): + raise VdtTypeError(value) + if isinstance(value, string_type): + # if it's a string - does it represent an integer ? + try: + value = int(value) + except ValueError: + raise VdtTypeError(value) + if (min_val is not None) and (value < min_val): + raise VdtValueTooSmallError(value) + if (max_val is not None) and (value > max_val): + raise VdtValueTooBigError(value) + return value + + +def is_float(value, min=None, max=None): + """ + A check that tests that a given value is a float + (an integer will be accepted), and optionally - that it is between bounds. + + If the value is a string, then the conversion is done - if possible. + Otherwise a VdtError is raised. + + This can accept negative values. 
+
+    >>> vtor.check('float', '2')
+    2.0
+
+    From now on we multiply the value to avoid comparing decimals
+
+    >>> vtor.check('float', '-6.8') * 10
+    -68.0
+    >>> vtor.check('float', '12.2') * 10
+    122.0
+    >>> vtor.check('float', 8.4) * 10
+    84.0
+    >>> vtor.check('float', 'a')
+    Traceback (most recent call last):
+    VdtTypeError: the value "a" is of the wrong type.
+    >>> vtor.check('float(10.1)', '10.2') * 10
+    102.0
+    >>> vtor.check('float(max=20.2)', '15.1') * 10
+    151.0
+    >>> vtor.check('float(10.0)', '9.0')
+    Traceback (most recent call last):
+    VdtValueTooSmallError: the value "9.0" is too small.
+    >>> vtor.check('float(max=20.0)', '35.0')
+    Traceback (most recent call last):
+    VdtValueTooBigError: the value "35.0" is too big.
+    """
+    (min_val, max_val) = _is_num_param(
+        ('min', 'max'), (min, max), to_float=True)
+    if not isinstance(value, (int, long, float, string_type)):
+        raise VdtTypeError(value)
+    if not isinstance(value, float):
+        # if it's a string - does it represent a float ?
+        try:
+            value = float(value)
+        except ValueError:
+            raise VdtTypeError(value)
+    if (min_val is not None) and (value < min_val):
+        raise VdtValueTooSmallError(value)
+    if (max_val is not None) and (value > max_val):
+        raise VdtValueTooBigError(value)
+    return value
+
+
+bool_dict = {
+    True: True, 'on': True, '1': True, 'true': True, 'yes': True,
+    False: False, 'off': False, '0': False, 'false': False, 'no': False,
+}
+
+
+def is_boolean(value):
+    """
+    Check if the value represents a boolean.
+
+    >>> vtor.check('boolean', 0)
+    0
+    >>> vtor.check('boolean', False)
+    0
+    >>> vtor.check('boolean', '0')
+    0
+    >>> vtor.check('boolean', 'off')
+    0
+    >>> vtor.check('boolean', 'false')
+    0
+    >>> vtor.check('boolean', 'no')
+    0
+    >>> vtor.check('boolean', 'nO')
+    0
+    >>> vtor.check('boolean', 'NO')
+    0
+    >>> vtor.check('boolean', 1)
+    1
+    >>> vtor.check('boolean', True)
+    1
+    >>> vtor.check('boolean', '1')
+    1
+    >>> vtor.check('boolean', 'on')
+    1
+    >>> vtor.check('boolean', 'true')
+    1
+    >>> vtor.check('boolean', 'yes')
+    1
+    >>> vtor.check('boolean', 'Yes')
+    1
+    >>> vtor.check('boolean', 'YES')
+    1
+    >>> vtor.check('boolean', '')
+    Traceback (most recent call last):
+    VdtTypeError: the value "" is of the wrong type.
+    >>> vtor.check('boolean', 'up')
+    Traceback (most recent call last):
+    VdtTypeError: the value "up" is of the wrong type.
+
+    """
+    if isinstance(value, string_type):
+        try:
+            return bool_dict[value.lower()]
+        except KeyError:
+            raise VdtTypeError(value)
+    # we do an equality test rather than an identity test
+    # this ensures Python 2.2 compatibility
+    # and allows 0 and 1 to represent True and False
+    if value == False:
+        return False
+    elif value == True:
+        return True
+    else:
+        raise VdtTypeError(value)
+
+
+def is_ip_addr(value):
+    """
+    Check that the supplied value is an Internet Protocol address, v.4,
+    represented by a dotted-quad string, e.g. '1.2.3.4'.
+
+    >>> vtor.check('ip_addr', '1 ')
+    '1'
+    >>> vtor.check('ip_addr', ' 1.2')
+    '1.2'
+    >>> vtor.check('ip_addr', ' 1.2.3 ')
+    '1.2.3'
+    >>> vtor.check('ip_addr', '1.2.3.4')
+    '1.2.3.4'
+    >>> vtor.check('ip_addr', '0.0.0.0')
+    '0.0.0.0'
+    >>> vtor.check('ip_addr', '255.255.255.255')
+    '255.255.255.255'
+    >>> vtor.check('ip_addr', '255.255.255.256')
+    Traceback (most recent call last):
+    VdtValueError: the value "255.255.255.256" is unacceptable.
+    >>> vtor.check('ip_addr', '1.2.3.4.5')
+    Traceback (most recent call last):
+    VdtValueError: the value "1.2.3.4.5" is unacceptable.
+ >>> vtor.check('ip_addr', 0) + Traceback (most recent call last): + VdtTypeError: the value "0" is of the wrong type. + """ + if not isinstance(value, string_type): + raise VdtTypeError(value) + value = value.strip() + try: + dottedQuadToNum(value) + except ValueError: + raise VdtValueError(value) + return value + + +def is_list(value, min=None, max=None): + """ + Check that the value is a list of values. + + You can optionally specify the minimum and maximum number of members. + + It does no check on list members. + + >>> vtor.check('list', ()) + [] + >>> vtor.check('list', []) + [] + >>> vtor.check('list', (1, 2)) + [1, 2] + >>> vtor.check('list', [1, 2]) + [1, 2] + >>> vtor.check('list(3)', (1, 2)) + Traceback (most recent call last): + VdtValueTooShortError: the value "(1, 2)" is too short. + >>> vtor.check('list(max=5)', (1, 2, 3, 4, 5, 6)) + Traceback (most recent call last): + VdtValueTooLongError: the value "(1, 2, 3, 4, 5, 6)" is too long. + >>> vtor.check('list(min=3, max=5)', (1, 2, 3, 4)) + [1, 2, 3, 4] + >>> vtor.check('list', 0) + Traceback (most recent call last): + VdtTypeError: the value "0" is of the wrong type. + >>> vtor.check('list', '12') + Traceback (most recent call last): + VdtTypeError: the value "12" is of the wrong type. + """ + (min_len, max_len) = _is_num_param(('min', 'max'), (min, max)) + if isinstance(value, string_type): + raise VdtTypeError(value) + try: + num_members = len(value) + except TypeError: + raise VdtTypeError(value) + if min_len is not None and num_members < min_len: + raise VdtValueTooShortError(value) + if max_len is not None and num_members > max_len: + raise VdtValueTooLongError(value) + return list(value) + + +def is_tuple(value, min=None, max=None): + """ + Check that the value is a tuple of values. + + You can optionally specify the minimum and maximum number of members. + + It does no check on members. + + >>> vtor.check('tuple', ()) + () + >>> vtor.check('tuple', []) + () + >>> vtor.check('tuple', (1, 2)) + (1, 2) + >>> vtor.check('tuple', [1, 2]) + (1, 2) + >>> vtor.check('tuple(3)', (1, 2)) + Traceback (most recent call last): + VdtValueTooShortError: the value "(1, 2)" is too short. + >>> vtor.check('tuple(max=5)', (1, 2, 3, 4, 5, 6)) + Traceback (most recent call last): + VdtValueTooLongError: the value "(1, 2, 3, 4, 5, 6)" is too long. + >>> vtor.check('tuple(min=3, max=5)', (1, 2, 3, 4)) + (1, 2, 3, 4) + >>> vtor.check('tuple', 0) + Traceback (most recent call last): + VdtTypeError: the value "0" is of the wrong type. + >>> vtor.check('tuple', '12') + Traceback (most recent call last): + VdtTypeError: the value "12" is of the wrong type. + """ + return tuple(is_list(value, min, max)) + + +def is_string(value, min=None, max=None): + """ + Check that the supplied value is a string. + + You can optionally specify the minimum and maximum number of members. + + >>> vtor.check('string', '0') + '0' + >>> vtor.check('string', 0) + Traceback (most recent call last): + VdtTypeError: the value "0" is of the wrong type. + >>> vtor.check('string(2)', '12') + '12' + >>> vtor.check('string(2)', '1') + Traceback (most recent call last): + VdtValueTooShortError: the value "1" is too short. + >>> vtor.check('string(min=2, max=3)', '123') + '123' + >>> vtor.check('string(min=2, max=3)', '1234') + Traceback (most recent call last): + VdtValueTooLongError: the value "1234" is too long. 
+ """ + if not isinstance(value, string_type): + raise VdtTypeError(value) + (min_len, max_len) = _is_num_param(('min', 'max'), (min, max)) + try: + num_members = len(value) + except TypeError: + raise VdtTypeError(value) + if min_len is not None and num_members < min_len: + raise VdtValueTooShortError(value) + if max_len is not None and num_members > max_len: + raise VdtValueTooLongError(value) + return value + + +def is_int_list(value, min=None, max=None): + """ + Check that the value is a list of integers. + + You can optionally specify the minimum and maximum number of members. + + Each list member is checked that it is an integer. + + >>> vtor.check('int_list', ()) + [] + >>> vtor.check('int_list', []) + [] + >>> vtor.check('int_list', (1, 2)) + [1, 2] + >>> vtor.check('int_list', [1, 2]) + [1, 2] + >>> vtor.check('int_list', [1, 'a']) + Traceback (most recent call last): + VdtTypeError: the value "a" is of the wrong type. + """ + return [is_integer(mem) for mem in is_list(value, min, max)] + + +def is_bool_list(value, min=None, max=None): + """ + Check that the value is a list of booleans. + + You can optionally specify the minimum and maximum number of members. + + Each list member is checked that it is a boolean. + + >>> vtor.check('bool_list', ()) + [] + >>> vtor.check('bool_list', []) + [] + >>> check_res = vtor.check('bool_list', (True, False)) + >>> check_res == [True, False] + 1 + >>> check_res = vtor.check('bool_list', [True, False]) + >>> check_res == [True, False] + 1 + >>> vtor.check('bool_list', [True, 'a']) + Traceback (most recent call last): + VdtTypeError: the value "a" is of the wrong type. + """ + return [is_boolean(mem) for mem in is_list(value, min, max)] + + +def is_float_list(value, min=None, max=None): + """ + Check that the value is a list of floats. + + You can optionally specify the minimum and maximum number of members. + + Each list member is checked that it is a float. + + >>> vtor.check('float_list', ()) + [] + >>> vtor.check('float_list', []) + [] + >>> vtor.check('float_list', (1, 2.0)) + [1.0, 2.0] + >>> vtor.check('float_list', [1, 2.0]) + [1.0, 2.0] + >>> vtor.check('float_list', [1, 'a']) + Traceback (most recent call last): + VdtTypeError: the value "a" is of the wrong type. + """ + return [is_float(mem) for mem in is_list(value, min, max)] + + +def is_string_list(value, min=None, max=None): + """ + Check that the value is a list of strings. + + You can optionally specify the minimum and maximum number of members. + + Each list member is checked that it is a string. + + >>> vtor.check('string_list', ()) + [] + >>> vtor.check('string_list', []) + [] + >>> vtor.check('string_list', ('a', 'b')) + ['a', 'b'] + >>> vtor.check('string_list', ['a', 1]) + Traceback (most recent call last): + VdtTypeError: the value "1" is of the wrong type. + >>> vtor.check('string_list', 'hello') + Traceback (most recent call last): + VdtTypeError: the value "hello" is of the wrong type. + """ + if isinstance(value, string_type): + raise VdtTypeError(value) + return [is_string(mem) for mem in is_list(value, min, max)] + + +def is_ip_addr_list(value, min=None, max=None): + """ + Check that the value is a list of IP addresses. + + You can optionally specify the minimum and maximum number of members. + + Each list member is checked that it is an IP address. 
+
+    >>> vtor.check('ip_addr_list', ())
+    []
+    >>> vtor.check('ip_addr_list', [])
+    []
+    >>> vtor.check('ip_addr_list', ('1.2.3.4', '5.6.7.8'))
+    ['1.2.3.4', '5.6.7.8']
+    >>> vtor.check('ip_addr_list', ['a'])
+    Traceback (most recent call last):
+    VdtValueError: the value "a" is unacceptable.
+    """
+    return [is_ip_addr(mem) for mem in is_list(value, min, max)]
+
+
+def force_list(value, min=None, max=None):
+    """
+    Check that a value is a list, coercing strings into
+    a list with one member. Useful where users forget the
+    trailing comma that turns a single value into a list.
+
+    You can optionally specify the minimum and maximum number of members.
+    A minimum greater than one will fail if the user only supplies a
+    string.
+
+    >>> vtor.check('force_list', ())
+    []
+    >>> vtor.check('force_list', [])
+    []
+    >>> vtor.check('force_list', 'hello')
+    ['hello']
+    """
+    if not isinstance(value, (list, tuple)):
+        value = [value]
+    return is_list(value, min, max)
+
+
+fun_dict = {
+    'integer': is_integer,
+    'float': is_float,
+    'ip_addr': is_ip_addr,
+    'string': is_string,
+    'boolean': is_boolean,
+}
+
+
+def is_mixed_list(value, *args):
+    """
+    Check that the value is a list.
+    Allow specifying the type of each member.
+    Work on lists of specific lengths.
+
+    You specify each member as a positional argument specifying its type.
+
+    Each type should be one of the following strings :
+    'integer', 'float', 'ip_addr', 'string', 'boolean'
+
+    So you can specify a list of two strings, followed by
+    two integers, as :
+
+      mixed_list('string', 'string', 'integer', 'integer')
+
+    The length of the list must match the number of positional
+    arguments you supply.
+
+    >>> mix_str = "mixed_list('integer', 'float', 'ip_addr', 'string', 'boolean')"
+    >>> check_res = vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a', True))
+    >>> check_res == [1, 2.0, '1.2.3.4', 'a', True]
+    1
+    >>> check_res = vtor.check(mix_str, ('1', '2.0', '1.2.3.4', 'a', 'True'))
+    >>> check_res == [1, 2.0, '1.2.3.4', 'a', True]
+    1
+    >>> vtor.check(mix_str, ('b', 2.0, '1.2.3.4', 'a', True))
+    Traceback (most recent call last):
+    VdtTypeError: the value "b" is of the wrong type.
+    >>> vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a'))
+    Traceback (most recent call last):
+    VdtValueTooShortError: the value "(1, 2.0, '1.2.3.4', 'a')" is too short.
+    >>> vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a', 1, 'b'))
+    Traceback (most recent call last):
+    VdtValueTooLongError: the value "(1, 2.0, '1.2.3.4', 'a', 1, 'b')" is too long.
+    >>> vtor.check(mix_str, 0)
+    Traceback (most recent call last):
+    VdtTypeError: the value "0" is of the wrong type.
+
+    >>> vtor.check('mixed_list("yoda")', ('a'))
+    Traceback (most recent call last):
+    VdtParamError: passed an incorrect value "KeyError('yoda',)" for parameter "'mixed_list'"
+    """
+    try:
+        length = len(value)
+    except TypeError:
+        raise VdtTypeError(value)
+    if length < len(args):
+        raise VdtValueTooShortError(value)
+    elif length > len(args):
+        raise VdtValueTooLongError(value)
+    try:
+        return [fun_dict[arg](val) for arg, val in zip(args, value)]
+    except KeyError as e:
+        raise VdtParamError('mixed_list', e)
+
+
+def is_option(value, *options):
+    """
+    This check matches the value to any of a set of options.
+
+    >>> vtor.check('option("yoda", "jedi")', 'yoda')
+    'yoda'
+    >>> vtor.check('option("yoda", "jedi")', 'jed')
+    Traceback (most recent call last):
+    VdtValueError: the value "jed" is unacceptable.
+ >>> vtor.check('option("yoda", "jedi")', 0) + Traceback (most recent call last): + VdtTypeError: the value "0" is of the wrong type. + """ + if not isinstance(value, string_type): + raise VdtTypeError(value) + if not value in options: + raise VdtValueError(value) + return value + + +def _test(value, *args, **keywargs): + """ + A function that exists for test purposes. + + >>> checks = [ + ... '3, 6, min=1, max=3, test=list(a, b, c)', + ... '3', + ... '3, 6', + ... '3,', + ... 'min=1, test="a b c"', + ... 'min=5, test="a, b, c"', + ... 'min=1, max=3, test="a, b, c"', + ... 'min=-100, test=-99', + ... 'min=1, max=3', + ... '3, 6, test="36"', + ... '3, 6, test="a, b, c"', + ... '3, max=3, test=list("a", "b", "c")', + ... '''3, max=3, test=list("'a'", 'b', "x=(c)")''', + ... "test='x=fish(3)'", + ... ] + >>> v = Validator({'test': _test}) + >>> for entry in checks: + ... pprint(v.check(('test(%s)' % entry), 3)) + (3, ('3', '6'), {'max': '3', 'min': '1', 'test': ['a', 'b', 'c']}) + (3, ('3',), {}) + (3, ('3', '6'), {}) + (3, ('3',), {}) + (3, (), {'min': '1', 'test': 'a b c'}) + (3, (), {'min': '5', 'test': 'a, b, c'}) + (3, (), {'max': '3', 'min': '1', 'test': 'a, b, c'}) + (3, (), {'min': '-100', 'test': '-99'}) + (3, (), {'max': '3', 'min': '1'}) + (3, ('3', '6'), {'test': '36'}) + (3, ('3', '6'), {'test': 'a, b, c'}) + (3, ('3',), {'max': '3', 'test': ['a', 'b', 'c']}) + (3, ('3',), {'max': '3', 'test': ["'a'", 'b', 'x=(c)']}) + (3, (), {'test': 'x=fish(3)'}) + + >>> v = Validator() + >>> v.check('integer(default=6)', '3') + 3 + >>> v.check('integer(default=6)', None, True) + 6 + >>> v.get_default_value('integer(default=6)') + 6 + >>> v.get_default_value('float(default=6)') + 6.0 + >>> v.get_default_value('pass(default=None)') + >>> v.get_default_value("string(default='None')") + 'None' + >>> v.get_default_value('pass') + Traceback (most recent call last): + KeyError: 'Check "pass" has no default value.' + >>> v.get_default_value('pass(default=list(1, 2, 3, 4))') + ['1', '2', '3', '4'] + + >>> v = Validator() + >>> v.check("pass(default=None)", None, True) + >>> v.check("pass(default='None')", None, True) + 'None' + >>> v.check('pass(default="None")', None, True) + 'None' + >>> v.check('pass(default=list(1, 2, 3, 4))', None, True) + ['1', '2', '3', '4'] + + Bug test for unicode arguments + >>> v = Validator() + >>> v.check(unicode('string(min=4)'), unicode('test')) == unicode('test') + True + + >>> v = Validator() + >>> v.get_default_value(unicode('string(min=4, default="1234")')) == unicode('1234') + True + >>> v.check(unicode('string(min=4, default="1234")'), unicode('test')) == unicode('test') + True + + >>> v = Validator() + >>> default = v.get_default_value('string(default=None)') + >>> default == None + 1 + """ + return (value, args, keywargs) + + +def _test2(): + """ + >>> + >>> v = Validator() + >>> v.get_default_value('string(default="#ff00dd")') + '#ff00dd' + >>> v.get_default_value('integer(default=3) # comment') + 3 + """ + +def _test3(): + r""" + >>> vtor.check('string(default="")', '', missing=True) + '' + >>> vtor.check('string(default="\n")', '', missing=True) + '\n' + >>> print(vtor.check('string(default="\n")', '', missing=True)) + + + >>> vtor.check('string()', '\n') + '\n' + >>> vtor.check('string(default="\n\n\n")', '', missing=True) + '\n\n\n' + >>> vtor.check('string()', 'random \n text goes here\n\n') + 'random \n text goes here\n\n' + >>> vtor.check('string(default=" \nrandom text\ngoes \n here\n\n ")', + ... 
'', missing=True) + ' \nrandom text\ngoes \n here\n\n ' + >>> vtor.check("string(default='\n\n\n')", '', missing=True) + '\n\n\n' + >>> vtor.check("option('\n','a','b',default='\n')", '', missing=True) + '\n' + >>> vtor.check("string_list()", ['foo', '\n', 'bar']) + ['foo', '\n', 'bar'] + >>> vtor.check("string_list(default=list('\n'))", '', missing=True) + ['\n'] + """ + + +if __name__ == '__main__': + # run the code tests in doctest format + import sys + import doctest + m = sys.modules.get('__main__') + globs = m.__dict__.copy() + globs.update({ + 'vtor': Validator(), + }) + + failures, tests = doctest.testmod( + m, globs=globs, + optionflags=doctest.IGNORE_EXCEPTION_DETAIL | doctest.ELLIPSIS) + assert not failures, '{} failures out of {} tests'.format(failures, tests) diff --git a/astropy/extern/css/jquery.dataTables.css b/astropy/extern/css/jquery.dataTables.css new file mode 100644 index 0000000..151b858 --- /dev/null +++ b/astropy/extern/css/jquery.dataTables.css @@ -0,0 +1,452 @@ +/* + * Table styles + */ +table.dataTable { + width: 100%; + margin: 0 auto; + clear: both; + border-collapse: separate; + border-spacing: 0; + /* + * Header and footer styles + */ + /* + * Body styles + */ +} +table.dataTable thead th, +table.dataTable tfoot th { + font-weight: bold; +} +table.dataTable thead th, +table.dataTable thead td { + padding: 10px 18px; + border-bottom: 1px solid #111; +} +table.dataTable thead th:active, +table.dataTable thead td:active { + outline: none; +} +table.dataTable tfoot th, +table.dataTable tfoot td { + padding: 10px 18px 6px 18px; + border-top: 1px solid #111; +} +table.dataTable thead .sorting, +table.dataTable thead .sorting_asc, +table.dataTable thead .sorting_desc { + cursor: pointer; + *cursor: hand; +} +table.dataTable thead .sorting, +table.dataTable thead .sorting_asc, +table.dataTable thead .sorting_desc, +table.dataTable thead .sorting_asc_disabled, +table.dataTable thead .sorting_desc_disabled { + background-repeat: no-repeat; + background-position: center right; +} +table.dataTable thead .sorting { + background-image: url("../images/sort_both.png"); +} +table.dataTable thead .sorting_asc { + background-image: url("../images/sort_asc.png"); +} +table.dataTable thead .sorting_desc { + background-image: url("../images/sort_desc.png"); +} +table.dataTable thead .sorting_asc_disabled { + background-image: url("../images/sort_asc_disabled.png"); +} +table.dataTable thead .sorting_desc_disabled { + background-image: url("../images/sort_desc_disabled.png"); +} +table.dataTable tbody tr { + background-color: #ffffff; +} +table.dataTable tbody tr.selected { + background-color: #B0BED9; +} +table.dataTable tbody th, +table.dataTable tbody td { + padding: 8px 10px; +} +table.dataTable.row-border tbody th, table.dataTable.row-border tbody td, table.dataTable.display tbody th, table.dataTable.display tbody td { + border-top: 1px solid #ddd; +} +table.dataTable.row-border tbody tr:first-child th, +table.dataTable.row-border tbody tr:first-child td, table.dataTable.display tbody tr:first-child th, +table.dataTable.display tbody tr:first-child td { + border-top: none; +} +table.dataTable.cell-border tbody th, table.dataTable.cell-border tbody td { + border-top: 1px solid #ddd; + border-right: 1px solid #ddd; +} +table.dataTable.cell-border tbody tr th:first-child, +table.dataTable.cell-border tbody tr td:first-child { + border-left: 1px solid #ddd; +} +table.dataTable.cell-border tbody tr:first-child th, +table.dataTable.cell-border tbody tr:first-child td { + 
border-top: none; +} +table.dataTable.stripe tbody tr.odd, table.dataTable.display tbody tr.odd { + background-color: #f9f9f9; +} +table.dataTable.stripe tbody tr.odd.selected, table.dataTable.display tbody tr.odd.selected { + background-color: #acbad4; +} +table.dataTable.hover tbody tr:hover, table.dataTable.display tbody tr:hover { + background-color: #f6f6f6; +} +table.dataTable.hover tbody tr:hover.selected, table.dataTable.display tbody tr:hover.selected { + background-color: #aab7d1; +} +table.dataTable.order-column tbody tr > .sorting_1, +table.dataTable.order-column tbody tr > .sorting_2, +table.dataTable.order-column tbody tr > .sorting_3, table.dataTable.display tbody tr > .sorting_1, +table.dataTable.display tbody tr > .sorting_2, +table.dataTable.display tbody tr > .sorting_3 { + background-color: #fafafa; +} +table.dataTable.order-column tbody tr.selected > .sorting_1, +table.dataTable.order-column tbody tr.selected > .sorting_2, +table.dataTable.order-column tbody tr.selected > .sorting_3, table.dataTable.display tbody tr.selected > .sorting_1, +table.dataTable.display tbody tr.selected > .sorting_2, +table.dataTable.display tbody tr.selected > .sorting_3 { + background-color: #acbad5; +} +table.dataTable.display tbody tr.odd > .sorting_1, table.dataTable.order-column.stripe tbody tr.odd > .sorting_1 { + background-color: #f1f1f1; +} +table.dataTable.display tbody tr.odd > .sorting_2, table.dataTable.order-column.stripe tbody tr.odd > .sorting_2 { + background-color: #f3f3f3; +} +table.dataTable.display tbody tr.odd > .sorting_3, table.dataTable.order-column.stripe tbody tr.odd > .sorting_3 { + background-color: whitesmoke; +} +table.dataTable.display tbody tr.odd.selected > .sorting_1, table.dataTable.order-column.stripe tbody tr.odd.selected > .sorting_1 { + background-color: #a6b4cd; +} +table.dataTable.display tbody tr.odd.selected > .sorting_2, table.dataTable.order-column.stripe tbody tr.odd.selected > .sorting_2 { + background-color: #a8b5cf; +} +table.dataTable.display tbody tr.odd.selected > .sorting_3, table.dataTable.order-column.stripe tbody tr.odd.selected > .sorting_3 { + background-color: #a9b7d1; +} +table.dataTable.display tbody tr.even > .sorting_1, table.dataTable.order-column.stripe tbody tr.even > .sorting_1 { + background-color: #fafafa; +} +table.dataTable.display tbody tr.even > .sorting_2, table.dataTable.order-column.stripe tbody tr.even > .sorting_2 { + background-color: #fcfcfc; +} +table.dataTable.display tbody tr.even > .sorting_3, table.dataTable.order-column.stripe tbody tr.even > .sorting_3 { + background-color: #fefefe; +} +table.dataTable.display tbody tr.even.selected > .sorting_1, table.dataTable.order-column.stripe tbody tr.even.selected > .sorting_1 { + background-color: #acbad5; +} +table.dataTable.display tbody tr.even.selected > .sorting_2, table.dataTable.order-column.stripe tbody tr.even.selected > .sorting_2 { + background-color: #aebcd6; +} +table.dataTable.display tbody tr.even.selected > .sorting_3, table.dataTable.order-column.stripe tbody tr.even.selected > .sorting_3 { + background-color: #afbdd8; +} +table.dataTable.display tbody tr:hover > .sorting_1, table.dataTable.order-column.hover tbody tr:hover > .sorting_1 { + background-color: #eaeaea; +} +table.dataTable.display tbody tr:hover > .sorting_2, table.dataTable.order-column.hover tbody tr:hover > .sorting_2 { + background-color: #ececec; +} +table.dataTable.display tbody tr:hover > .sorting_3, table.dataTable.order-column.hover tbody tr:hover > .sorting_3 { + 
background-color: #efefef; +} +table.dataTable.display tbody tr:hover.selected > .sorting_1, table.dataTable.order-column.hover tbody tr:hover.selected > .sorting_1 { + background-color: #a2aec7; +} +table.dataTable.display tbody tr:hover.selected > .sorting_2, table.dataTable.order-column.hover tbody tr:hover.selected > .sorting_2 { + background-color: #a3b0c9; +} +table.dataTable.display tbody tr:hover.selected > .sorting_3, table.dataTable.order-column.hover tbody tr:hover.selected > .sorting_3 { + background-color: #a5b2cb; +} +table.dataTable.no-footer { + border-bottom: 1px solid #111; +} +table.dataTable.nowrap th, table.dataTable.nowrap td { + white-space: nowrap; +} +table.dataTable.compact thead th, +table.dataTable.compact thead td { + padding: 4px 17px 4px 4px; +} +table.dataTable.compact tfoot th, +table.dataTable.compact tfoot td { + padding: 4px; +} +table.dataTable.compact tbody th, +table.dataTable.compact tbody td { + padding: 4px; +} +table.dataTable th.dt-left, +table.dataTable td.dt-left { + text-align: left; +} +table.dataTable th.dt-center, +table.dataTable td.dt-center, +table.dataTable td.dataTables_empty { + text-align: center; +} +table.dataTable th.dt-right, +table.dataTable td.dt-right { + text-align: right; +} +table.dataTable th.dt-justify, +table.dataTable td.dt-justify { + text-align: justify; +} +table.dataTable th.dt-nowrap, +table.dataTable td.dt-nowrap { + white-space: nowrap; +} +table.dataTable thead th.dt-head-left, +table.dataTable thead td.dt-head-left, +table.dataTable tfoot th.dt-head-left, +table.dataTable tfoot td.dt-head-left { + text-align: left; +} +table.dataTable thead th.dt-head-center, +table.dataTable thead td.dt-head-center, +table.dataTable tfoot th.dt-head-center, +table.dataTable tfoot td.dt-head-center { + text-align: center; +} +table.dataTable thead th.dt-head-right, +table.dataTable thead td.dt-head-right, +table.dataTable tfoot th.dt-head-right, +table.dataTable tfoot td.dt-head-right { + text-align: right; +} +table.dataTable thead th.dt-head-justify, +table.dataTable thead td.dt-head-justify, +table.dataTable tfoot th.dt-head-justify, +table.dataTable tfoot td.dt-head-justify { + text-align: justify; +} +table.dataTable thead th.dt-head-nowrap, +table.dataTable thead td.dt-head-nowrap, +table.dataTable tfoot th.dt-head-nowrap, +table.dataTable tfoot td.dt-head-nowrap { + white-space: nowrap; +} +table.dataTable tbody th.dt-body-left, +table.dataTable tbody td.dt-body-left { + text-align: left; +} +table.dataTable tbody th.dt-body-center, +table.dataTable tbody td.dt-body-center { + text-align: center; +} +table.dataTable tbody th.dt-body-right, +table.dataTable tbody td.dt-body-right { + text-align: right; +} +table.dataTable tbody th.dt-body-justify, +table.dataTable tbody td.dt-body-justify { + text-align: justify; +} +table.dataTable tbody th.dt-body-nowrap, +table.dataTable tbody td.dt-body-nowrap { + white-space: nowrap; +} + +table.dataTable, +table.dataTable th, +table.dataTable td { + -webkit-box-sizing: content-box; + box-sizing: content-box; +} + +/* + * Control feature layout + */ +.dataTables_wrapper { + position: relative; + clear: both; + *zoom: 1; + zoom: 1; +} +.dataTables_wrapper .dataTables_length { + float: left; +} +.dataTables_wrapper .dataTables_filter { + float: right; + text-align: right; +} +.dataTables_wrapper .dataTables_filter input { + margin-left: 0.5em; +} +.dataTables_wrapper .dataTables_info { + clear: both; + float: left; + padding-top: 0.755em; +} +.dataTables_wrapper .dataTables_paginate { 
+ float: right; + text-align: right; + padding-top: 0.25em; +} +.dataTables_wrapper .dataTables_paginate .paginate_button { + box-sizing: border-box; + display: inline-block; + min-width: 1.5em; + padding: 0.5em 1em; + margin-left: 2px; + text-align: center; + text-decoration: none !important; + cursor: pointer; + *cursor: hand; + color: #333 !important; + border: 1px solid transparent; + border-radius: 2px; +} +.dataTables_wrapper .dataTables_paginate .paginate_button.current, .dataTables_wrapper .dataTables_paginate .paginate_button.current:hover { + color: #333 !important; + border: 1px solid #979797; + background-color: white; + background: -webkit-gradient(linear, left top, left bottom, color-stop(0%, white), color-stop(100%, #dcdcdc)); + /* Chrome,Safari4+ */ + background: -webkit-linear-gradient(top, white 0%, #dcdcdc 100%); + /* Chrome10+,Safari5.1+ */ + background: -moz-linear-gradient(top, white 0%, #dcdcdc 100%); + /* FF3.6+ */ + background: -ms-linear-gradient(top, white 0%, #dcdcdc 100%); + /* IE10+ */ + background: -o-linear-gradient(top, white 0%, #dcdcdc 100%); + /* Opera 11.10+ */ + background: linear-gradient(to bottom, white 0%, #dcdcdc 100%); + /* W3C */ +} +.dataTables_wrapper .dataTables_paginate .paginate_button.disabled, .dataTables_wrapper .dataTables_paginate .paginate_button.disabled:hover, .dataTables_wrapper .dataTables_paginate .paginate_button.disabled:active { + cursor: default; + color: #666 !important; + border: 1px solid transparent; + background: transparent; + box-shadow: none; +} +.dataTables_wrapper .dataTables_paginate .paginate_button:hover { + color: white !important; + border: 1px solid #111; + background-color: #585858; + background: -webkit-gradient(linear, left top, left bottom, color-stop(0%, #585858), color-stop(100%, #111)); + /* Chrome,Safari4+ */ + background: -webkit-linear-gradient(top, #585858 0%, #111 100%); + /* Chrome10+,Safari5.1+ */ + background: -moz-linear-gradient(top, #585858 0%, #111 100%); + /* FF3.6+ */ + background: -ms-linear-gradient(top, #585858 0%, #111 100%); + /* IE10+ */ + background: -o-linear-gradient(top, #585858 0%, #111 100%); + /* Opera 11.10+ */ + background: linear-gradient(to bottom, #585858 0%, #111 100%); + /* W3C */ +} +.dataTables_wrapper .dataTables_paginate .paginate_button:active { + outline: none; + background-color: #2b2b2b; + background: -webkit-gradient(linear, left top, left bottom, color-stop(0%, #2b2b2b), color-stop(100%, #0c0c0c)); + /* Chrome,Safari4+ */ + background: -webkit-linear-gradient(top, #2b2b2b 0%, #0c0c0c 100%); + /* Chrome10+,Safari5.1+ */ + background: -moz-linear-gradient(top, #2b2b2b 0%, #0c0c0c 100%); + /* FF3.6+ */ + background: -ms-linear-gradient(top, #2b2b2b 0%, #0c0c0c 100%); + /* IE10+ */ + background: -o-linear-gradient(top, #2b2b2b 0%, #0c0c0c 100%); + /* Opera 11.10+ */ + background: linear-gradient(to bottom, #2b2b2b 0%, #0c0c0c 100%); + /* W3C */ + box-shadow: inset 0 0 3px #111; +} +.dataTables_wrapper .dataTables_paginate .ellipsis { + padding: 0 1em; +} +.dataTables_wrapper .dataTables_processing { + position: absolute; + top: 50%; + left: 50%; + width: 100%; + height: 40px; + margin-left: -50%; + margin-top: -25px; + padding-top: 20px; + text-align: center; + font-size: 1.2em; + background-color: white; + background: -webkit-gradient(linear, left top, right top, color-stop(0%, rgba(255, 255, 255, 0)), color-stop(25%, rgba(255, 255, 255, 0.9)), color-stop(75%, rgba(255, 255, 255, 0.9)), color-stop(100%, rgba(255, 255, 255, 0))); + background: 
-webkit-linear-gradient(left, rgba(255, 255, 255, 0) 0%, rgba(255, 255, 255, 0.9) 25%, rgba(255, 255, 255, 0.9) 75%, rgba(255, 255, 255, 0) 100%); + background: -moz-linear-gradient(left, rgba(255, 255, 255, 0) 0%, rgba(255, 255, 255, 0.9) 25%, rgba(255, 255, 255, 0.9) 75%, rgba(255, 255, 255, 0) 100%); + background: -ms-linear-gradient(left, rgba(255, 255, 255, 0) 0%, rgba(255, 255, 255, 0.9) 25%, rgba(255, 255, 255, 0.9) 75%, rgba(255, 255, 255, 0) 100%); + background: -o-linear-gradient(left, rgba(255, 255, 255, 0) 0%, rgba(255, 255, 255, 0.9) 25%, rgba(255, 255, 255, 0.9) 75%, rgba(255, 255, 255, 0) 100%); + background: linear-gradient(to right, rgba(255, 255, 255, 0) 0%, rgba(255, 255, 255, 0.9) 25%, rgba(255, 255, 255, 0.9) 75%, rgba(255, 255, 255, 0) 100%); +} +.dataTables_wrapper .dataTables_length, +.dataTables_wrapper .dataTables_filter, +.dataTables_wrapper .dataTables_info, +.dataTables_wrapper .dataTables_processing, +.dataTables_wrapper .dataTables_paginate { + color: #333; +} +.dataTables_wrapper .dataTables_scroll { + clear: both; +} +.dataTables_wrapper .dataTables_scroll div.dataTables_scrollBody { + *margin-top: -1px; + -webkit-overflow-scrolling: touch; +} +.dataTables_wrapper .dataTables_scroll div.dataTables_scrollBody th, .dataTables_wrapper .dataTables_scroll div.dataTables_scrollBody td { + vertical-align: middle; +} +.dataTables_wrapper .dataTables_scroll div.dataTables_scrollBody th > div.dataTables_sizing, +.dataTables_wrapper .dataTables_scroll div.dataTables_scrollBody td > div.dataTables_sizing { + height: 0; + overflow: hidden; + margin: 0 !important; + padding: 0 !important; +} +.dataTables_wrapper.no-footer .dataTables_scrollBody { + border-bottom: 1px solid #111; +} +.dataTables_wrapper.no-footer div.dataTables_scrollHead table, +.dataTables_wrapper.no-footer div.dataTables_scrollBody table { + border-bottom: none; +} +.dataTables_wrapper:after { + visibility: hidden; + display: block; + content: ""; + clear: both; + height: 0; +} + +@media screen and (max-width: 767px) { + .dataTables_wrapper .dataTables_info, + .dataTables_wrapper .dataTables_paginate { + float: none; + text-align: center; + } + .dataTables_wrapper .dataTables_paginate { + margin-top: 0.5em; + } +} +@media screen and (max-width: 640px) { + .dataTables_wrapper .dataTables_length, + .dataTables_wrapper .dataTables_filter { + float: none; + text-align: center; + } + .dataTables_wrapper .dataTables_filter { + margin-top: 0.5em; + } +} diff --git a/astropy/extern/js/jquery-3.1.1.js b/astropy/extern/js/jquery-3.1.1.js new file mode 100644 index 0000000..072e308 --- /dev/null +++ b/astropy/extern/js/jquery-3.1.1.js @@ -0,0 +1,10220 @@ +/*! + * jQuery JavaScript Library v3.1.1 + * https://jquery.com/ + * + * Includes Sizzle.js + * https://sizzlejs.com/ + * + * Copyright jQuery Foundation and other contributors + * Released under the MIT license + * https://jquery.org/license + * + * Date: 2016-09-22T22:30Z + */ +( function( global, factory ) { + + "use strict"; + + if ( typeof module === "object" && typeof module.exports === "object" ) { + + // For CommonJS and CommonJS-like environments where a proper `window` + // is present, execute the factory and get jQuery. + // For environments that do not have a `window` with a `document` + // (such as Node.js), expose a factory as module.exports. + // This accentuates the need for the creation of a real `window`. + // e.g. var jQuery = require("jquery")(window); + // See ticket #14549 for more info. + module.exports = global.document ? 
+ factory( global, true ) : + function( w ) { + if ( !w.document ) { + throw new Error( "jQuery requires a window with a document" ); + } + return factory( w ); + }; + } else { + factory( global ); + } + +// Pass this if window is not defined yet +} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) { + +// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 +// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode +// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common +// enough that all such attempts are guarded in a try block. +"use strict"; + +var arr = []; + +var document = window.document; + +var getProto = Object.getPrototypeOf; + +var slice = arr.slice; + +var concat = arr.concat; + +var push = arr.push; + +var indexOf = arr.indexOf; + +var class2type = {}; + +var toString = class2type.toString; + +var hasOwn = class2type.hasOwnProperty; + +var fnToString = hasOwn.toString; + +var ObjectFunctionString = fnToString.call( Object ); + +var support = {}; + + + + function DOMEval( code, doc ) { + doc = doc || document; + + var script = doc.createElement( "script" ); + + script.text = code; + doc.head.appendChild( script ).parentNode.removeChild( script ); + } +/* global Symbol */ +// Defining this global in .eslintrc.json would create a danger of using the global +// unguarded in another place, it seems safer to define global only for this module + + + +var + version = "3.1.1", + + // Define a local copy of jQuery + jQuery = function( selector, context ) { + + // The jQuery object is actually just the init constructor 'enhanced' + // Need init if jQuery is called (just allow error to be thrown if not included) + return new jQuery.fn.init( selector, context ); + }, + + // Support: Android <=4.0 only + // Make sure we trim BOM and NBSP + rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g, + + // Matches dashed string for camelizing + rmsPrefix = /^-ms-/, + rdashAlpha = /-([a-z])/g, + + // Used by jQuery.camelCase as callback to replace() + fcamelCase = function( all, letter ) { + return letter.toUpperCase(); + }; + +jQuery.fn = jQuery.prototype = { + + // The current version of jQuery being used + jquery: version, + + constructor: jQuery, + + // The default length of a jQuery object is 0 + length: 0, + + toArray: function() { + return slice.call( this ); + }, + + // Get the Nth element in the matched element set OR + // Get the whole matched element set as a clean array + get: function( num ) { + + // Return all the elements in a clean array + if ( num == null ) { + return slice.call( this ); + } + + // Return just the one element from the set + return num < 0 ? this[ num + this.length ] : this[ num ]; + }, + + // Take an array of elements and push it onto the stack + // (returning the new matched element set) + pushStack: function( elems ) { + + // Build a new jQuery matched element set + var ret = jQuery.merge( this.constructor(), elems ); + + // Add the old object onto the stack (as a reference) + ret.prevObject = this; + + // Return the newly-formed element set + return ret; + }, + + // Execute a callback for every element in the matched set. 
+ each: function( callback ) { + return jQuery.each( this, callback ); + }, + + map: function( callback ) { + return this.pushStack( jQuery.map( this, function( elem, i ) { + return callback.call( elem, i, elem ); + } ) ); + }, + + slice: function() { + return this.pushStack( slice.apply( this, arguments ) ); + }, + + first: function() { + return this.eq( 0 ); + }, + + last: function() { + return this.eq( -1 ); + }, + + eq: function( i ) { + var len = this.length, + j = +i + ( i < 0 ? len : 0 ); + return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); + }, + + end: function() { + return this.prevObject || this.constructor(); + }, + + // For internal use only. + // Behaves like an Array's method, not like a jQuery method. + push: push, + sort: arr.sort, + splice: arr.splice +}; + +jQuery.extend = jQuery.fn.extend = function() { + var options, name, src, copy, copyIsArray, clone, + target = arguments[ 0 ] || {}, + i = 1, + length = arguments.length, + deep = false; + + // Handle a deep copy situation + if ( typeof target === "boolean" ) { + deep = target; + + // Skip the boolean and the target + target = arguments[ i ] || {}; + i++; + } + + // Handle case when target is a string or something (possible in deep copy) + if ( typeof target !== "object" && !jQuery.isFunction( target ) ) { + target = {}; + } + + // Extend jQuery itself if only one argument is passed + if ( i === length ) { + target = this; + i--; + } + + for ( ; i < length; i++ ) { + + // Only deal with non-null/undefined values + if ( ( options = arguments[ i ] ) != null ) { + + // Extend the base object + for ( name in options ) { + src = target[ name ]; + copy = options[ name ]; + + // Prevent never-ending loop + if ( target === copy ) { + continue; + } + + // Recurse if we're merging plain objects or arrays + if ( deep && copy && ( jQuery.isPlainObject( copy ) || + ( copyIsArray = jQuery.isArray( copy ) ) ) ) { + + if ( copyIsArray ) { + copyIsArray = false; + clone = src && jQuery.isArray( src ) ? src : []; + + } else { + clone = src && jQuery.isPlainObject( src ) ? 
src : {}; + } + + // Never move original objects, clone them + target[ name ] = jQuery.extend( deep, clone, copy ); + + // Don't bring in undefined values + } else if ( copy !== undefined ) { + target[ name ] = copy; + } + } + } + } + + // Return the modified object + return target; +}; + +jQuery.extend( { + + // Unique for each copy of jQuery on the page + expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), + + // Assume jQuery is ready without the ready module + isReady: true, + + error: function( msg ) { + throw new Error( msg ); + }, + + noop: function() {}, + + isFunction: function( obj ) { + return jQuery.type( obj ) === "function"; + }, + + isArray: Array.isArray, + + isWindow: function( obj ) { + return obj != null && obj === obj.window; + }, + + isNumeric: function( obj ) { + + // As of jQuery 3.0, isNumeric is limited to + // strings and numbers (primitives or objects) + // that can be coerced to finite numbers (gh-2662) + var type = jQuery.type( obj ); + return ( type === "number" || type === "string" ) && + + // parseFloat NaNs numeric-cast false positives ("") + // ...but misinterprets leading-number strings, particularly hex literals ("0x...") + // subtraction forces infinities to NaN + !isNaN( obj - parseFloat( obj ) ); + }, + + isPlainObject: function( obj ) { + var proto, Ctor; + + // Detect obvious negatives + // Use toString instead of jQuery.type to catch host objects + if ( !obj || toString.call( obj ) !== "[object Object]" ) { + return false; + } + + proto = getProto( obj ); + + // Objects with no prototype (e.g., `Object.create( null )`) are plain + if ( !proto ) { + return true; + } + + // Objects with prototype are plain iff they were constructed by a global Object function + Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor; + return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; + }, + + isEmptyObject: function( obj ) { + + /* eslint-disable no-unused-vars */ + // See https://github.com/eslint/eslint/issues/6125 + var name; + + for ( name in obj ) { + return false; + } + return true; + }, + + type: function( obj ) { + if ( obj == null ) { + return obj + ""; + } + + // Support: Android <=2.3 only (functionish RegExp) + return typeof obj === "object" || typeof obj === "function" ? + class2type[ toString.call( obj ) ] || "object" : + typeof obj; + }, + + // Evaluates a script in a global context + globalEval: function( code ) { + DOMEval( code ); + }, + + // Convert dashed to camelCase; used by the css and data modules + // Support: IE <=9 - 11, Edge 12 - 13 + // Microsoft forgot to hump their vendor prefix (#9572) + camelCase: function( string ) { + return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); + }, + + nodeName: function( elem, name ) { + return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); + }, + + each: function( obj, callback ) { + var length, i = 0; + + if ( isArrayLike( obj ) ) { + length = obj.length; + for ( ; i < length; i++ ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } else { + for ( i in obj ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } + + return obj; + }, + + // Support: Android <=4.0 only + trim: function( text ) { + return text == null ? 
+ "" : + ( text + "" ).replace( rtrim, "" ); + }, + + // results is for internal usage only + makeArray: function( arr, results ) { + var ret = results || []; + + if ( arr != null ) { + if ( isArrayLike( Object( arr ) ) ) { + jQuery.merge( ret, + typeof arr === "string" ? + [ arr ] : arr + ); + } else { + push.call( ret, arr ); + } + } + + return ret; + }, + + inArray: function( elem, arr, i ) { + return arr == null ? -1 : indexOf.call( arr, elem, i ); + }, + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + merge: function( first, second ) { + var len = +second.length, + j = 0, + i = first.length; + + for ( ; j < len; j++ ) { + first[ i++ ] = second[ j ]; + } + + first.length = i; + + return first; + }, + + grep: function( elems, callback, invert ) { + var callbackInverse, + matches = [], + i = 0, + length = elems.length, + callbackExpect = !invert; + + // Go through the array, only saving the items + // that pass the validator function + for ( ; i < length; i++ ) { + callbackInverse = !callback( elems[ i ], i ); + if ( callbackInverse !== callbackExpect ) { + matches.push( elems[ i ] ); + } + } + + return matches; + }, + + // arg is for internal usage only + map: function( elems, callback, arg ) { + var length, value, + i = 0, + ret = []; + + // Go through the array, translating each of the items to their new values + if ( isArrayLike( elems ) ) { + length = elems.length; + for ( ; i < length; i++ ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + + // Go through every key on the object, + } else { + for ( i in elems ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + } + + // Flatten any nested arrays + return concat.apply( [], ret ); + }, + + // A global GUID counter for objects + guid: 1, + + // Bind a function to a context, optionally partially applying any + // arguments. + proxy: function( fn, context ) { + var tmp, args, proxy; + + if ( typeof context === "string" ) { + tmp = fn[ context ]; + context = fn; + fn = tmp; + } + + // Quick check to determine if target is callable, in the spec + // this throws a TypeError, but we will just return undefined. + if ( !jQuery.isFunction( fn ) ) { + return undefined; + } + + // Simulated bind + args = slice.call( arguments, 2 ); + proxy = function() { + return fn.apply( context || this, args.concat( slice.call( arguments ) ) ); + }; + + // Set the guid of unique handler to the same of original handler, so it can be removed + proxy.guid = fn.guid = fn.guid || jQuery.guid++; + + return proxy; + }, + + now: Date.now, + + // jQuery.support is not used in Core but other projects attach their + // properties to it so it needs to exist. 
+ support: support +} ); + +if ( typeof Symbol === "function" ) { + jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; +} + +// Populate the class2type map +jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), +function( i, name ) { + class2type[ "[object " + name + "]" ] = name.toLowerCase(); +} ); + +function isArrayLike( obj ) { + + // Support: real iOS 8.2 only (not reproducible in simulator) + // `in` check used to prevent JIT error (gh-2145) + // hasOwn isn't used here due to false negatives + // regarding Nodelist length in IE + var length = !!obj && "length" in obj && obj.length, + type = jQuery.type( obj ); + + if ( type === "function" || jQuery.isWindow( obj ) ) { + return false; + } + + return type === "array" || length === 0 || + typeof length === "number" && length > 0 && ( length - 1 ) in obj; +} +var Sizzle = +/*! + * Sizzle CSS Selector Engine v2.3.3 + * https://sizzlejs.com/ + * + * Copyright jQuery Foundation and other contributors + * Released under the MIT license + * http://jquery.org/license + * + * Date: 2016-08-08 + */ +(function( window ) { + +var i, + support, + Expr, + getText, + isXML, + tokenize, + compile, + select, + outermostContext, + sortInput, + hasDuplicate, + + // Local document vars + setDocument, + document, + docElem, + documentIsHTML, + rbuggyQSA, + rbuggyMatches, + matches, + contains, + + // Instance-specific data + expando = "sizzle" + 1 * new Date(), + preferredDoc = window.document, + dirruns = 0, + done = 0, + classCache = createCache(), + tokenCache = createCache(), + compilerCache = createCache(), + sortOrder = function( a, b ) { + if ( a === b ) { + hasDuplicate = true; + } + return 0; + }, + + // Instance methods + hasOwn = ({}).hasOwnProperty, + arr = [], + pop = arr.pop, + push_native = arr.push, + push = arr.push, + slice = arr.slice, + // Use a stripped-down indexOf as it's faster than native + // https://jsperf.com/thor-indexof-vs-for/5 + indexOf = function( list, elem ) { + var i = 0, + len = list.length; + for ( ; i < len; i++ ) { + if ( list[i] === elem ) { + return i; + } + } + return -1; + }, + + booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped", + + // Regular expressions + + // http://www.w3.org/TR/css3-selectors/#whitespace + whitespace = "[\\x20\\t\\r\\n\\f]", + + // http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier + identifier = "(?:\\\\.|[\\w-]|[^\0-\\xa0])+", + + // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors + attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + + // Operator (capture 2) + "*([*^$|!~]?=)" + whitespace + + // "Attribute values must be CSS identifiers [capture 5] or strings [capture 3 or capture 4]" + "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + whitespace + + "*\\]", + + pseudos = ":(" + identifier + ")(?:\\((" + + // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: + // 1. quoted (capture 3; capture 4 or capture 5) + "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + + // 2. simple (capture 6) + "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + + // 3. 
anything else (capture 2) + ".*" + + ")\\)|)", + + // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter + rwhitespace = new RegExp( whitespace + "+", "g" ), + rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + whitespace + "+$", "g" ), + + rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), + rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + "*" ), + + rattributeQuotes = new RegExp( "=" + whitespace + "*([^\\]'\"]*?)" + whitespace + "*\\]", "g" ), + + rpseudo = new RegExp( pseudos ), + ridentifier = new RegExp( "^" + identifier + "$" ), + + matchExpr = { + "ID": new RegExp( "^#(" + identifier + ")" ), + "CLASS": new RegExp( "^\\.(" + identifier + ")" ), + "TAG": new RegExp( "^(" + identifier + "|[*])" ), + "ATTR": new RegExp( "^" + attributes ), + "PSEUDO": new RegExp( "^" + pseudos ), + "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + whitespace + + "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + whitespace + + "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), + "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), + // For use in libraries implementing .is() + // We use this for POS matching in `select` + "needsContext": new RegExp( "^" + whitespace + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + + whitespace + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) + }, + + rinputs = /^(?:input|select|textarea|button)$/i, + rheader = /^h\d$/i, + + rnative = /^[^{]+\{\s*\[native \w/, + + // Easily-parseable/retrievable ID or TAG or CLASS selectors + rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, + + rsibling = /[+~]/, + + // CSS escapes + // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters + runescape = new RegExp( "\\\\([\\da-f]{1,6}" + whitespace + "?|(" + whitespace + ")|.)", "ig" ), + funescape = function( _, escaped, escapedWhitespace ) { + var high = "0x" + escaped - 0x10000; + // NaN means non-codepoint + // Support: Firefox<24 + // Workaround erroneous numeric interpretation of +"0x" + return high !== high || escapedWhitespace ? + escaped : + high < 0 ? 
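+			// [Editor's worked example, not upstream] "ffff" gives
+			// high = 0xffff - 0x10000 = -1 < 0, so the BMP branch returns
+			// fromCharCode( -1 + 0x10000 ), i.e. U+FFFF; "1f600" gives
+			// high = 0xf600 >= 0, and the surrogate branch returns
+			// fromCharCode( 0xD83D, 0xDE00 ), the UTF-16 pair for U+1F600.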
+ // BMP codepoint + String.fromCharCode( high + 0x10000 ) : + // Supplemental Plane codepoint (surrogate pair) + String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); + }, + + // CSS string/identifier serialization + // https://drafts.csswg.org/cssom/#common-serializing-idioms + rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, + fcssescape = function( ch, asCodePoint ) { + if ( asCodePoint ) { + + // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER + if ( ch === "\0" ) { + return "\uFFFD"; + } + + // Control characters and (dependent upon position) numbers get escaped as code points + return ch.slice( 0, -1 ) + "\\" + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; + } + + // Other potentially-special ASCII characters get backslash-escaped + return "\\" + ch; + }, + + // Used for iframes + // See setDocument() + // Removing the function wrapper causes a "Permission Denied" + // error in IE + unloadHandler = function() { + setDocument(); + }, + + disabledAncestor = addCombinator( + function( elem ) { + return elem.disabled === true && ("form" in elem || "label" in elem); + }, + { dir: "parentNode", next: "legend" } + ); + +// Optimize for push.apply( _, NodeList ) +try { + push.apply( + (arr = slice.call( preferredDoc.childNodes )), + preferredDoc.childNodes + ); + // Support: Android<4.0 + // Detect silently failing push.apply + arr[ preferredDoc.childNodes.length ].nodeType; +} catch ( e ) { + push = { apply: arr.length ? + + // Leverage slice if possible + function( target, els ) { + push_native.apply( target, slice.call(els) ); + } : + + // Support: IE<9 + // Otherwise append directly + function( target, els ) { + var j = target.length, + i = 0; + // Can't trust NodeList.length + while ( (target[j++] = els[i++]) ) {} + target.length = j - 1; + } + }; +} + +function Sizzle( selector, context, results, seed ) { + var m, i, elem, nid, match, groups, newSelector, + newContext = context && context.ownerDocument, + + // nodeType defaults to 9, since context defaults to document + nodeType = context ? context.nodeType : 9; + + results = results || []; + + // Return early from calls with invalid selector or context + if ( typeof selector !== "string" || !selector || + nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { + + return results; + } + + // Try to shortcut find operations (as opposed to filters) in HTML documents + if ( !seed ) { + + if ( ( context ? 
context.ownerDocument || context : preferredDoc ) !== document ) { + setDocument( context ); + } + context = context || document; + + if ( documentIsHTML ) { + + // If the selector is sufficiently simple, try using a "get*By*" DOM method + // (excepting DocumentFragment context, where the methods don't exist) + if ( nodeType !== 11 && (match = rquickExpr.exec( selector )) ) { + + // ID selector + if ( (m = match[1]) ) { + + // Document context + if ( nodeType === 9 ) { + if ( (elem = context.getElementById( m )) ) { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( elem.id === m ) { + results.push( elem ); + return results; + } + } else { + return results; + } + + // Element context + } else { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( newContext && (elem = newContext.getElementById( m )) && + contains( context, elem ) && + elem.id === m ) { + + results.push( elem ); + return results; + } + } + + // Type selector + } else if ( match[2] ) { + push.apply( results, context.getElementsByTagName( selector ) ); + return results; + + // Class selector + } else if ( (m = match[3]) && support.getElementsByClassName && + context.getElementsByClassName ) { + + push.apply( results, context.getElementsByClassName( m ) ); + return results; + } + } + + // Take advantage of querySelectorAll + if ( support.qsa && + !compilerCache[ selector + " " ] && + (!rbuggyQSA || !rbuggyQSA.test( selector )) ) { + + if ( nodeType !== 1 ) { + newContext = context; + newSelector = selector; + + // qSA looks outside Element context, which is not what we want + // Thanks to Andrew Dupont for this workaround technique + // Support: IE <=8 + // Exclude object elements + } else if ( context.nodeName.toLowerCase() !== "object" ) { + + // Capture the context ID, setting it first if necessary + if ( (nid = context.getAttribute( "id" )) ) { + nid = nid.replace( rcssescape, fcssescape ); + } else { + context.setAttribute( "id", (nid = expando) ); + } + + // Prefix every selector in the list + groups = tokenize( selector ); + i = groups.length; + while ( i-- ) { + groups[i] = "#" + nid + " " + toSelector( groups[i] ); + } + newSelector = groups.join( "," ); + + // Expand context for sibling selectors + newContext = rsibling.test( selector ) && testContext( context.parentNode ) || + context; + } + + if ( newSelector ) { + try { + push.apply( results, + newContext.querySelectorAll( newSelector ) + ); + return results; + } catch ( qsaError ) { + } finally { + if ( nid === expando ) { + context.removeAttribute( "id" ); + } + } + } + } + } + } + + // All others + return select( selector.replace( rtrim, "$1" ), context, results, seed ); +} + +/** + * Create key-value caches of limited size + * @returns {function(string, object)} Returns the Object data after storing it on itself with + * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) + * deleting the oldest entry + */ +function createCache() { + var keys = []; + + function cache( key, value ) { + // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) + if ( keys.push( key + " " ) > Expr.cacheLength ) { + // Only keep the most recent entries + delete cache[ keys.shift() ]; + } + return (cache[ key + " " ] = value); + } + return cache; +} + +/** + * Mark a function for special use by Sizzle + * @param {Function} fn The function to mark + */ 
+function markFunction( fn ) { + fn[ expando ] = true; + return fn; +} + +/** + * Support testing using an element + * @param {Function} fn Passed the created element and returns a boolean result + */ +function assert( fn ) { + var el = document.createElement("fieldset"); + + try { + return !!fn( el ); + } catch (e) { + return false; + } finally { + // Remove from its parent by default + if ( el.parentNode ) { + el.parentNode.removeChild( el ); + } + // release memory in IE + el = null; + } +} + +/** + * Adds the same handler for all of the specified attrs + * @param {String} attrs Pipe-separated list of attributes + * @param {Function} handler The method that will be applied + */ +function addHandle( attrs, handler ) { + var arr = attrs.split("|"), + i = arr.length; + + while ( i-- ) { + Expr.attrHandle[ arr[i] ] = handler; + } +} + +/** + * Checks document order of two siblings + * @param {Element} a + * @param {Element} b + * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b + */ +function siblingCheck( a, b ) { + var cur = b && a, + diff = cur && a.nodeType === 1 && b.nodeType === 1 && + a.sourceIndex - b.sourceIndex; + + // Use IE sourceIndex if available on both nodes + if ( diff ) { + return diff; + } + + // Check if b follows a + if ( cur ) { + while ( (cur = cur.nextSibling) ) { + if ( cur === b ) { + return -1; + } + } + } + + return a ? 1 : -1; +} + +/** + * Returns a function to use in pseudos for input types + * @param {String} type + */ +function createInputPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for buttons + * @param {String} type + */ +function createButtonPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return (name === "input" || name === "button") && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for :enabled/:disabled + * @param {Boolean} disabled true for :disabled; false for :enabled + */ +function createDisabledPseudo( disabled ) { + + // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable + return function( elem ) { + + // Only certain elements can match :enabled or :disabled + // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled + // https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled + if ( "form" in elem ) { + + // Check for inherited disabledness on relevant non-disabled elements: + // * listed form-associated elements in a disabled fieldset + // https://html.spec.whatwg.org/multipage/forms.html#category-listed + // https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled + // * option elements in a disabled optgroup + // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled + // All such elements have a "form" property. 
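+		// [Editor's summary of the branches below; illustrative, not upstream]
+		//   "form" in elem          -> fieldset/optgroup-aware checks
+		//   only "label" in elem    -> trust elem.disabled as-is
+		//   anything else           -> neither :enabled nor :disabled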
+ if ( elem.parentNode && elem.disabled === false ) { + + // Option elements defer to a parent optgroup if present + if ( "label" in elem ) { + if ( "label" in elem.parentNode ) { + return elem.parentNode.disabled === disabled; + } else { + return elem.disabled === disabled; + } + } + + // Support: IE 6 - 11 + // Use the isDisabled shortcut property to check for disabled fieldset ancestors + return elem.isDisabled === disabled || + + // Where there is no isDisabled, check manually + /* jshint -W018 */ + elem.isDisabled !== !disabled && + disabledAncestor( elem ) === disabled; + } + + return elem.disabled === disabled; + + // Try to winnow out elements that can't be disabled before trusting the disabled property. + // Some victims get caught in our net (label, legend, menu, track), but it shouldn't + // even exist on them, let alone have a boolean value. + } else if ( "label" in elem ) { + return elem.disabled === disabled; + } + + // Remaining elements are neither :enabled nor :disabled + return false; + }; +} + +/** + * Returns a function to use in pseudos for positionals + * @param {Function} fn + */ +function createPositionalPseudo( fn ) { + return markFunction(function( argument ) { + argument = +argument; + return markFunction(function( seed, matches ) { + var j, + matchIndexes = fn( [], seed.length, argument ), + i = matchIndexes.length; + + // Match elements found at the specified indexes + while ( i-- ) { + if ( seed[ (j = matchIndexes[i]) ] ) { + seed[j] = !(matches[j] = seed[j]); + } + } + }); + }); +} + +/** + * Checks a node for validity as a Sizzle context + * @param {Element|Object=} context + * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value + */ +function testContext( context ) { + return context && typeof context.getElementsByTagName !== "undefined" && context; +} + +// Expose support vars for convenience +support = Sizzle.support = {}; + +/** + * Detects XML nodes + * @param {Element|Object} elem An element or a document + * @returns {Boolean} True iff elem is a non-HTML XML node + */ +isXML = Sizzle.isXML = function( elem ) { + // documentElement is verified for cases where it doesn't yet exist + // (such as loading iframes in IE - #4833) + var documentElement = elem && (elem.ownerDocument || elem).documentElement; + return documentElement ? documentElement.nodeName !== "HTML" : false; +}; + +/** + * Sets document-related variables once based on the current document + * @param {Element|Object} [doc] An element or document object to use to set the document + * @returns {Object} Returns the current document + */ +setDocument = Sizzle.setDocument = function( node ) { + var hasCompare, subWindow, + doc = node ? 
node.ownerDocument || node : preferredDoc; + + // Return early if doc is invalid or already selected + if ( doc === document || doc.nodeType !== 9 || !doc.documentElement ) { + return document; + } + + // Update global variables + document = doc; + docElem = document.documentElement; + documentIsHTML = !isXML( document ); + + // Support: IE 9-11, Edge + // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936) + if ( preferredDoc !== document && + (subWindow = document.defaultView) && subWindow.top !== subWindow ) { + + // Support: IE 11, Edge + if ( subWindow.addEventListener ) { + subWindow.addEventListener( "unload", unloadHandler, false ); + + // Support: IE 9 - 10 only + } else if ( subWindow.attachEvent ) { + subWindow.attachEvent( "onunload", unloadHandler ); + } + } + + /* Attributes + ---------------------------------------------------------------------- */ + + // Support: IE<8 + // Verify that getAttribute really returns attributes and not properties + // (excepting IE8 booleans) + support.attributes = assert(function( el ) { + el.className = "i"; + return !el.getAttribute("className"); + }); + + /* getElement(s)By* + ---------------------------------------------------------------------- */ + + // Check if getElementsByTagName("*") returns only elements + support.getElementsByTagName = assert(function( el ) { + el.appendChild( document.createComment("") ); + return !el.getElementsByTagName("*").length; + }); + + // Support: IE<9 + support.getElementsByClassName = rnative.test( document.getElementsByClassName ); + + // Support: IE<10 + // Check if getElementById returns elements by name + // The broken getElementById methods don't pick up programmatically-set names, + // so use a roundabout getElementsByName test + support.getById = assert(function( el ) { + docElem.appendChild( el ).id = expando; + return !document.getElementsByName || !document.getElementsByName( expando ).length; + }); + + // ID filter and find + if ( support.getById ) { + Expr.filter["ID"] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + return elem.getAttribute("id") === attrId; + }; + }; + Expr.find["ID"] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var elem = context.getElementById( id ); + return elem ? [ elem ] : []; + } + }; + } else { + Expr.filter["ID"] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + var node = typeof elem.getAttributeNode !== "undefined" && + elem.getAttributeNode("id"); + return node && node.value === attrId; + }; + }; + + // Support: IE 6 - 7 only + // getElementById is not reliable as a find shortcut + Expr.find["ID"] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var node, i, elems, + elem = context.getElementById( id ); + + if ( elem ) { + + // Verify the id attribute + node = elem.getAttributeNode("id"); + if ( node && node.value === id ) { + return [ elem ]; + } + + // Fall back on getElementsByName + elems = context.getElementsByName( id ); + i = 0; + while ( (elem = elems[i++]) ) { + node = elem.getAttributeNode("id"); + if ( node && node.value === id ) { + return [ elem ]; + } + } + } + + return []; + } + }; + } + + // Tag + Expr.find["TAG"] = support.getElementsByTagName ? 
+		function( tag, context ) {
+			if ( typeof context.getElementsByTagName !== "undefined" ) {
+				return context.getElementsByTagName( tag );
+
+			// DocumentFragment nodes don't have gEBTN
+			} else if ( support.qsa ) {
+				return context.querySelectorAll( tag );
+			}
+		} :
+
+		function( tag, context ) {
+			var elem,
+				tmp = [],
+				i = 0,
+				// By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too
+				results = context.getElementsByTagName( tag );
+
+			// Filter out possible comments
+			if ( tag === "*" ) {
+				while ( (elem = results[i++]) ) {
+					if ( elem.nodeType === 1 ) {
+						tmp.push( elem );
+					}
+				}
+
+				return tmp;
+			}
+			return results;
+		};
+
+	// Class
+	Expr.find["CLASS"] = support.getElementsByClassName && function( className, context ) {
+		if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) {
+			return context.getElementsByClassName( className );
+		}
+	};
+
+	/* QSA/matchesSelector
+	---------------------------------------------------------------------- */
+
+	// QSA and matchesSelector support
+
+	// matchesSelector(:active) reports false when true (IE9/Opera 11.5)
+	rbuggyMatches = [];
+
+	// qSa(:focus) reports false when true (Chrome 21)
+	// We allow this because of a bug in IE8/9 that throws an error
+	// whenever `document.activeElement` is accessed on an iframe
+	// So, we allow :focus to pass through QSA all the time to avoid the IE error
+	// See https://bugs.jquery.com/ticket/13378
+	rbuggyQSA = [];
+
+	if ( (support.qsa = rnative.test( document.querySelectorAll )) ) {
+		// Build QSA regex
+		// Regex strategy adopted from Diego Perini
+		assert(function( el ) {
+			// Select is set to empty string on purpose
+			// This is to test IE's treatment of not explicitly
+			// setting a boolean content attribute,
+			// since its presence should be enough
+			// https://bugs.jquery.com/ticket/12359
+			docElem.appendChild( el ).innerHTML = "<a id='" + expando + "'></a>" +
+				"<select id='" + expando + "-\r\\' msallowcapture=''>" +
+				"<option selected=''></option></select>";
+
+			// Support: IE8, Opera 11-12.16
+			// Nothing should be selected when empty strings follow ^= or $= or *=
+			// The test attribute must be unknown in Opera but "safe" for WinRT
+			// https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section
+			if ( el.querySelectorAll("[msallowcapture^='']").length ) {
+				rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" );
+			}
+
+			// Support: IE8
+			// Boolean attributes and "value" are not treated correctly
+			if ( !el.querySelectorAll("[selected]").length ) {
+				rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" );
+			}
+
+			// Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+
+			if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) {
+				rbuggyQSA.push("~=");
+			}
+
+			// Webkit/Opera - :checked should return selected option elements
+			// http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked
+			// IE8 throws error here and will not see later tests
+			if ( !el.querySelectorAll(":checked").length ) {
+				rbuggyQSA.push(":checked");
+			}
+
+			// Support: Safari 8+, iOS 8+
+			// https://bugs.webkit.org/show_bug.cgi?id=136851
+			// In-page `selector#id sibling-combinator selector` fails
+			if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) {
+				rbuggyQSA.push(".#.+[+~]");
+			}
+		});
+
+		assert(function( el ) {
+			el.innerHTML = "<a href='' disabled='disabled'></a>" +
+				"<select disabled='disabled'><option/></select>";
+
+			// Support: Windows 8 Native Apps
+			// The type and name attributes are restricted during .innerHTML assignment
+			var input = document.createElement("input");
+			input.setAttribute( "type", "hidden" );
+			el.appendChild( input ).setAttribute( "name", "D" );
+
+			// Support: IE8
// Enforce case-sensitivity of name attribute + if ( el.querySelectorAll("[name=d]").length ) { + rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" ); + } + + // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) + // IE8 throws error here and will not see later tests + if ( el.querySelectorAll(":enabled").length !== 2 ) { + rbuggyQSA.push( ":enabled", ":disabled" ); + } + + // Support: IE9-11+ + // IE's :disabled selector does not pick up the children of disabled fieldsets + docElem.appendChild( el ).disabled = true; + if ( el.querySelectorAll(":disabled").length !== 2 ) { + rbuggyQSA.push( ":enabled", ":disabled" ); + } + + // Opera 10-11 does not throw on post-comma invalid pseudos + el.querySelectorAll("*,:x"); + rbuggyQSA.push(",.*:"); + }); + } + + if ( (support.matchesSelector = rnative.test( (matches = docElem.matches || + docElem.webkitMatchesSelector || + docElem.mozMatchesSelector || + docElem.oMatchesSelector || + docElem.msMatchesSelector) )) ) { + + assert(function( el ) { + // Check to see if it's possible to do matchesSelector + // on a disconnected node (IE 9) + support.disconnectedMatch = matches.call( el, "*" ); + + // This should fail with an exception + // Gecko does not error, returns false instead + matches.call( el, "[s!='']:x" ); + rbuggyMatches.push( "!=", pseudos ); + }); + } + + rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join("|") ); + rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join("|") ); + + /* Contains + ---------------------------------------------------------------------- */ + hasCompare = rnative.test( docElem.compareDocumentPosition ); + + // Element contains another + // Purposefully self-exclusive + // As in, an element does not contain itself + contains = hasCompare || rnative.test( docElem.contains ) ? + function( a, b ) { + var adown = a.nodeType === 9 ? a.documentElement : a, + bup = b && b.parentNode; + return a === bup || !!( bup && bup.nodeType === 1 && ( + adown.contains ? + adown.contains( bup ) : + a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 + )); + } : + function( a, b ) { + if ( b ) { + while ( (b = b.parentNode) ) { + if ( b === a ) { + return true; + } + } + } + return false; + }; + + /* Sorting + ---------------------------------------------------------------------- */ + + // Document order sorting + sortOrder = hasCompare ? + function( a, b ) { + + // Flag for duplicate removal + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + // Sort on method existence if only one input has compareDocumentPosition + var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; + if ( compare ) { + return compare; + } + + // Calculate position if both inputs belong to the same document + compare = ( a.ownerDocument || a ) === ( b.ownerDocument || b ) ? + a.compareDocumentPosition( b ) : + + // Otherwise we know they are disconnected + 1; + + // Disconnected nodes + if ( compare & 1 || + (!support.sortDetached && b.compareDocumentPosition( a ) === compare) ) { + + // Choose the first element that is related to our preferred document + if ( a === document || a.ownerDocument === preferredDoc && contains(preferredDoc, a) ) { + return -1; + } + if ( b === document || b.ownerDocument === preferredDoc && contains(preferredDoc, b) ) { + return 1; + } + + // Maintain original order + return sortInput ? + ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : + 0; + } + + return compare & 4 ? 
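+			// [Editor's note, not upstream] Bit 4 of compareDocumentPosition is
+			// DOCUMENT_POSITION_FOLLOWING: b follows a, so a sorts first (-1).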
-1 : 1; + } : + function( a, b ) { + // Exit early if the nodes are identical + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + var cur, + i = 0, + aup = a.parentNode, + bup = b.parentNode, + ap = [ a ], + bp = [ b ]; + + // Parentless nodes are either documents or disconnected + if ( !aup || !bup ) { + return a === document ? -1 : + b === document ? 1 : + aup ? -1 : + bup ? 1 : + sortInput ? + ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : + 0; + + // If the nodes are siblings, we can do a quick check + } else if ( aup === bup ) { + return siblingCheck( a, b ); + } + + // Otherwise we need full lists of their ancestors for comparison + cur = a; + while ( (cur = cur.parentNode) ) { + ap.unshift( cur ); + } + cur = b; + while ( (cur = cur.parentNode) ) { + bp.unshift( cur ); + } + + // Walk down the tree looking for a discrepancy + while ( ap[i] === bp[i] ) { + i++; + } + + return i ? + // Do a sibling check if the nodes have a common ancestor + siblingCheck( ap[i], bp[i] ) : + + // Otherwise nodes in our document sort first + ap[i] === preferredDoc ? -1 : + bp[i] === preferredDoc ? 1 : + 0; + }; + + return document; +}; + +Sizzle.matches = function( expr, elements ) { + return Sizzle( expr, null, null, elements ); +}; + +Sizzle.matchesSelector = function( elem, expr ) { + // Set document vars if needed + if ( ( elem.ownerDocument || elem ) !== document ) { + setDocument( elem ); + } + + // Make sure that attribute selectors are quoted + expr = expr.replace( rattributeQuotes, "='$1']" ); + + if ( support.matchesSelector && documentIsHTML && + !compilerCache[ expr + " " ] && + ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && + ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { + + try { + var ret = matches.call( elem, expr ); + + // IE 9's matchesSelector returns false on disconnected nodes + if ( ret || support.disconnectedMatch || + // As well, disconnected nodes are said to be in a document + // fragment in IE 9 + elem.document && elem.document.nodeType !== 11 ) { + return ret; + } + } catch (e) {} + } + + return Sizzle( expr, document, null, [ elem ] ).length > 0; +}; + +Sizzle.contains = function( context, elem ) { + // Set document vars if needed + if ( ( context.ownerDocument || context ) !== document ) { + setDocument( context ); + } + return contains( context, elem ); +}; + +Sizzle.attr = function( elem, name ) { + // Set document vars if needed + if ( ( elem.ownerDocument || elem ) !== document ) { + setDocument( elem ); + } + + var fn = Expr.attrHandle[ name.toLowerCase() ], + // Don't get fooled by Object.prototype properties (jQuery #13807) + val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? + fn( elem, name, !documentIsHTML ) : + undefined; + + return val !== undefined ? + val : + support.attributes || !documentIsHTML ? + elem.getAttribute( name ) : + (val = elem.getAttributeNode(name)) && val.specified ? 
+ val.value : + null; +}; + +Sizzle.escape = function( sel ) { + return (sel + "").replace( rcssescape, fcssescape ); +}; + +Sizzle.error = function( msg ) { + throw new Error( "Syntax error, unrecognized expression: " + msg ); +}; + +/** + * Document sorting and removing duplicates + * @param {ArrayLike} results + */ +Sizzle.uniqueSort = function( results ) { + var elem, + duplicates = [], + j = 0, + i = 0; + + // Unless we *know* we can detect duplicates, assume their presence + hasDuplicate = !support.detectDuplicates; + sortInput = !support.sortStable && results.slice( 0 ); + results.sort( sortOrder ); + + if ( hasDuplicate ) { + while ( (elem = results[i++]) ) { + if ( elem === results[ i ] ) { + j = duplicates.push( i ); + } + } + while ( j-- ) { + results.splice( duplicates[ j ], 1 ); + } + } + + // Clear input after sorting to release objects + // See https://github.com/jquery/sizzle/pull/225 + sortInput = null; + + return results; +}; + +/** + * Utility function for retrieving the text value of an array of DOM nodes + * @param {Array|Element} elem + */ +getText = Sizzle.getText = function( elem ) { + var node, + ret = "", + i = 0, + nodeType = elem.nodeType; + + if ( !nodeType ) { + // If no nodeType, this is expected to be an array + while ( (node = elem[i++]) ) { + // Do not traverse comment nodes + ret += getText( node ); + } + } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { + // Use textContent for elements + // innerText usage removed for consistency of new lines (jQuery #11153) + if ( typeof elem.textContent === "string" ) { + return elem.textContent; + } else { + // Traverse its children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + ret += getText( elem ); + } + } + } else if ( nodeType === 3 || nodeType === 4 ) { + return elem.nodeValue; + } + // Do not include comment or processing instruction nodes + + return ret; +}; + +Expr = Sizzle.selectors = { + + // Can be adjusted by the user + cacheLength: 50, + + createPseudo: markFunction, + + match: matchExpr, + + attrHandle: {}, + + find: {}, + + relative: { + ">": { dir: "parentNode", first: true }, + " ": { dir: "parentNode" }, + "+": { dir: "previousSibling", first: true }, + "~": { dir: "previousSibling" } + }, + + preFilter: { + "ATTR": function( match ) { + match[1] = match[1].replace( runescape, funescape ); + + // Move the given value to match[3] whether quoted or unquoted + match[3] = ( match[3] || match[4] || match[5] || "" ).replace( runescape, funescape ); + + if ( match[2] === "~=" ) { + match[3] = " " + match[3] + " "; + } + + return match.slice( 0, 4 ); + }, + + "CHILD": function( match ) { + /* matches from matchExpr["CHILD"] + 1 type (only|nth|...) + 2 what (child|of-type) + 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) + 4 xn-component of xn+y argument ([+-]?\d*n|) + 5 sign of xn-component + 6 x of xn-component + 7 sign of y-component + 8 y of y-component + */ + match[1] = match[1].toLowerCase(); + + if ( match[1].slice( 0, 3 ) === "nth" ) { + // nth-* requires argument + if ( !match[3] ) { + Sizzle.error( match[0] ); + } + + // numeric x and y parameters for Expr.filter.CHILD + // remember that false/true cast respectively to 0/1 + match[4] = +( match[4] ? 
match[5] + (match[6] || 1) : 2 * ( match[3] === "even" || match[3] === "odd" ) ); + match[5] = +( ( match[7] + match[8] ) || match[3] === "odd" ); + + // other types prohibit arguments + } else if ( match[3] ) { + Sizzle.error( match[0] ); + } + + return match; + }, + + "PSEUDO": function( match ) { + var excess, + unquoted = !match[6] && match[2]; + + if ( matchExpr["CHILD"].test( match[0] ) ) { + return null; + } + + // Accept quoted arguments as-is + if ( match[3] ) { + match[2] = match[4] || match[5] || ""; + + // Strip excess characters from unquoted arguments + } else if ( unquoted && rpseudo.test( unquoted ) && + // Get excess from tokenize (recursively) + (excess = tokenize( unquoted, true )) && + // advance to the next closing parenthesis + (excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length) ) { + + // excess is a negative index + match[0] = match[0].slice( 0, excess ); + match[2] = unquoted.slice( 0, excess ); + } + + // Return only captures needed by the pseudo filter method (type and argument) + return match.slice( 0, 3 ); + } + }, + + filter: { + + "TAG": function( nodeNameSelector ) { + var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); + return nodeNameSelector === "*" ? + function() { return true; } : + function( elem ) { + return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; + }; + }, + + "CLASS": function( className ) { + var pattern = classCache[ className + " " ]; + + return pattern || + (pattern = new RegExp( "(^|" + whitespace + ")" + className + "(" + whitespace + "|$)" )) && + classCache( className, function( elem ) { + return pattern.test( typeof elem.className === "string" && elem.className || typeof elem.getAttribute !== "undefined" && elem.getAttribute("class") || "" ); + }); + }, + + "ATTR": function( name, operator, check ) { + return function( elem ) { + var result = Sizzle.attr( elem, name ); + + if ( result == null ) { + return operator === "!="; + } + if ( !operator ) { + return true; + } + + result += ""; + + return operator === "=" ? result === check : + operator === "!=" ? result !== check : + operator === "^=" ? check && result.indexOf( check ) === 0 : + operator === "*=" ? check && result.indexOf( check ) > -1 : + operator === "$=" ? check && result.slice( -check.length ) === check : + operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 : + operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : + false; + }; + }, + + "CHILD": function( type, what, argument, first, last ) { + var simple = type.slice( 0, 3 ) !== "nth", + forward = type.slice( -4 ) !== "last", + ofType = what === "of-type"; + + return first === 1 && last === 0 ? + + // Shortcut for :nth-*(n) + function( elem ) { + return !!elem.parentNode; + } : + + function( elem, context, xml ) { + var cache, uniqueCache, outerCache, node, nodeIndex, start, + dir = simple !== forward ? "nextSibling" : "previousSibling", + parent = elem.parentNode, + name = ofType && elem.nodeName.toLowerCase(), + useCache = !xml && !ofType, + diff = false; + + if ( parent ) { + + // :(first|last|only)-(child|of-type) + if ( simple ) { + while ( dir ) { + node = elem; + while ( (node = node[ dir ]) ) { + if ( ofType ? 
+ node.nodeName.toLowerCase() === name : + node.nodeType === 1 ) { + + return false; + } + } + // Reverse direction for :only-* (if we haven't yet done so) + start = dir = type === "only" && !start && "nextSibling"; + } + return true; + } + + start = [ forward ? parent.firstChild : parent.lastChild ]; + + // non-xml :nth-child(...) stores cache data on `parent` + if ( forward && useCache ) { + + // Seek `elem` from a previously-cached index + + // ...in a gzip-friendly way + node = parent; + outerCache = node[ expando ] || (node[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + (outerCache[ node.uniqueID ] = {}); + + cache = uniqueCache[ type ] || []; + nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; + diff = nodeIndex && cache[ 2 ]; + node = nodeIndex && parent.childNodes[ nodeIndex ]; + + while ( (node = ++nodeIndex && node && node[ dir ] || + + // Fallback to seeking `elem` from the start + (diff = nodeIndex = 0) || start.pop()) ) { + + // When found, cache indexes on `parent` and break + if ( node.nodeType === 1 && ++diff && node === elem ) { + uniqueCache[ type ] = [ dirruns, nodeIndex, diff ]; + break; + } + } + + } else { + // Use previously-cached element index if available + if ( useCache ) { + // ...in a gzip-friendly way + node = elem; + outerCache = node[ expando ] || (node[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + (outerCache[ node.uniqueID ] = {}); + + cache = uniqueCache[ type ] || []; + nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; + diff = nodeIndex; + } + + // xml :nth-child(...) + // or :nth-last-child(...) or :nth(-last)?-of-type(...) + if ( diff === false ) { + // Use the same loop as above to seek `elem` from the start + while ( (node = ++nodeIndex && node && node[ dir ] || + (diff = nodeIndex = 0) || start.pop()) ) { + + if ( ( ofType ? + node.nodeName.toLowerCase() === name : + node.nodeType === 1 ) && + ++diff ) { + + // Cache the index of each encountered element + if ( useCache ) { + outerCache = node[ expando ] || (node[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + (outerCache[ node.uniqueID ] = {}); + + uniqueCache[ type ] = [ dirruns, diff ]; + } + + if ( node === elem ) { + break; + } + } + } + } + } + + // Incorporate the offset, then check against cycle size + diff -= last; + return diff === first || ( diff % first === 0 && diff / first >= 0 ); + } + }; + }, + + "PSEUDO": function( pseudo, argument ) { + // pseudo-class names are case-insensitive + // http://www.w3.org/TR/selectors/#pseudo-classes + // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters + // Remember that setFilters inherits from pseudos + var args, + fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || + Sizzle.error( "unsupported pseudo: " + pseudo ); + + // The user may use createPseudo to indicate that + // arguments are needed to create the filter function + // just as Sizzle does + if ( fn[ expando ] ) { + return fn( argument ); + } + + // But maintain support for old signatures + if ( fn.length > 1 ) { + args = [ pseudo, pseudo, "", argument ]; + return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? 
+ markFunction(function( seed, matches ) { + var idx, + matched = fn( seed, argument ), + i = matched.length; + while ( i-- ) { + idx = indexOf( seed, matched[i] ); + seed[ idx ] = !( matches[ idx ] = matched[i] ); + } + }) : + function( elem ) { + return fn( elem, 0, args ); + }; + } + + return fn; + } + }, + + pseudos: { + // Potentially complex pseudos + "not": markFunction(function( selector ) { + // Trim the selector passed to compile + // to avoid treating leading and trailing + // spaces as combinators + var input = [], + results = [], + matcher = compile( selector.replace( rtrim, "$1" ) ); + + return matcher[ expando ] ? + markFunction(function( seed, matches, context, xml ) { + var elem, + unmatched = matcher( seed, null, xml, [] ), + i = seed.length; + + // Match elements unmatched by `matcher` + while ( i-- ) { + if ( (elem = unmatched[i]) ) { + seed[i] = !(matches[i] = elem); + } + } + }) : + function( elem, context, xml ) { + input[0] = elem; + matcher( input, null, xml, results ); + // Don't keep the element (issue #299) + input[0] = null; + return !results.pop(); + }; + }), + + "has": markFunction(function( selector ) { + return function( elem ) { + return Sizzle( selector, elem ).length > 0; + }; + }), + + "contains": markFunction(function( text ) { + text = text.replace( runescape, funescape ); + return function( elem ) { + return ( elem.textContent || elem.innerText || getText( elem ) ).indexOf( text ) > -1; + }; + }), + + // "Whether an element is represented by a :lang() selector + // is based solely on the element's language value + // being equal to the identifier C, + // or beginning with the identifier C immediately followed by "-". + // The matching of C against the element's language value is performed case-insensitively. + // The identifier C does not have to be a valid language name." + // http://www.w3.org/TR/selectors/#lang-pseudo + "lang": markFunction( function( lang ) { + // lang value must be a valid identifier + if ( !ridentifier.test(lang || "") ) { + Sizzle.error( "unsupported lang: " + lang ); + } + lang = lang.replace( runescape, funescape ).toLowerCase(); + return function( elem ) { + var elemLang; + do { + if ( (elemLang = documentIsHTML ? 
+ elem.lang : + elem.getAttribute("xml:lang") || elem.getAttribute("lang")) ) { + + elemLang = elemLang.toLowerCase(); + return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; + } + } while ( (elem = elem.parentNode) && elem.nodeType === 1 ); + return false; + }; + }), + + // Miscellaneous + "target": function( elem ) { + var hash = window.location && window.location.hash; + return hash && hash.slice( 1 ) === elem.id; + }, + + "root": function( elem ) { + return elem === docElem; + }, + + "focus": function( elem ) { + return elem === document.activeElement && (!document.hasFocus || document.hasFocus()) && !!(elem.type || elem.href || ~elem.tabIndex); + }, + + // Boolean properties + "enabled": createDisabledPseudo( false ), + "disabled": createDisabledPseudo( true ), + + "checked": function( elem ) { + // In CSS3, :checked should return both checked and selected elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + var nodeName = elem.nodeName.toLowerCase(); + return (nodeName === "input" && !!elem.checked) || (nodeName === "option" && !!elem.selected); + }, + + "selected": function( elem ) { + // Accessing this property makes selected-by-default + // options in Safari work properly + if ( elem.parentNode ) { + elem.parentNode.selectedIndex; + } + + return elem.selected === true; + }, + + // Contents + "empty": function( elem ) { + // http://www.w3.org/TR/selectors/#empty-pseudo + // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), + // but not by others (comment: 8; processing instruction: 7; etc.) + // nodeType < 6 works because attributes (2) do not appear as children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + if ( elem.nodeType < 6 ) { + return false; + } + } + return true; + }, + + "parent": function( elem ) { + return !Expr.pseudos["empty"]( elem ); + }, + + // Element/input types + "header": function( elem ) { + return rheader.test( elem.nodeName ); + }, + + "input": function( elem ) { + return rinputs.test( elem.nodeName ); + }, + + "button": function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === "button" || name === "button"; + }, + + "text": function( elem ) { + var attr; + return elem.nodeName.toLowerCase() === "input" && + elem.type === "text" && + + // Support: IE<8 + // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" + ( (attr = elem.getAttribute("type")) == null || attr.toLowerCase() === "text" ); + }, + + // Position-in-collection + "first": createPositionalPseudo(function() { + return [ 0 ]; + }), + + "last": createPositionalPseudo(function( matchIndexes, length ) { + return [ length - 1 ]; + }), + + "eq": createPositionalPseudo(function( matchIndexes, length, argument ) { + return [ argument < 0 ? argument + length : argument ]; + }), + + "even": createPositionalPseudo(function( matchIndexes, length ) { + var i = 0; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "odd": createPositionalPseudo(function( matchIndexes, length ) { + var i = 1; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "lt": createPositionalPseudo(function( matchIndexes, length, argument ) { + var i = argument < 0 ? argument + length : argument; + for ( ; --i >= 0; ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "gt": createPositionalPseudo(function( matchIndexes, length, argument ) { + var i = argument < 0 ? 
argument + length : argument; + for ( ; ++i < length; ) { + matchIndexes.push( i ); + } + return matchIndexes; + }) + } +}; + +Expr.pseudos["nth"] = Expr.pseudos["eq"]; + +// Add button/input type pseudos +for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { + Expr.pseudos[ i ] = createInputPseudo( i ); +} +for ( i in { submit: true, reset: true } ) { + Expr.pseudos[ i ] = createButtonPseudo( i ); +} + +// Easy API for creating new setFilters +function setFilters() {} +setFilters.prototype = Expr.filters = Expr.pseudos; +Expr.setFilters = new setFilters(); + +tokenize = Sizzle.tokenize = function( selector, parseOnly ) { + var matched, match, tokens, type, + soFar, groups, preFilters, + cached = tokenCache[ selector + " " ]; + + if ( cached ) { + return parseOnly ? 0 : cached.slice( 0 ); + } + + soFar = selector; + groups = []; + preFilters = Expr.preFilter; + + while ( soFar ) { + + // Comma and first run + if ( !matched || (match = rcomma.exec( soFar )) ) { + if ( match ) { + // Don't consume trailing commas as valid + soFar = soFar.slice( match[0].length ) || soFar; + } + groups.push( (tokens = []) ); + } + + matched = false; + + // Combinators + if ( (match = rcombinators.exec( soFar )) ) { + matched = match.shift(); + tokens.push({ + value: matched, + // Cast descendant combinators to space + type: match[0].replace( rtrim, " " ) + }); + soFar = soFar.slice( matched.length ); + } + + // Filters + for ( type in Expr.filter ) { + if ( (match = matchExpr[ type ].exec( soFar )) && (!preFilters[ type ] || + (match = preFilters[ type ]( match ))) ) { + matched = match.shift(); + tokens.push({ + value: matched, + type: type, + matches: match + }); + soFar = soFar.slice( matched.length ); + } + } + + if ( !matched ) { + break; + } + } + + // Return the length of the invalid excess + // if we're just parsing + // Otherwise, throw an error or return tokens + return parseOnly ? + soFar.length : + soFar ? + Sizzle.error( selector ) : + // Cache the tokens + tokenCache( selector, groups ).slice( 0 ); +}; + +function toSelector( tokens ) { + var i = 0, + len = tokens.length, + selector = ""; + for ( ; i < len; i++ ) { + selector += tokens[i].value; + } + return selector; +} + +function addCombinator( matcher, combinator, base ) { + var dir = combinator.dir, + skip = combinator.next, + key = skip || dir, + checkNonElements = base && key === "parentNode", + doneName = done++; + + return combinator.first ? 
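+	// [Editor's note, not upstream] combinator.first is set for ">" and "+"
+	// in Expr.relative, so only the single nearest parentNode/previousSibling
+	// element is tested; " " and "~" use the caching walker further below.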
+ // Check against closest ancestor/preceding element + function( elem, context, xml ) { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + return matcher( elem, context, xml ); + } + } + return false; + } : + + // Check against all ancestor/preceding elements + function( elem, context, xml ) { + var oldCache, uniqueCache, outerCache, + newCache = [ dirruns, doneName ]; + + // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching + if ( xml ) { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + if ( matcher( elem, context, xml ) ) { + return true; + } + } + } + } else { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + outerCache = elem[ expando ] || (elem[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ elem.uniqueID ] || (outerCache[ elem.uniqueID ] = {}); + + if ( skip && skip === elem.nodeName.toLowerCase() ) { + elem = elem[ dir ] || elem; + } else if ( (oldCache = uniqueCache[ key ]) && + oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { + + // Assign to newCache so results back-propagate to previous elements + return (newCache[ 2 ] = oldCache[ 2 ]); + } else { + // Reuse newcache so results back-propagate to previous elements + uniqueCache[ key ] = newCache; + + // A match means we're done; a fail means we have to keep checking + if ( (newCache[ 2 ] = matcher( elem, context, xml )) ) { + return true; + } + } + } + } + } + return false; + }; +} + +function elementMatcher( matchers ) { + return matchers.length > 1 ? + function( elem, context, xml ) { + var i = matchers.length; + while ( i-- ) { + if ( !matchers[i]( elem, context, xml ) ) { + return false; + } + } + return true; + } : + matchers[0]; +} + +function multipleContexts( selector, contexts, results ) { + var i = 0, + len = contexts.length; + for ( ; i < len; i++ ) { + Sizzle( selector, contexts[i], results ); + } + return results; +} + +function condense( unmatched, map, filter, context, xml ) { + var elem, + newUnmatched = [], + i = 0, + len = unmatched.length, + mapped = map != null; + + for ( ; i < len; i++ ) { + if ( (elem = unmatched[i]) ) { + if ( !filter || filter( elem, context, xml ) ) { + newUnmatched.push( elem ); + if ( mapped ) { + map.push( i ); + } + } + } + } + + return newUnmatched; +} + +function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { + if ( postFilter && !postFilter[ expando ] ) { + postFilter = setMatcher( postFilter ); + } + if ( postFinder && !postFinder[ expando ] ) { + postFinder = setMatcher( postFinder, postSelector ); + } + return markFunction(function( seed, results, context, xml ) { + var temp, i, elem, + preMap = [], + postMap = [], + preexisting = results.length, + + // Get initial elements from seed or context + elems = seed || multipleContexts( selector || "*", context.nodeType ? [ context ] : context, [] ), + + // Prefilter to get matcher input, preserving a map for seed-results synchronization + matcherIn = preFilter && ( seed || !selector ) ? + condense( elems, preMap, preFilter, context, xml ) : + elems, + + matcherOut = matcher ? + // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, + postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
+ + // ...intermediate processing is necessary + [] : + + // ...otherwise use results directly + results : + matcherIn; + + // Find primary matches + if ( matcher ) { + matcher( matcherIn, matcherOut, context, xml ); + } + + // Apply postFilter + if ( postFilter ) { + temp = condense( matcherOut, postMap ); + postFilter( temp, [], context, xml ); + + // Un-match failing elements by moving them back to matcherIn + i = temp.length; + while ( i-- ) { + if ( (elem = temp[i]) ) { + matcherOut[ postMap[i] ] = !(matcherIn[ postMap[i] ] = elem); + } + } + } + + if ( seed ) { + if ( postFinder || preFilter ) { + if ( postFinder ) { + // Get the final matcherOut by condensing this intermediate into postFinder contexts + temp = []; + i = matcherOut.length; + while ( i-- ) { + if ( (elem = matcherOut[i]) ) { + // Restore matcherIn since elem is not yet a final match + temp.push( (matcherIn[i] = elem) ); + } + } + postFinder( null, (matcherOut = []), temp, xml ); + } + + // Move matched elements from seed to results to keep them synchronized + i = matcherOut.length; + while ( i-- ) { + if ( (elem = matcherOut[i]) && + (temp = postFinder ? indexOf( seed, elem ) : preMap[i]) > -1 ) { + + seed[temp] = !(results[temp] = elem); + } + } + } + + // Add elements to results, through postFinder if defined + } else { + matcherOut = condense( + matcherOut === results ? + matcherOut.splice( preexisting, matcherOut.length ) : + matcherOut + ); + if ( postFinder ) { + postFinder( null, results, matcherOut, xml ); + } else { + push.apply( results, matcherOut ); + } + } + }); +} + +function matcherFromTokens( tokens ) { + var checkContext, matcher, j, + len = tokens.length, + leadingRelative = Expr.relative[ tokens[0].type ], + implicitRelative = leadingRelative || Expr.relative[" "], + i = leadingRelative ? 1 : 0, + + // The foundational matcher ensures that elements are reachable from top-level context(s) + matchContext = addCombinator( function( elem ) { + return elem === checkContext; + }, implicitRelative, true ), + matchAnyContext = addCombinator( function( elem ) { + return indexOf( checkContext, elem ) > -1; + }, implicitRelative, true ), + matchers = [ function( elem, context, xml ) { + var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( + (checkContext = context).nodeType ? + matchContext( elem, context, xml ) : + matchAnyContext( elem, context, xml ) ); + // Avoid hanging onto element (issue #299) + checkContext = null; + return ret; + } ]; + + for ( ; i < len; i++ ) { + if ( (matcher = Expr.relative[ tokens[i].type ]) ) { + matchers = [ addCombinator(elementMatcher( matchers ), matcher) ]; + } else { + matcher = Expr.filter[ tokens[i].type ].apply( null, tokens[i].matches ); + + // Return special upon seeing a positional matcher + if ( matcher[ expando ] ) { + // Find the next relative operator (if any) for proper handling + j = ++i; + for ( ; j < len; j++ ) { + if ( Expr.relative[ tokens[j].type ] ) { + break; + } + } + return setMatcher( + i > 1 && elementMatcher( matchers ), + i > 1 && toSelector( + // If the preceding token was a descendant combinator, insert an implicit any-element `*` + tokens.slice( 0, i - 1 ).concat({ value: tokens[ i - 2 ].type === " " ? 
"*" : "" }) + ).replace( rtrim, "$1" ), + matcher, + i < j && matcherFromTokens( tokens.slice( i, j ) ), + j < len && matcherFromTokens( (tokens = tokens.slice( j )) ), + j < len && toSelector( tokens ) + ); + } + matchers.push( matcher ); + } + } + + return elementMatcher( matchers ); +} + +function matcherFromGroupMatchers( elementMatchers, setMatchers ) { + var bySet = setMatchers.length > 0, + byElement = elementMatchers.length > 0, + superMatcher = function( seed, context, xml, results, outermost ) { + var elem, j, matcher, + matchedCount = 0, + i = "0", + unmatched = seed && [], + setMatched = [], + contextBackup = outermostContext, + // We must always have either seed elements or outermost context + elems = seed || byElement && Expr.find["TAG"]( "*", outermost ), + // Use integer dirruns iff this is the outermost matcher + dirrunsUnique = (dirruns += contextBackup == null ? 1 : Math.random() || 0.1), + len = elems.length; + + if ( outermost ) { + outermostContext = context === document || context || outermost; + } + + // Add elements passing elementMatchers directly to results + // Support: IE<9, Safari + // Tolerate NodeList properties (IE: "length"; Safari: ) matching elements by id + for ( ; i !== len && (elem = elems[i]) != null; i++ ) { + if ( byElement && elem ) { + j = 0; + if ( !context && elem.ownerDocument !== document ) { + setDocument( elem ); + xml = !documentIsHTML; + } + while ( (matcher = elementMatchers[j++]) ) { + if ( matcher( elem, context || document, xml) ) { + results.push( elem ); + break; + } + } + if ( outermost ) { + dirruns = dirrunsUnique; + } + } + + // Track unmatched elements for set filters + if ( bySet ) { + // They will have gone through all possible matchers + if ( (elem = !matcher && elem) ) { + matchedCount--; + } + + // Lengthen the array for every element, matched or not + if ( seed ) { + unmatched.push( elem ); + } + } + } + + // `i` is now the count of elements visited above, and adding it to `matchedCount` + // makes the latter nonnegative. + matchedCount += i; + + // Apply set filters to unmatched elements + // NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount` + // equals `i`), unless we didn't visit _any_ elements in the above loop because we have + // no element matchers and no seed. + // Incrementing an initially-string "0" `i` allows `i` to remain a string only in that + // case, which will result in a "00" `matchedCount` that differs from `i` but is also + // numerically zero. + if ( bySet && i !== matchedCount ) { + j = 0; + while ( (matcher = setMatchers[j++]) ) { + matcher( unmatched, setMatched, context, xml ); + } + + if ( seed ) { + // Reintegrate element matches to eliminate the need for sorting + if ( matchedCount > 0 ) { + while ( i-- ) { + if ( !(unmatched[i] || setMatched[i]) ) { + setMatched[i] = pop.call( results ); + } + } + } + + // Discard index placeholder values to get only actual matches + setMatched = condense( setMatched ); + } + + // Add matches to results + push.apply( results, setMatched ); + + // Seedless set matches succeeding multiple successful matchers stipulate sorting + if ( outermost && !seed && setMatched.length > 0 && + ( matchedCount + setMatchers.length ) > 1 ) { + + Sizzle.uniqueSort( results ); + } + } + + // Override manipulation of globals by nested matchers + if ( outermost ) { + dirruns = dirrunsUnique; + outermostContext = contextBackup; + } + + return unmatched; + }; + + return bySet ? 
+ markFunction( superMatcher ) : + superMatcher; +} + +compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { + var i, + setMatchers = [], + elementMatchers = [], + cached = compilerCache[ selector + " " ]; + + if ( !cached ) { + // Generate a function of recursive functions that can be used to check each element + if ( !match ) { + match = tokenize( selector ); + } + i = match.length; + while ( i-- ) { + cached = matcherFromTokens( match[i] ); + if ( cached[ expando ] ) { + setMatchers.push( cached ); + } else { + elementMatchers.push( cached ); + } + } + + // Cache the compiled function + cached = compilerCache( selector, matcherFromGroupMatchers( elementMatchers, setMatchers ) ); + + // Save selector and tokenization + cached.selector = selector; + } + return cached; +}; + +/** + * A low-level selection function that works with Sizzle's compiled + * selector functions + * @param {String|Function} selector A selector or a pre-compiled + * selector function built with Sizzle.compile + * @param {Element} context + * @param {Array} [results] + * @param {Array} [seed] A set of elements to match against + */ +select = Sizzle.select = function( selector, context, results, seed ) { + var i, tokens, token, type, find, + compiled = typeof selector === "function" && selector, + match = !seed && tokenize( (selector = compiled.selector || selector) ); + + results = results || []; + + // Try to minimize operations if there is only one selector in the list and no seed + // (the latter of which guarantees us context) + if ( match.length === 1 ) { + + // Reduce context if the leading compound selector is an ID + tokens = match[0] = match[0].slice( 0 ); + if ( tokens.length > 2 && (token = tokens[0]).type === "ID" && + context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[1].type ] ) { + + context = ( Expr.find["ID"]( token.matches[0].replace(runescape, funescape), context ) || [] )[0]; + if ( !context ) { + return results; + + // Precompiled matchers will still verify ancestry, so step up a level + } else if ( compiled ) { + context = context.parentNode; + } + + selector = selector.slice( tokens.shift().value.length ); + } + + // Fetch a seed set for right-to-left matching + i = matchExpr["needsContext"].test( selector ) ? 
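+	// [Editorial sketch, not part of upstream jQuery: compile() and select() are the
+	// low-level entry points. A compiled selector can be reused against many contexts:
+	//
+	//   var compiled = Sizzle.compile( "li.item > a" );
+	//   Sizzle.select( compiled, document, [] );    // pre-compiled function
+	//   Sizzle.select( "li.item > a", document );   // string; compiled and cached here
+	//
+	// The ternary below only fetches a right-to-left seed set when the selector does
+	// not need its context (i.e. no positional pseudos such as :first or :eq).]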
0 : tokens.length;
+		while ( i-- ) {
+			token = tokens[i];
+
+			// Abort if we hit a combinator
+			if ( Expr.relative[ (type = token.type) ] ) {
+				break;
+			}
+			if ( (find = Expr.find[ type ]) ) {
+				// Search, expanding context for leading sibling combinators
+				if ( (seed = find(
+					token.matches[0].replace( runescape, funescape ),
+					rsibling.test( tokens[0].type ) && testContext( context.parentNode ) || context
+				)) ) {
+
+					// If seed is empty or no tokens remain, we can return early
+					tokens.splice( i, 1 );
+					selector = seed.length && toSelector( tokens );
+					if ( !selector ) {
+						push.apply( results, seed );
+						return results;
+					}
+
+					break;
+				}
+			}
+		}
+	}
+
+	// Compile and execute a filtering function if one is not provided
+	// Provide `match` to avoid retokenization if we modified the selector above
+	( compiled || compile( selector, match ) )(
+		seed,
+		context,
+		!documentIsHTML,
+		results,
+		!context || rsibling.test( selector ) && testContext( context.parentNode ) || context
+	);
+	return results;
+};
+
+// One-time assignments
+
+// Sort stability
+support.sortStable = expando.split("").sort( sortOrder ).join("") === expando;
+
+// Support: Chrome 14-35+
+// Always assume duplicates if they aren't passed to the comparison function
+support.detectDuplicates = !!hasDuplicate;
+
+// Initialize against the default document
+setDocument();
+
+// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27)
+// Detached nodes confoundingly follow *each other*
+support.sortDetached = assert(function( el ) {
+	// Should return 1, but returns 4 (following)
+	return el.compareDocumentPosition( document.createElement("fieldset") ) & 1;
+});
+
+// Support: IE<8
+// Prevent attribute/property "interpolation"
+// https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx
+if ( !assert(function( el ) {
+	el.innerHTML = "<a href='#'></a>";
+	return el.firstChild.getAttribute("href") === "#" ;
+}) ) {
+	addHandle( "type|href|height|width", function( elem, name, isXML ) {
+		if ( !isXML ) {
+			return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 );
+		}
+	});
+}
+
+// Support: IE<9
+// Use defaultValue in place of getAttribute("value")
+if ( !support.attributes || !assert(function( el ) {
+	el.innerHTML = "<input/>";
+	el.firstChild.setAttribute( "value", "" );
+	return el.firstChild.getAttribute( "value" ) === "";
+}) ) {
+	addHandle( "value", function( elem, name, isXML ) {
+		if ( !isXML && elem.nodeName.toLowerCase() === "input" ) {
+			return elem.defaultValue;
+		}
+	});
+}
+
+// Support: IE<9
+// Use getAttributeNode to fetch booleans when getAttribute lies
+if ( !assert(function( el ) {
+	return el.getAttribute("disabled") == null;
+}) ) {
+	addHandle( booleans, function( elem, name, isXML ) {
+		var val;
+		if ( !isXML ) {
+			return elem[ name ] === true ? name.toLowerCase() :
+					(val = elem.getAttributeNode( name )) && val.specified ?
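+				// [Editorial note, not part of upstream jQuery: these addHandle()
+				// registrations are fallbacks; each one is installed only when the
+				// feature detect above fails, so on conforming browsers attribute
+				// lookups keep the plain getAttribute() fast path.]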
+ val.value : + null; + } + }); +} + +return Sizzle; + +})( window ); + + + +jQuery.find = Sizzle; +jQuery.expr = Sizzle.selectors; + +// Deprecated +jQuery.expr[ ":" ] = jQuery.expr.pseudos; +jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort; +jQuery.text = Sizzle.getText; +jQuery.isXMLDoc = Sizzle.isXML; +jQuery.contains = Sizzle.contains; +jQuery.escapeSelector = Sizzle.escape; + + + + +var dir = function( elem, dir, until ) { + var matched = [], + truncate = until !== undefined; + + while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) { + if ( elem.nodeType === 1 ) { + if ( truncate && jQuery( elem ).is( until ) ) { + break; + } + matched.push( elem ); + } + } + return matched; +}; + + +var siblings = function( n, elem ) { + var matched = []; + + for ( ; n; n = n.nextSibling ) { + if ( n.nodeType === 1 && n !== elem ) { + matched.push( n ); + } + } + + return matched; +}; + + +var rneedsContext = jQuery.expr.match.needsContext; + +var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i ); + + + +var risSimple = /^.[^:#\[\.,]*$/; + +// Implement the identical functionality for filter and not +function winnow( elements, qualifier, not ) { + if ( jQuery.isFunction( qualifier ) ) { + return jQuery.grep( elements, function( elem, i ) { + return !!qualifier.call( elem, i, elem ) !== not; + } ); + } + + // Single element + if ( qualifier.nodeType ) { + return jQuery.grep( elements, function( elem ) { + return ( elem === qualifier ) !== not; + } ); + } + + // Arraylike of elements (jQuery, arguments, Array) + if ( typeof qualifier !== "string" ) { + return jQuery.grep( elements, function( elem ) { + return ( indexOf.call( qualifier, elem ) > -1 ) !== not; + } ); + } + + // Simple selector that can be filtered directly, removing non-Elements + if ( risSimple.test( qualifier ) ) { + return jQuery.filter( qualifier, elements, not ); + } + + // Complex selector, compare the two sets, removing non-Elements + qualifier = jQuery.filter( qualifier, elements ); + return jQuery.grep( elements, function( elem ) { + return ( indexOf.call( qualifier, elem ) > -1 ) !== not && elem.nodeType === 1; + } ); +} + +jQuery.filter = function( expr, elems, not ) { + var elem = elems[ 0 ]; + + if ( not ) { + expr = ":not(" + expr + ")"; + } + + if ( elems.length === 1 && elem.nodeType === 1 ) { + return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : []; + } + + return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { + return elem.nodeType === 1; + } ) ); +}; + +jQuery.fn.extend( { + find: function( selector ) { + var i, ret, + len = this.length, + self = this; + + if ( typeof selector !== "string" ) { + return this.pushStack( jQuery( selector ).filter( function() { + for ( i = 0; i < len; i++ ) { + if ( jQuery.contains( self[ i ], this ) ) { + return true; + } + } + } ) ); + } + + ret = this.pushStack( [] ); + + for ( i = 0; i < len; i++ ) { + jQuery.find( selector, self[ i ], ret ); + } + + return len > 1 ? jQuery.uniqueSort( ret ) : ret; + }, + filter: function( selector ) { + return this.pushStack( winnow( this, selector || [], false ) ); + }, + not: function( selector ) { + return this.pushStack( winnow( this, selector || [], true ) ); + }, + is: function( selector ) { + return !!winnow( + this, + + // If this is a positional/relative selector, check membership in the returned set + // so $("p:first").is("p:last") won't return true for a doc with two "p". + typeof selector === "string" && rneedsContext.test( selector ) ? 
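+			// [Editorial sketch, not part of upstream jQuery: winnow() above gives
+			// .filter(), .not() and .is() one implementation. For instance:
+			//
+			//   $( "li" ).filter( ".active" )      // keep matching elements
+			//   $( "li" ).not( ":first-child" )    // drop matching elements
+			//   $( "li" ).is( function( i, el ) {  // predicate form
+			//       return el.children.length > 0;
+			//   } );
+			//
+			// The positional check below routes selectors like ":first" through a real
+			// jQuery set so that membership, not mere matching, is tested.]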
+				jQuery( selector ) :
+				selector || [],
+			false
+		).length;
+	}
+} );
+
+
+// Initialize a jQuery object
+
+
+// A central reference to the root jQuery(document)
+var rootjQuery,
+
+	// A simple way to check for HTML strings
+	// Prioritize #id over <tag> to avoid XSS via location.hash (#9521)
+	// Strict HTML recognition (#11290: must start with <)
+	// Shortcut simple #id case for speed
+	rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/,
+
+	init = jQuery.fn.init = function( selector, context, root ) {
+		var match, elem;
+
+		// HANDLE: $(""), $(null), $(undefined), $(false)
+		if ( !selector ) {
+			return this;
+		}
+
+		// Method init() accepts an alternate rootjQuery
+		// so migrate can support jQuery.sub (gh-2101)
+		root = root || rootjQuery;
+
+		// Handle HTML strings
+		if ( typeof selector === "string" ) {
+			if ( selector[ 0 ] === "<" &&
+				selector[ selector.length - 1 ] === ">" &&
+				selector.length >= 3 ) {
+
+				// Assume that strings that start and end with <> are HTML and skip the regex check
+				match = [ null, selector, null ];
+
+			} else {
+				match = rquickExpr.exec( selector );
+			}
+
+			// Match html or make sure no context is specified for #id
+			if ( match && ( match[ 1 ] || !context ) ) {
+
+				// HANDLE: $(html) -> $(array)
+				if ( match[ 1 ] ) {
+					context = context instanceof jQuery ? context[ 0 ] : context;
+
+					// Option to run scripts is true for back-compat
+					// Intentionally let the error be thrown if parseHTML is not present
+					jQuery.merge( this, jQuery.parseHTML(
+						match[ 1 ],
+						context && context.nodeType ? context.ownerDocument || context : document,
+						true
+					) );
+
+					// HANDLE: $(html, props)
+					if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) {
+						for ( match in context ) {
+
+							// Properties of context are called as methods if possible
+							if ( jQuery.isFunction( this[ match ] ) ) {
+								this[ match ]( context[ match ] );
+
+							// ...and otherwise set as attributes
+							} else {
+								this.attr( match, context[ match ] );
+							}
+						}
+					}
+
+					return this;
+
+				// HANDLE: $(#id)
+				} else {
+					elem = document.getElementById( match[ 2 ] );
+
+					if ( elem ) {
+
+						// Inject the element directly into the jQuery object
+						this[ 0 ] = elem;
+						this.length = 1;
+					}
+					return this;
+				}
+
+			// HANDLE: $(expr, $(...))
+			} else if ( !context || context.jquery ) {
+				return ( context || root ).find( selector );
+
+			// HANDLE: $(expr, context)
+			// (which is just equivalent to: $(context).find(expr)
+			} else {
+				return this.constructor( context ).find( selector );
+			}
+
+		// HANDLE: $(DOMElement)
+		} else if ( selector.nodeType ) {
+			this[ 0 ] = selector;
+			this.length = 1;
+			return this;
+
+		// HANDLE: $(function)
+		// Shortcut for document ready
+		} else if ( jQuery.isFunction( selector ) ) {
+			return root.ready !== undefined ?
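+			// [Editorial summary, not part of upstream jQuery: the branches above give
+			// $() its overloaded call forms:
+			//
+			//   $( "<p>hi</p>" )          // build elements from an HTML string
+			//   $( "<p/>", { id: "x" } )  // HTML plus a props/attributes object
+			//   $( "#main" )              // fast getElementById path
+			//   $( "p", ctx )             // same as $( ctx ).find( "p" )
+			//   $( domNode )              // wrap an existing element
+			//   $( fn )                   // ready shortcut, handled just below]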
+ root.ready( selector ) : + + // Execute immediately if ready is not present + selector( jQuery ); + } + + return jQuery.makeArray( selector, this ); + }; + +// Give the init function the jQuery prototype for later instantiation +init.prototype = jQuery.fn; + +// Initialize central reference +rootjQuery = jQuery( document ); + + +var rparentsprev = /^(?:parents|prev(?:Until|All))/, + + // Methods guaranteed to produce a unique set when starting from a unique set + guaranteedUnique = { + children: true, + contents: true, + next: true, + prev: true + }; + +jQuery.fn.extend( { + has: function( target ) { + var targets = jQuery( target, this ), + l = targets.length; + + return this.filter( function() { + var i = 0; + for ( ; i < l; i++ ) { + if ( jQuery.contains( this, targets[ i ] ) ) { + return true; + } + } + } ); + }, + + closest: function( selectors, context ) { + var cur, + i = 0, + l = this.length, + matched = [], + targets = typeof selectors !== "string" && jQuery( selectors ); + + // Positional selectors never match, since there's no _selection_ context + if ( !rneedsContext.test( selectors ) ) { + for ( ; i < l; i++ ) { + for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) { + + // Always skip document fragments + if ( cur.nodeType < 11 && ( targets ? + targets.index( cur ) > -1 : + + // Don't pass non-elements to Sizzle + cur.nodeType === 1 && + jQuery.find.matchesSelector( cur, selectors ) ) ) { + + matched.push( cur ); + break; + } + } + } + } + + return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched ); + }, + + // Determine the position of an element within the set + index: function( elem ) { + + // No argument, return index in parent + if ( !elem ) { + return ( this[ 0 ] && this[ 0 ].parentNode ) ? this.first().prevAll().length : -1; + } + + // Index in selector + if ( typeof elem === "string" ) { + return indexOf.call( jQuery( elem ), this[ 0 ] ); + } + + // Locate the position of the desired element + return indexOf.call( this, + + // If it receives a jQuery object, the first element is used + elem.jquery ? elem[ 0 ] : elem + ); + }, + + add: function( selector, context ) { + return this.pushStack( + jQuery.uniqueSort( + jQuery.merge( this.get(), jQuery( selector, context ) ) + ) + ); + }, + + addBack: function( selector ) { + return this.add( selector == null ? + this.prevObject : this.prevObject.filter( selector ) + ); + } +} ); + +function sibling( cur, dir ) { + while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {} + return cur; +} + +jQuery.each( { + parent: function( elem ) { + var parent = elem.parentNode; + return parent && parent.nodeType !== 11 ? 
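+		// [Editorial note, not part of upstream jQuery: the jQuery.each() map here
+		// generates the traversal methods (.parent, .parents, .next, .siblings, ...)
+		// from these small per-element functions; the nodeType !== 11 guard below
+		// keeps document fragments out of .parent() results. Compare:
+		//
+		//   $( "li" ).first().parent()           // the containing list element
+		//   $( "li" ).first().parents( "div" )   // every <div> ancestor]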
parent : null; + }, + parents: function( elem ) { + return dir( elem, "parentNode" ); + }, + parentsUntil: function( elem, i, until ) { + return dir( elem, "parentNode", until ); + }, + next: function( elem ) { + return sibling( elem, "nextSibling" ); + }, + prev: function( elem ) { + return sibling( elem, "previousSibling" ); + }, + nextAll: function( elem ) { + return dir( elem, "nextSibling" ); + }, + prevAll: function( elem ) { + return dir( elem, "previousSibling" ); + }, + nextUntil: function( elem, i, until ) { + return dir( elem, "nextSibling", until ); + }, + prevUntil: function( elem, i, until ) { + return dir( elem, "previousSibling", until ); + }, + siblings: function( elem ) { + return siblings( ( elem.parentNode || {} ).firstChild, elem ); + }, + children: function( elem ) { + return siblings( elem.firstChild ); + }, + contents: function( elem ) { + return elem.contentDocument || jQuery.merge( [], elem.childNodes ); + } +}, function( name, fn ) { + jQuery.fn[ name ] = function( until, selector ) { + var matched = jQuery.map( this, fn, until ); + + if ( name.slice( -5 ) !== "Until" ) { + selector = until; + } + + if ( selector && typeof selector === "string" ) { + matched = jQuery.filter( selector, matched ); + } + + if ( this.length > 1 ) { + + // Remove duplicates + if ( !guaranteedUnique[ name ] ) { + jQuery.uniqueSort( matched ); + } + + // Reverse order for parents* and prev-derivatives + if ( rparentsprev.test( name ) ) { + matched.reverse(); + } + } + + return this.pushStack( matched ); + }; +} ); +var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g ); + + + +// Convert String-formatted options into Object-formatted ones +function createOptions( options ) { + var object = {}; + jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) { + object[ flag ] = true; + } ); + return object; +} + +/* + * Create a callback list using the following parameters: + * + * options: an optional list of space-separated options that will change how + * the callback list behaves or a more traditional option object + * + * By default a callback list will act like an event callback list and can be + * "fired" multiple times. + * + * Possible options: + * + * once: will ensure the callback list can only be fired once (like a Deferred) + * + * memory: will keep track of previous values and will call any callback added + * after the list has been fired right away with the latest "memorized" + * values (like a Deferred) + * + * unique: will ensure a callback can only be added once (no duplicate in the list) + * + * stopOnFalse: interrupt callings when a callback returns false + * + */ +jQuery.Callbacks = function( options ) { + + // Convert options from String-formatted to Object-formatted if needed + // (we check in cache first) + options = typeof options === "string" ? 
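+	// [Editorial sketch, not part of upstream jQuery: a flag string combines the
+	// options documented above, e.g.:
+	//
+	//   var callbacks = jQuery.Callbacks( "once memory" );
+	//   callbacks.add( function( msg ) { console.log( msg ); } );
+	//   callbacks.fire( "hello" );        // logs "hello"; "once" locks the list
+	//   callbacks.add( function( msg ) {  // runs immediately with "hello",
+	//       console.log( "late:", msg );  // courtesy of "memory"
+	//   } );
+	//
+	// "once memory" is exactly the combination jQuery.Deferred uses further below.]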
+ createOptions( options ) : + jQuery.extend( {}, options ); + + var // Flag to know if list is currently firing + firing, + + // Last fire value for non-forgettable lists + memory, + + // Flag to know if list was already fired + fired, + + // Flag to prevent firing + locked, + + // Actual callback list + list = [], + + // Queue of execution data for repeatable lists + queue = [], + + // Index of currently firing callback (modified by add/remove as needed) + firingIndex = -1, + + // Fire callbacks + fire = function() { + + // Enforce single-firing + locked = options.once; + + // Execute callbacks for all pending executions, + // respecting firingIndex overrides and runtime changes + fired = firing = true; + for ( ; queue.length; firingIndex = -1 ) { + memory = queue.shift(); + while ( ++firingIndex < list.length ) { + + // Run callback and check for early termination + if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false && + options.stopOnFalse ) { + + // Jump to end and forget the data so .add doesn't re-fire + firingIndex = list.length; + memory = false; + } + } + } + + // Forget the data if we're done with it + if ( !options.memory ) { + memory = false; + } + + firing = false; + + // Clean up if we're done firing for good + if ( locked ) { + + // Keep an empty list if we have data for future add calls + if ( memory ) { + list = []; + + // Otherwise, this object is spent + } else { + list = ""; + } + } + }, + + // Actual Callbacks object + self = { + + // Add a callback or a collection of callbacks to the list + add: function() { + if ( list ) { + + // If we have memory from a past run, we should fire after adding + if ( memory && !firing ) { + firingIndex = list.length - 1; + queue.push( memory ); + } + + ( function add( args ) { + jQuery.each( args, function( _, arg ) { + if ( jQuery.isFunction( arg ) ) { + if ( !options.unique || !self.has( arg ) ) { + list.push( arg ); + } + } else if ( arg && arg.length && jQuery.type( arg ) !== "string" ) { + + // Inspect recursively + add( arg ); + } + } ); + } )( arguments ); + + if ( memory && !firing ) { + fire(); + } + } + return this; + }, + + // Remove a callback from the list + remove: function() { + jQuery.each( arguments, function( _, arg ) { + var index; + while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { + list.splice( index, 1 ); + + // Handle firing indexes + if ( index <= firingIndex ) { + firingIndex--; + } + } + } ); + return this; + }, + + // Check if a given callback is in the list. + // If no argument is given, return whether or not list has callbacks attached. + has: function( fn ) { + return fn ? + jQuery.inArray( fn, list ) > -1 : + list.length > 0; + }, + + // Remove all callbacks from the list + empty: function() { + if ( list ) { + list = []; + } + return this; + }, + + // Disable .fire and .add + // Abort any current/pending executions + // Clear all callbacks and values + disable: function() { + locked = queue = []; + list = memory = ""; + return this; + }, + disabled: function() { + return !list; + }, + + // Disable .fire + // Also disable .add unless we have memory (since it would have no effect) + // Abort any pending executions + lock: function() { + locked = queue = []; + if ( !memory && !firing ) { + list = memory = ""; + } + return this; + }, + locked: function() { + return !!locked; + }, + + // Call all callbacks with the given context and arguments + fireWith: function( context, args ) { + if ( !locked ) { + args = args || []; + args = [ context, args.slice ? 
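+				// [Editorial note, not part of upstream jQuery: the `args.slice` test
+				// below copies real arrays defensively but passes array-likes (such as
+				// the `arguments` object forwarded by .fire()) through unchanged,
+				// assuming callers do not mutate them afterwards.]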
args.slice() : args ]; + queue.push( args ); + if ( !firing ) { + fire(); + } + } + return this; + }, + + // Call all the callbacks with the given arguments + fire: function() { + self.fireWith( this, arguments ); + return this; + }, + + // To know if the callbacks have already been called at least once + fired: function() { + return !!fired; + } + }; + + return self; +}; + + +function Identity( v ) { + return v; +} +function Thrower( ex ) { + throw ex; +} + +function adoptValue( value, resolve, reject ) { + var method; + + try { + + // Check for promise aspect first to privilege synchronous behavior + if ( value && jQuery.isFunction( ( method = value.promise ) ) ) { + method.call( value ).done( resolve ).fail( reject ); + + // Other thenables + } else if ( value && jQuery.isFunction( ( method = value.then ) ) ) { + method.call( value, resolve, reject ); + + // Other non-thenables + } else { + + // Support: Android 4.0 only + // Strict mode functions invoked without .call/.apply get global-object context + resolve.call( undefined, value ); + } + + // For Promises/A+, convert exceptions into rejections + // Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in + // Deferred#then to conditionally suppress rejection. + } catch ( value ) { + + // Support: Android 4.0 only + // Strict mode functions invoked without .call/.apply get global-object context + reject.call( undefined, value ); + } +} + +jQuery.extend( { + + Deferred: function( func ) { + var tuples = [ + + // action, add listener, callbacks, + // ... .then handlers, argument index, [final state] + [ "notify", "progress", jQuery.Callbacks( "memory" ), + jQuery.Callbacks( "memory" ), 2 ], + [ "resolve", "done", jQuery.Callbacks( "once memory" ), + jQuery.Callbacks( "once memory" ), 0, "resolved" ], + [ "reject", "fail", jQuery.Callbacks( "once memory" ), + jQuery.Callbacks( "once memory" ), 1, "rejected" ] + ], + state = "pending", + promise = { + state: function() { + return state; + }, + always: function() { + deferred.done( arguments ).fail( arguments ); + return this; + }, + "catch": function( fn ) { + return promise.then( null, fn ); + }, + + // Keep pipe for back-compat + pipe: function( /* fnDone, fnFail, fnProgress */ ) { + var fns = arguments; + + return jQuery.Deferred( function( newDefer ) { + jQuery.each( tuples, function( i, tuple ) { + + // Map tuples (progress, done, fail) to arguments (done, fail, progress) + var fn = jQuery.isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ]; + + // deferred.progress(function() { bind to newDefer or newDefer.notify }) + // deferred.done(function() { bind to newDefer or newDefer.resolve }) + // deferred.fail(function() { bind to newDefer or newDefer.reject }) + deferred[ tuple[ 1 ] ]( function() { + var returned = fn && fn.apply( this, arguments ); + if ( returned && jQuery.isFunction( returned.promise ) ) { + returned.promise() + .progress( newDefer.notify ) + .done( newDefer.resolve ) + .fail( newDefer.reject ); + } else { + newDefer[ tuple[ 0 ] + "With" ]( + this, + fn ? 
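+							// [Editorial sketch, not part of upstream jQuery: .pipe()
+							// forwards through a new Deferred, unwrapping a returned
+							// promise, e.g.:
+							//
+							//   jQuery.Deferred().resolve( 2 )
+							//       .pipe( function( n ) { return n * 2; } )
+							//       .done( function( n ) { /* n === 4 */ } );
+							//
+							// In the ternary here, when no handler was supplied for
+							// this action the original arguments pass straight on.]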
[ returned ] : arguments + ); + } + } ); + } ); + fns = null; + } ).promise(); + }, + then: function( onFulfilled, onRejected, onProgress ) { + var maxDepth = 0; + function resolve( depth, deferred, handler, special ) { + return function() { + var that = this, + args = arguments, + mightThrow = function() { + var returned, then; + + // Support: Promises/A+ section 2.3.3.3.3 + // https://promisesaplus.com/#point-59 + // Ignore double-resolution attempts + if ( depth < maxDepth ) { + return; + } + + returned = handler.apply( that, args ); + + // Support: Promises/A+ section 2.3.1 + // https://promisesaplus.com/#point-48 + if ( returned === deferred.promise() ) { + throw new TypeError( "Thenable self-resolution" ); + } + + // Support: Promises/A+ sections 2.3.3.1, 3.5 + // https://promisesaplus.com/#point-54 + // https://promisesaplus.com/#point-75 + // Retrieve `then` only once + then = returned && + + // Support: Promises/A+ section 2.3.4 + // https://promisesaplus.com/#point-64 + // Only check objects and functions for thenability + ( typeof returned === "object" || + typeof returned === "function" ) && + returned.then; + + // Handle a returned thenable + if ( jQuery.isFunction( then ) ) { + + // Special processors (notify) just wait for resolution + if ( special ) { + then.call( + returned, + resolve( maxDepth, deferred, Identity, special ), + resolve( maxDepth, deferred, Thrower, special ) + ); + + // Normal processors (resolve) also hook into progress + } else { + + // ...and disregard older resolution values + maxDepth++; + + then.call( + returned, + resolve( maxDepth, deferred, Identity, special ), + resolve( maxDepth, deferred, Thrower, special ), + resolve( maxDepth, deferred, Identity, + deferred.notifyWith ) + ); + } + + // Handle all other returned values + } else { + + // Only substitute handlers pass on context + // and multiple values (non-spec behavior) + if ( handler !== Identity ) { + that = undefined; + args = [ returned ]; + } + + // Process the value(s) + // Default process is resolve + ( special || deferred.resolveWith )( that, args ); + } + }, + + // Only normal processors (resolve) catch and reject exceptions + process = special ? + mightThrow : + function() { + try { + mightThrow(); + } catch ( e ) { + + if ( jQuery.Deferred.exceptionHook ) { + jQuery.Deferred.exceptionHook( e, + process.stackTrace ); + } + + // Support: Promises/A+ section 2.3.3.3.4.1 + // https://promisesaplus.com/#point-61 + // Ignore post-resolution exceptions + if ( depth + 1 >= maxDepth ) { + + // Only substitute handlers pass on context + // and multiple values (non-spec behavior) + if ( handler !== Thrower ) { + that = undefined; + args = [ e ]; + } + + deferred.rejectWith( that, args ); + } + } + }; + + // Support: Promises/A+ section 2.3.3.3.1 + // https://promisesaplus.com/#point-57 + // Re-resolve promises immediately to dodge false rejection from + // subsequent errors + if ( depth ) { + process(); + } else { + + // Call an optional hook to record the stack, in case of exception + // since it's otherwise lost when execution goes async + if ( jQuery.Deferred.getStackHook ) { + process.stackTrace = jQuery.Deferred.getStackHook(); + } + window.setTimeout( process ); + } + }; + } + + return jQuery.Deferred( function( newDefer ) { + + // progress_handlers.add( ... ) + tuples[ 0 ][ 3 ].add( + resolve( + 0, + newDefer, + jQuery.isFunction( onProgress ) ? + onProgress : + Identity, + newDefer.notifyWith + ) + ); + + // fulfilled_handlers.add( ... 
) + tuples[ 1 ][ 3 ].add( + resolve( + 0, + newDefer, + jQuery.isFunction( onFulfilled ) ? + onFulfilled : + Identity + ) + ); + + // rejected_handlers.add( ... ) + tuples[ 2 ][ 3 ].add( + resolve( + 0, + newDefer, + jQuery.isFunction( onRejected ) ? + onRejected : + Thrower + ) + ); + } ).promise(); + }, + + // Get a promise for this deferred + // If obj is provided, the promise aspect is added to the object + promise: function( obj ) { + return obj != null ? jQuery.extend( obj, promise ) : promise; + } + }, + deferred = {}; + + // Add list-specific methods + jQuery.each( tuples, function( i, tuple ) { + var list = tuple[ 2 ], + stateString = tuple[ 5 ]; + + // promise.progress = list.add + // promise.done = list.add + // promise.fail = list.add + promise[ tuple[ 1 ] ] = list.add; + + // Handle state + if ( stateString ) { + list.add( + function() { + + // state = "resolved" (i.e., fulfilled) + // state = "rejected" + state = stateString; + }, + + // rejected_callbacks.disable + // fulfilled_callbacks.disable + tuples[ 3 - i ][ 2 ].disable, + + // progress_callbacks.lock + tuples[ 0 ][ 2 ].lock + ); + } + + // progress_handlers.fire + // fulfilled_handlers.fire + // rejected_handlers.fire + list.add( tuple[ 3 ].fire ); + + // deferred.notify = function() { deferred.notifyWith(...) } + // deferred.resolve = function() { deferred.resolveWith(...) } + // deferred.reject = function() { deferred.rejectWith(...) } + deferred[ tuple[ 0 ] ] = function() { + deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments ); + return this; + }; + + // deferred.notifyWith = list.fireWith + // deferred.resolveWith = list.fireWith + // deferred.rejectWith = list.fireWith + deferred[ tuple[ 0 ] + "With" ] = list.fireWith; + } ); + + // Make the deferred a promise + promise.promise( deferred ); + + // Call given func if any + if ( func ) { + func.call( deferred, deferred ); + } + + // All done! + return deferred; + }, + + // Deferred helper + when: function( singleValue ) { + var + + // count of uncompleted subordinates + remaining = arguments.length, + + // count of unprocessed arguments + i = remaining, + + // subordinate fulfillment data + resolveContexts = Array( i ), + resolveValues = slice.call( arguments ), + + // the master Deferred + master = jQuery.Deferred(), + + // subordinate callback factory + updateFunc = function( i ) { + return function( value ) { + resolveContexts[ i ] = this; + resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value; + if ( !( --remaining ) ) { + master.resolveWith( resolveContexts, resolveValues ); + } + }; + }; + + // Single- and empty arguments are adopted like Promise.resolve + if ( remaining <= 1 ) { + adoptValue( singleValue, master.done( updateFunc( i ) ).resolve, master.reject ); + + // Use .then() to unwrap secondary thenables (cf. gh-3000) + if ( master.state() === "pending" || + jQuery.isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) { + + return master.then(); + } + } + + // Multiple arguments are aggregated like Promise.all array elements + while ( i-- ) { + adoptValue( resolveValues[ i ], updateFunc( i ), master.reject ); + } + + return master.promise(); + } +} ); + + +// These usually indicate a programmer mistake during development, +// warn about them ASAP rather than swallowing them by default. 
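+// [Editorial sketch, not part of upstream jQuery: jQuery.when() above aggregates like
+// Promise.all, resolving with one value (or argument list) per input, e.g.:
+//
+//   jQuery.when( jQuery.get( "/a" ), jQuery.get( "/b" ) ).done( function( ra, rb ) {
+//       // ra and rb are the argument lists of each request's done callbacks
+//   } );
+//
+// The error-name whitelist below feeds the exceptionHook warning that follows.]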
+var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/; + +jQuery.Deferred.exceptionHook = function( error, stack ) { + + // Support: IE 8 - 9 only + // Console exists when dev tools are open, which can happen at any time + if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) { + window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack ); + } +}; + + + + +jQuery.readyException = function( error ) { + window.setTimeout( function() { + throw error; + } ); +}; + + + + +// The deferred used on DOM ready +var readyList = jQuery.Deferred(); + +jQuery.fn.ready = function( fn ) { + + readyList + .then( fn ) + + // Wrap jQuery.readyException in a function so that the lookup + // happens at the time of error handling instead of callback + // registration. + .catch( function( error ) { + jQuery.readyException( error ); + } ); + + return this; +}; + +jQuery.extend( { + + // Is the DOM ready to be used? Set to true once it occurs. + isReady: false, + + // A counter to track how many items to wait for before + // the ready event fires. See #6781 + readyWait: 1, + + // Hold (or release) the ready event + holdReady: function( hold ) { + if ( hold ) { + jQuery.readyWait++; + } else { + jQuery.ready( true ); + } + }, + + // Handle when the DOM is ready + ready: function( wait ) { + + // Abort if there are pending holds or we're already ready + if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { + return; + } + + // Remember that the DOM is ready + jQuery.isReady = true; + + // If a normal DOM Ready event fired, decrement, and wait if need be + if ( wait !== true && --jQuery.readyWait > 0 ) { + return; + } + + // If there are functions bound, to execute + readyList.resolveWith( document, [ jQuery ] ); + } +} ); + +jQuery.ready.then = readyList.then; + +// The ready event handler and self cleanup method +function completed() { + document.removeEventListener( "DOMContentLoaded", completed ); + window.removeEventListener( "load", completed ); + jQuery.ready(); +} + +// Catch cases where $(document).ready() is called +// after the browser event has already occurred. 
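+// [Editorial note, not part of upstream jQuery: readyList above is an ordinary
+// Deferred, so $( document ).ready( fn ), $( fn ) and jQuery.ready.then( fn ) all
+// queue on the same promise, e.g.:
+//
+//   $( function() { console.log( "DOM ready" ); } );
+//
+// The readyState check below covers scripts that run after DOMContentLoaded.]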
+// Support: IE <=9 - 10 only +// Older IE sometimes signals "interactive" too soon +if ( document.readyState === "complete" || + ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) { + + // Handle it asynchronously to allow scripts the opportunity to delay ready + window.setTimeout( jQuery.ready ); + +} else { + + // Use the handy event callback + document.addEventListener( "DOMContentLoaded", completed ); + + // A fallback to window.onload, that will always work + window.addEventListener( "load", completed ); +} + + + + +// Multifunctional method to get and set values of a collection +// The value/s can optionally be executed if it's a function +var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { + var i = 0, + len = elems.length, + bulk = key == null; + + // Sets many values + if ( jQuery.type( key ) === "object" ) { + chainable = true; + for ( i in key ) { + access( elems, fn, i, key[ i ], true, emptyGet, raw ); + } + + // Sets one value + } else if ( value !== undefined ) { + chainable = true; + + if ( !jQuery.isFunction( value ) ) { + raw = true; + } + + if ( bulk ) { + + // Bulk operations run against the entire set + if ( raw ) { + fn.call( elems, value ); + fn = null; + + // ...except when executing function values + } else { + bulk = fn; + fn = function( elem, key, value ) { + return bulk.call( jQuery( elem ), value ); + }; + } + } + + if ( fn ) { + for ( ; i < len; i++ ) { + fn( + elems[ i ], key, raw ? + value : + value.call( elems[ i ], i, fn( elems[ i ], key ) ) + ); + } + } + } + + if ( chainable ) { + return elems; + } + + // Gets + if ( bulk ) { + return fn.call( elems ); + } + + return len ? fn( elems[ 0 ], key ) : emptyGet; +}; +var acceptData = function( owner ) { + + // Accepts only: + // - Node + // - Node.ELEMENT_NODE + // - Node.DOCUMENT_NODE + // - Object + // - Any + return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType ); +}; + + + + +function Data() { + this.expando = jQuery.expando + Data.uid++; +} + +Data.uid = 1; + +Data.prototype = { + + cache: function( owner ) { + + // Check if the owner object already has a cache + var value = owner[ this.expando ]; + + // If not, create one + if ( !value ) { + value = {}; + + // We can accept data for non-element nodes in modern browsers, + // but we should not, see #8335. + // Always return an empty object. + if ( acceptData( owner ) ) { + + // If it is a node unlikely to be stringify-ed or looped over + // use plain assignment + if ( owner.nodeType ) { + owner[ this.expando ] = value; + + // Otherwise secure it in a non-enumerable property + // configurable must be true to allow the property to be + // deleted when data is removed + } else { + Object.defineProperty( owner, this.expando, { + value: value, + configurable: true + } ); + } + } + } + + return value; + }, + set: function( owner, data, value ) { + var prop, + cache = this.cache( owner ); + + // Handle: [ owner, key, value ] args + // Always use camelCase key (gh-2257) + if ( typeof data === "string" ) { + cache[ jQuery.camelCase( data ) ] = value; + + // Handle: [ owner, { properties } ] args + } else { + + // Copy the properties one-by-one to the cache object + for ( prop in data ) { + cache[ jQuery.camelCase( prop ) ] = data[ prop ]; + } + } + return cache; + }, + get: function( owner, key ) { + return key === undefined ? 
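+		// [Editorial sketch, not part of upstream jQuery: Data instances back both
+		// dataPriv and dataUser further below. Roughly:
+		//
+		//   var store = new Data();
+		//   store.set( elem, "fooBar", 42 );
+		//   store.get( elem, "fooBar" );   // 42 (keys are camelCased)
+		//   store.get( elem );             // the whole cache object, via the
+		//                                  // undefined-key branch below]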
+ this.cache( owner ) : + + // Always use camelCase key (gh-2257) + owner[ this.expando ] && owner[ this.expando ][ jQuery.camelCase( key ) ]; + }, + access: function( owner, key, value ) { + + // In cases where either: + // + // 1. No key was specified + // 2. A string key was specified, but no value provided + // + // Take the "read" path and allow the get method to determine + // which value to return, respectively either: + // + // 1. The entire cache object + // 2. The data stored at the key + // + if ( key === undefined || + ( ( key && typeof key === "string" ) && value === undefined ) ) { + + return this.get( owner, key ); + } + + // When the key is not a string, or both a key and value + // are specified, set or extend (existing objects) with either: + // + // 1. An object of properties + // 2. A key and value + // + this.set( owner, key, value ); + + // Since the "set" path can have two possible entry points + // return the expected data based on which path was taken[*] + return value !== undefined ? value : key; + }, + remove: function( owner, key ) { + var i, + cache = owner[ this.expando ]; + + if ( cache === undefined ) { + return; + } + + if ( key !== undefined ) { + + // Support array or space separated string of keys + if ( jQuery.isArray( key ) ) { + + // If key is an array of keys... + // We always set camelCase keys, so remove that. + key = key.map( jQuery.camelCase ); + } else { + key = jQuery.camelCase( key ); + + // If a key with the spaces exists, use it. + // Otherwise, create an array by matching non-whitespace + key = key in cache ? + [ key ] : + ( key.match( rnothtmlwhite ) || [] ); + } + + i = key.length; + + while ( i-- ) { + delete cache[ key[ i ] ]; + } + } + + // Remove the expando if there's no more data + if ( key === undefined || jQuery.isEmptyObject( cache ) ) { + + // Support: Chrome <=35 - 45 + // Webkit & Blink performance suffers when deleting properties + // from DOM nodes, so set to undefined instead + // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted) + if ( owner.nodeType ) { + owner[ this.expando ] = undefined; + } else { + delete owner[ this.expando ]; + } + } + }, + hasData: function( owner ) { + var cache = owner[ this.expando ]; + return cache !== undefined && !jQuery.isEmptyObject( cache ); + } +}; +var dataPriv = new Data(); + +var dataUser = new Data(); + + + +// Implementation Summary +// +// 1. Enforce API surface and semantic compatibility with 1.9.x branch +// 2. Improve the module's maintainability by reducing the storage +// paths to a single mechanism. +// 3. Use the same single mechanism to support "private" and "user" data. +// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData) +// 5. Avoid exposing implementation details on user objects (eg. expando properties) +// 6. 
Provide a clear path for implementation upgrade to WeakMap in 2014 + +var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, + rmultiDash = /[A-Z]/g; + +function getData( data ) { + if ( data === "true" ) { + return true; + } + + if ( data === "false" ) { + return false; + } + + if ( data === "null" ) { + return null; + } + + // Only convert to a number if it doesn't change the string + if ( data === +data + "" ) { + return +data; + } + + if ( rbrace.test( data ) ) { + return JSON.parse( data ); + } + + return data; +} + +function dataAttr( elem, key, data ) { + var name; + + // If nothing was found internally, try to fetch any + // data from the HTML5 data-* attribute + if ( data === undefined && elem.nodeType === 1 ) { + name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase(); + data = elem.getAttribute( name ); + + if ( typeof data === "string" ) { + try { + data = getData( data ); + } catch ( e ) {} + + // Make sure we set the data so it isn't changed later + dataUser.set( elem, key, data ); + } else { + data = undefined; + } + } + return data; +} + +jQuery.extend( { + hasData: function( elem ) { + return dataUser.hasData( elem ) || dataPriv.hasData( elem ); + }, + + data: function( elem, name, data ) { + return dataUser.access( elem, name, data ); + }, + + removeData: function( elem, name ) { + dataUser.remove( elem, name ); + }, + + // TODO: Now that all calls to _data and _removeData have been replaced + // with direct calls to dataPriv methods, these can be deprecated. + _data: function( elem, name, data ) { + return dataPriv.access( elem, name, data ); + }, + + _removeData: function( elem, name ) { + dataPriv.remove( elem, name ); + } +} ); + +jQuery.fn.extend( { + data: function( key, value ) { + var i, name, data, + elem = this[ 0 ], + attrs = elem && elem.attributes; + + // Gets all values + if ( key === undefined ) { + if ( this.length ) { + data = dataUser.get( elem ); + + if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) { + i = attrs.length; + while ( i-- ) { + + // Support: IE 11 only + // The attrs elements can be null (#14894) + if ( attrs[ i ] ) { + name = attrs[ i ].name; + if ( name.indexOf( "data-" ) === 0 ) { + name = jQuery.camelCase( name.slice( 5 ) ); + dataAttr( elem, name, data[ name ] ); + } + } + } + dataPriv.set( elem, "hasDataAttrs", true ); + } + } + + return data; + } + + // Sets multiple values + if ( typeof key === "object" ) { + return this.each( function() { + dataUser.set( this, key ); + } ); + } + + return access( this, function( value ) { + var data; + + // The calling jQuery object (element matches) is not empty + // (and therefore has an element appears at this[ 0 ]) and the + // `value` parameter was not undefined. An empty jQuery object + // will result in `undefined` for elem = this[ 0 ] which will + // throw an exception if an attempt to read a data cache is made. + if ( elem && value === undefined ) { + + // Attempt to get data from the cache + // The key will always be camelCased in Data + data = dataUser.get( elem, key ); + if ( data !== undefined ) { + return data; + } + + // Attempt to "discover" the data in + // HTML5 custom data-* attrs + data = dataAttr( elem, key ); + if ( data !== undefined ) { + return data; + } + + // We tried really hard, but the data doesn't exist. + return; + } + + // Set the data... 
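+			// [Editorial note, not part of upstream jQuery: through dataAttr() and
+			// getData() above, HTML5 data-* attributes are coerced on first read; for
+			// <div data-count="7" data-flag="true" data-opts='{"a":1}'>:
+			//
+			//   $( div ).data( "count" )   // 7 (number)
+			//   $( div ).data( "flag" )    // true (boolean)
+			//   $( div ).data( "opts" )    // { a: 1 } (via JSON.parse)]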
+ this.each( function() { + + // We always store the camelCased key + dataUser.set( this, key, value ); + } ); + }, null, value, arguments.length > 1, null, true ); + }, + + removeData: function( key ) { + return this.each( function() { + dataUser.remove( this, key ); + } ); + } +} ); + + +jQuery.extend( { + queue: function( elem, type, data ) { + var queue; + + if ( elem ) { + type = ( type || "fx" ) + "queue"; + queue = dataPriv.get( elem, type ); + + // Speed up dequeue by getting out quickly if this is just a lookup + if ( data ) { + if ( !queue || jQuery.isArray( data ) ) { + queue = dataPriv.access( elem, type, jQuery.makeArray( data ) ); + } else { + queue.push( data ); + } + } + return queue || []; + } + }, + + dequeue: function( elem, type ) { + type = type || "fx"; + + var queue = jQuery.queue( elem, type ), + startLength = queue.length, + fn = queue.shift(), + hooks = jQuery._queueHooks( elem, type ), + next = function() { + jQuery.dequeue( elem, type ); + }; + + // If the fx queue is dequeued, always remove the progress sentinel + if ( fn === "inprogress" ) { + fn = queue.shift(); + startLength--; + } + + if ( fn ) { + + // Add a progress sentinel to prevent the fx queue from being + // automatically dequeued + if ( type === "fx" ) { + queue.unshift( "inprogress" ); + } + + // Clear up the last queue stop function + delete hooks.stop; + fn.call( elem, next, hooks ); + } + + if ( !startLength && hooks ) { + hooks.empty.fire(); + } + }, + + // Not public - generate a queueHooks object, or return the current one + _queueHooks: function( elem, type ) { + var key = type + "queueHooks"; + return dataPriv.get( elem, key ) || dataPriv.access( elem, key, { + empty: jQuery.Callbacks( "once memory" ).add( function() { + dataPriv.remove( elem, [ type + "queue", key ] ); + } ) + } ); + } +} ); + +jQuery.fn.extend( { + queue: function( type, data ) { + var setter = 2; + + if ( typeof type !== "string" ) { + data = type; + type = "fx"; + setter--; + } + + if ( arguments.length < setter ) { + return jQuery.queue( this[ 0 ], type ); + } + + return data === undefined ? 
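+		// [Editorial sketch, not part of upstream jQuery: queues sequence async work;
+		// each worker receives `next` and must call it to advance, e.g.:
+		//
+		//   $( el )
+		//       .queue( "steps", function( next ) { console.log( "one" ); next(); } )
+		//       .queue( "steps", function( next ) { console.log( "two" ); next(); } )
+		//       .dequeue( "steps" );   // logs "one", then "two"
+		//
+		// Only the "fx" queue auto-dequeues; the getter form below returns the raw
+		// queue array when no data is passed.]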
+ this : + this.each( function() { + var queue = jQuery.queue( this, type, data ); + + // Ensure a hooks for this queue + jQuery._queueHooks( this, type ); + + if ( type === "fx" && queue[ 0 ] !== "inprogress" ) { + jQuery.dequeue( this, type ); + } + } ); + }, + dequeue: function( type ) { + return this.each( function() { + jQuery.dequeue( this, type ); + } ); + }, + clearQueue: function( type ) { + return this.queue( type || "fx", [] ); + }, + + // Get a promise resolved when queues of a certain type + // are emptied (fx is the type by default) + promise: function( type, obj ) { + var tmp, + count = 1, + defer = jQuery.Deferred(), + elements = this, + i = this.length, + resolve = function() { + if ( !( --count ) ) { + defer.resolveWith( elements, [ elements ] ); + } + }; + + if ( typeof type !== "string" ) { + obj = type; + type = undefined; + } + type = type || "fx"; + + while ( i-- ) { + tmp = dataPriv.get( elements[ i ], type + "queueHooks" ); + if ( tmp && tmp.empty ) { + count++; + tmp.empty.add( resolve ); + } + } + resolve(); + return defer.promise( obj ); + } +} ); +var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source; + +var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" ); + + +var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; + +var isHiddenWithinTree = function( elem, el ) { + + // isHiddenWithinTree might be called from jQuery#filter function; + // in that case, element will be second argument + elem = el || elem; + + // Inline style trumps all + return elem.style.display === "none" || + elem.style.display === "" && + + // Otherwise, check computed style + // Support: Firefox <=43 - 45 + // Disconnected elements can have computed display: none, so first confirm that elem is + // in the document. + jQuery.contains( elem.ownerDocument, elem ) && + + jQuery.css( elem, "display" ) === "none"; + }; + +var swap = function( elem, options, callback, args ) { + var ret, name, + old = {}; + + // Remember the old values, and insert the new ones + for ( name in options ) { + old[ name ] = elem.style[ name ]; + elem.style[ name ] = options[ name ]; + } + + ret = callback.apply( elem, args || [] ); + + // Revert the old values + for ( name in options ) { + elem.style[ name ] = old[ name ]; + } + + return ret; +}; + + + + +function adjustCSS( elem, prop, valueParts, tween ) { + var adjusted, + scale = 1, + maxIterations = 20, + currentValue = tween ? + function() { + return tween.cur(); + } : + function() { + return jQuery.css( elem, prop, "" ); + }, + initial = currentValue(), + unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? "" : "px" ), + + // Starting value computation is required for potential unit mismatches + initialInUnit = ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) && + rcssNum.exec( jQuery.css( elem, prop ) ); + + if ( initialInUnit && initialInUnit[ 3 ] !== unit ) { + + // Trust units reported by jQuery.css + unit = unit || initialInUnit[ 3 ]; + + // Make sure we update the tween properties later on + valueParts = valueParts || []; + + // Iteratively approximate from a nonzero starting point + initialInUnit = +initial || 1; + + do { + + // If previous iteration zeroed out, double until we get *something*. 
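+			// [Editorial note, not part of upstream jQuery: this loop converts a
+			// starting value between CSS units empirically, e.g. animating a px-backed
+			// width toward "50em": it applies a trial value in the target unit, reads
+			// back the computed result, and rescales until the ratio stabilizes or
+			// maxIterations (20) runs out.]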
+			// Use string for doubling so we don't accidentally see scale as unchanged below
+			scale = scale || ".5";
+
+			// Adjust and apply
+			initialInUnit = initialInUnit / scale;
+			jQuery.style( elem, prop, initialInUnit + unit );
+
+		// Update scale, tolerating zero or NaN from tween.cur()
+		// Break the loop if scale is unchanged or perfect, or if we've just had enough.
+		} while (
+			scale !== ( scale = currentValue() / initial ) && scale !== 1 && --maxIterations
+		);
+	}
+
+	if ( valueParts ) {
+		initialInUnit = +initialInUnit || +initial || 0;
+
+		// Apply relative offset (+=/-=) if specified
+		adjusted = valueParts[ 1 ] ?
+			initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] :
+			+valueParts[ 2 ];
+		if ( tween ) {
+			tween.unit = unit;
+			tween.start = initialInUnit;
+			tween.end = adjusted;
+		}
+	}
+	return adjusted;
+}
+
+
+var defaultDisplayMap = {};
+
+function getDefaultDisplay( elem ) {
+	var temp,
+		doc = elem.ownerDocument,
+		nodeName = elem.nodeName,
+		display = defaultDisplayMap[ nodeName ];
+
+	if ( display ) {
+		return display;
+	}
+
+	temp = doc.body.appendChild( doc.createElement( nodeName ) );
+	display = jQuery.css( temp, "display" );
+
+	temp.parentNode.removeChild( temp );
+
+	if ( display === "none" ) {
+		display = "block";
+	}
+	defaultDisplayMap[ nodeName ] = display;
+
+	return display;
+}
+
+function showHide( elements, show ) {
+	var display, elem,
+		values = [],
+		index = 0,
+		length = elements.length;
+
+	// Determine new display value for elements that need to change
+	for ( ; index < length; index++ ) {
+		elem = elements[ index ];
+		if ( !elem.style ) {
+			continue;
+		}
+
+		display = elem.style.display;
+		if ( show ) {
+
+			// Since we force visibility upon cascade-hidden elements, an immediate (and slow)
+			// check is required in this first loop unless we have a nonempty display value (either
+			// inline or about-to-be-restored)
+			if ( display === "none" ) {
+				values[ index ] = dataPriv.get( elem, "display" ) || null;
+				if ( !values[ index ] ) {
+					elem.style.display = "";
+				}
+			}
+			if ( elem.style.display === "" && isHiddenWithinTree( elem ) ) {
+				values[ index ] = getDefaultDisplay( elem );
+			}
+		} else {
+			if ( display !== "none" ) {
+				values[ index ] = "none";
+
+				// Remember what we're overwriting
+				dataPriv.set( elem, "display", display );
+			}
+		}
+	}
+
+	// Set the display of the elements in a second loop to avoid constant reflow
+	for ( index = 0; index < length; index++ ) {
+		if ( values[ index ] != null ) {
+			elements[ index ].style.display = values[ index ];
+		}
+	}
+
+	return elements;
+}
+
+jQuery.fn.extend( {
+	show: function() {
+		return showHide( this, true );
+	},
+	hide: function() {
+		return showHide( this );
+	},
+	toggle: function( state ) {
+		if ( typeof state === "boolean" ) {
+			return state ? this.show() : this.hide();
+		}
+
+		return this.each( function() {
+			if ( isHiddenWithinTree( this ) ) {
+				jQuery( this ).show();
+			} else {
+				jQuery( this ).hide();
+			}
+		} );
+	}
+} );
+var rcheckableType = ( /^(?:checkbox|radio)$/i );
+
+var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]+)/i );
+
+var rscriptType = ( /^$|\/(?:java|ecma)script/i );
+
+
+
+// We have to close these tags to support XHTML (#13200)
+var wrapMap = {
+
+	// Support: IE <=9 only
+	option: [ 1, "<select multiple='multiple'>", "</select>" ],
+
+	// XHTML parsers do not magically insert elements in the
+	// same way that tag soup parsers do. So we cannot shorten
+	// this by omitting <tbody> or other required elements.
+	thead: [ 1, "<table>", "</table>" ],
+	col: [ 2, "<table><colgroup>", "</colgroup></table>" ],
+	tr: [ 2, "<table><tbody>", "</tbody></table>" ],
+	td: [ 3, "<table><tbody><tr>", "</tr></tbody></table>" ],
+
+	_default: [ 0, "", "" ]
+};
+
+// Support: IE <=9 only
+wrapMap.optgroup = wrapMap.option;
+
+wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead;
+wrapMap.th = wrapMap.td;
+
+
+function getAll( context, tag ) {
+
+	// Support: IE <=9 - 11 only
+	// Use typeof to avoid zero-argument method invocation on host objects (#15151)
+	var ret;
+
+	if ( typeof context.getElementsByTagName !== "undefined" ) {
+		ret = context.getElementsByTagName( tag || "*" );
+
+	} else if ( typeof context.querySelectorAll !== "undefined" ) {
+		ret = context.querySelectorAll( tag || "*" );
+
+	} else {
+		ret = [];
+	}
+
+	if ( tag === undefined || tag && jQuery.nodeName( context, tag ) ) {
+		return jQuery.merge( [ context ], ret );
+	}
+
+	return ret;
+}
+
+
+// Mark scripts as having already been evaluated
+function setGlobalEval( elems, refElements ) {
+	var i = 0,
+		l = elems.length;
+
+	for ( ; i < l; i++ ) {
+		dataPriv.set(
+			elems[ i ],
+			"globalEval",
+			!refElements || dataPriv.get( refElements[ i ], "globalEval" )
+		);
+	}
+}
+
+
+var rhtml = /<|&#?\w+;/;
+
+function buildFragment( elems, context, scripts, selection, ignored ) {
+	var elem, tmp, tag, wrap, contains, j,
+		fragment = context.createDocumentFragment(),
+		nodes = [],
+		i = 0,
+		l = elems.length;
+
+	for ( ; i < l; i++ ) {
+		elem = elems[ i ];
+
+		if ( elem || elem === 0 ) {
+
+			// Add nodes directly
+			if ( jQuery.type( elem ) === "object" ) {
+
+				// Support: Android <=4.0 only, PhantomJS 1 only
+				// push.apply(_, arraylike) throws on ancient WebKit
+				jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem );
+
+			// Convert non-html into a text node
+			} else if ( !rhtml.test( elem ) ) {
+				nodes.push( context.createTextNode( elem ) );
+
+			// Convert html into DOM nodes
+			} else {
+				tmp = tmp || fragment.appendChild( context.createElement( "div" ) );
+
+				// Deserialize a standard representation
+				tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase();
+				wrap = wrapMap[ tag ] || wrapMap._default;
+				tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ];
+
+				// Descend through wrappers to the right content
+				j = wrap[ 0 ];
+				while ( j-- ) {
+					tmp = tmp.lastChild;
+				}
+
+				// Support: Android <=4.0 only, PhantomJS 1 only
+				// push.apply(_, arraylike) throws on ancient WebKit
+				jQuery.merge( nodes, tmp.childNodes );
+
+				// Remember the top-level container
+				tmp = fragment.firstChild;
+
+				// Ensure the created nodes are orphaned (#12392)
+				tmp.textContent = "";
+			}
+		}
+	}
+
+	// Remove wrapper from fragment
+	fragment.textContent = "";
+
+	i = 0;
+	while ( ( elem = nodes[ i++ ] ) ) {
+
+		// Skip elements already in the context collection (trac-4087)
+		if ( selection && jQuery.inArray( elem, selection ) > -1 ) {
+			if ( ignored ) {
+				ignored.push( elem );
+			}
+			continue;
+		}
+
+		contains = jQuery.contains( elem.ownerDocument, elem );
+
+		// Append to fragment
+		tmp = getAll( fragment.appendChild( elem ), "script" );
+
+		// Preserve script evaluation history
+		if ( contains ) {
+			setGlobalEval( tmp );
+		}
+
+		// Capture executables
+		if ( scripts ) {
+			j = 0;
+			while ( ( elem = tmp[ j++ ] ) ) {
+				if ( rscriptType.test( elem.type || "" ) ) {
+					scripts.push( elem );
+				}
+			}
+		}
+	}
+
+	return fragment;
+}
+
+
+( function() {
+	var fragment = document.createDocumentFragment(),
+		div = fragment.appendChild( document.createElement( "div" ) ),
+		input = document.createElement( "input" );
+
+	// Support: Android 4.0 - 4.3 only
+	// Check state lost if the name is set (#11217)
+	// Support: Windows Web Apps (WWA)
+	// `name` and `type` must use .setAttribute for WWA (#14901)
+	input.setAttribute( "type", "radio" );
+	input.setAttribute( "checked", "checked" );
+	input.setAttribute( "name", "t" );
+
+	div.appendChild( input );
+
+	// Support: Android <=4.1 only
+	// Older WebKit doesn't clone checked state correctly in fragments
+	support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked;
+
+	// Support: IE <=11 only
+	// Make sure textarea (and checkbox) defaultValue is properly cloned
+	div.innerHTML = "<textarea>x</textarea>";
+	support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue;
+} )();
+var documentElement = document.documentElement;
+
+
+
+var
+	rkeyEvent = /^key/,
+	rmouseEvent = /^(?:mouse|pointer|contextmenu|drag|drop)|click/,
+	rtypenamespace = /^([^.]*)(?:\.(.+)|)/;
+
+function returnTrue() {
+	return true;
+}
+
+function returnFalse() {
+	return false;
+}
+
+// Support: IE <=9 only
+// See #13393 for more info
+function safeActiveElement() {
+	try {
+		return document.activeElement;
+	} catch ( err ) { }
+}
+
+function on( elem, types, selector, data, fn, one ) {
+	var origFn, type;
+
+	// Types can be a map of types/handlers
+	if ( typeof types === "object" ) {
+
+		// ( types-Object, selector, data )
+		if ( typeof selector !== "string" ) {
+
+			// ( types-Object, data )
+			data = data || selector;
+			selector = undefined;
+		}
+		for ( type in types ) {
+			on( elem, type, selector, data, types[ type ], one );
+		}
+		return elem;
+	}
+
+	if ( data == null && fn == null ) {
+
+		// ( types, fn )
+		fn = selector;
+		data = selector = undefined;
+	} else if ( fn == null ) {
+		if ( typeof selector === "string" ) {
+
+			// ( types, selector, fn )
+			fn = data;
+			data = undefined;
+		} else {
+
+			// ( types, data, fn )
+			fn = data;
+			data = selector;
+			selector = undefined;
+		}
+	}
+	if ( fn === false ) {
+		fn = returnFalse;
+	} else if ( !fn ) {
+		return elem;
+	}
+
+	if ( one === 1 ) {
+		origFn = fn;
+		fn = function( event ) {
+
+			// Can use an empty set, since event contains the info
+			jQuery().off( event );
+			return origFn.apply( this, arguments );
+		};
+
+		// Use same guid so caller can remove using origFn
+		fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ );
+	}
+	return elem.each( function() {
+		jQuery.event.add( this, types, fn, data, selector );
+	} );
+}
+
+/*
+ * Helper functions for managing events -- not part of the public interface.
+ * Props to Dean Edwards' addEvent library for many of the ideas.
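+ *
+ * Rough shape of the flow (an assumed example, for orientation only): a call
+ * like jQuery( elem ).on( "click.myNS", ".item", { n: 1 }, fn ) funnels through
+ * on() above into jQuery.event.add() below, which records one handleObj
+ * ( type, origType, data, handler, guid, selector, namespace ) per binding and
+ * installs a single native listener per element/type ( elemData.handle ) that
+ * routes everything through jQuery.event.dispatch().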
+ */ +jQuery.event = { + + global: {}, + + add: function( elem, types, handler, data, selector ) { + + var handleObjIn, eventHandle, tmp, + events, t, handleObj, + special, handlers, type, namespaces, origType, + elemData = dataPriv.get( elem ); + + // Don't attach events to noData or text/comment nodes (but allow plain objects) + if ( !elemData ) { + return; + } + + // Caller can pass in an object of custom data in lieu of the handler + if ( handler.handler ) { + handleObjIn = handler; + handler = handleObjIn.handler; + selector = handleObjIn.selector; + } + + // Ensure that invalid selectors throw exceptions at attach time + // Evaluate against documentElement in case elem is a non-element node (e.g., document) + if ( selector ) { + jQuery.find.matchesSelector( documentElement, selector ); + } + + // Make sure that the handler has a unique ID, used to find/remove it later + if ( !handler.guid ) { + handler.guid = jQuery.guid++; + } + + // Init the element's event structure and main handler, if this is the first + if ( !( events = elemData.events ) ) { + events = elemData.events = {}; + } + if ( !( eventHandle = elemData.handle ) ) { + eventHandle = elemData.handle = function( e ) { + + // Discard the second event of a jQuery.event.trigger() and + // when an event is called after a page has unloaded + return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ? + jQuery.event.dispatch.apply( elem, arguments ) : undefined; + }; + } + + // Handle multiple events separated by a space + types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[ t ] ) || []; + type = origType = tmp[ 1 ]; + namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); + + // There *must* be a type, no attaching namespace-only handlers + if ( !type ) { + continue; + } + + // If event changes its type, use the special event handlers for the changed type + special = jQuery.event.special[ type ] || {}; + + // If selector defined, determine special event api type, otherwise given type + type = ( selector ? special.delegateType : special.bindType ) || type; + + // Update special based on newly reset type + special = jQuery.event.special[ type ] || {}; + + // handleObj is passed to all event handlers + handleObj = jQuery.extend( { + type: type, + origType: origType, + data: data, + handler: handler, + guid: handler.guid, + selector: selector, + needsContext: selector && jQuery.expr.match.needsContext.test( selector ), + namespace: namespaces.join( "." 
) + }, handleObjIn ); + + // Init the event handler queue if we're the first + if ( !( handlers = events[ type ] ) ) { + handlers = events[ type ] = []; + handlers.delegateCount = 0; + + // Only use addEventListener if the special events handler returns false + if ( !special.setup || + special.setup.call( elem, data, namespaces, eventHandle ) === false ) { + + if ( elem.addEventListener ) { + elem.addEventListener( type, eventHandle ); + } + } + } + + if ( special.add ) { + special.add.call( elem, handleObj ); + + if ( !handleObj.handler.guid ) { + handleObj.handler.guid = handler.guid; + } + } + + // Add to the element's handler list, delegates in front + if ( selector ) { + handlers.splice( handlers.delegateCount++, 0, handleObj ); + } else { + handlers.push( handleObj ); + } + + // Keep track of which events have ever been used, for event optimization + jQuery.event.global[ type ] = true; + } + + }, + + // Detach an event or set of events from an element + remove: function( elem, types, handler, selector, mappedTypes ) { + + var j, origCount, tmp, + events, t, handleObj, + special, handlers, type, namespaces, origType, + elemData = dataPriv.hasData( elem ) && dataPriv.get( elem ); + + if ( !elemData || !( events = elemData.events ) ) { + return; + } + + // Once for each type.namespace in types; type may be omitted + types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[ t ] ) || []; + type = origType = tmp[ 1 ]; + namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); + + // Unbind all events (on this namespace, if provided) for the element + if ( !type ) { + for ( type in events ) { + jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); + } + continue; + } + + special = jQuery.event.special[ type ] || {}; + type = ( selector ? 
special.delegateType : special.bindType ) || type; + handlers = events[ type ] || []; + tmp = tmp[ 2 ] && + new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ); + + // Remove matching events + origCount = j = handlers.length; + while ( j-- ) { + handleObj = handlers[ j ]; + + if ( ( mappedTypes || origType === handleObj.origType ) && + ( !handler || handler.guid === handleObj.guid ) && + ( !tmp || tmp.test( handleObj.namespace ) ) && + ( !selector || selector === handleObj.selector || + selector === "**" && handleObj.selector ) ) { + handlers.splice( j, 1 ); + + if ( handleObj.selector ) { + handlers.delegateCount--; + } + if ( special.remove ) { + special.remove.call( elem, handleObj ); + } + } + } + + // Remove generic event handler if we removed something and no more handlers exist + // (avoids potential for endless recursion during removal of special event handlers) + if ( origCount && !handlers.length ) { + if ( !special.teardown || + special.teardown.call( elem, namespaces, elemData.handle ) === false ) { + + jQuery.removeEvent( elem, type, elemData.handle ); + } + + delete events[ type ]; + } + } + + // Remove data and the expando if it's no longer used + if ( jQuery.isEmptyObject( events ) ) { + dataPriv.remove( elem, "handle events" ); + } + }, + + dispatch: function( nativeEvent ) { + + // Make a writable jQuery.Event from the native event object + var event = jQuery.event.fix( nativeEvent ); + + var i, j, ret, matched, handleObj, handlerQueue, + args = new Array( arguments.length ), + handlers = ( dataPriv.get( this, "events" ) || {} )[ event.type ] || [], + special = jQuery.event.special[ event.type ] || {}; + + // Use the fix-ed jQuery.Event rather than the (read-only) native event + args[ 0 ] = event; + + for ( i = 1; i < arguments.length; i++ ) { + args[ i ] = arguments[ i ]; + } + + event.delegateTarget = this; + + // Call the preDispatch hook for the mapped type, and let it bail if desired + if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { + return; + } + + // Determine handlers + handlerQueue = jQuery.event.handlers.call( this, event, handlers ); + + // Run delegates first; they may want to stop propagation beneath us + i = 0; + while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) { + event.currentTarget = matched.elem; + + j = 0; + while ( ( handleObj = matched.handlers[ j++ ] ) && + !event.isImmediatePropagationStopped() ) { + + // Triggered event must either 1) have no namespace, or 2) have namespace(s) + // a subset or equal to those in the bound event (both can have no namespace). 
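+					// (Assumed example: triggering "click.a" elsewhere builds a
+					// namespace regexp equivalent to /(^|\.)a(\.|$)/, so a handler
+					// bound as "click.a.b" matches while one bound as "click.c"
+					// does not; a plain "click" trigger leaves event.rnamespace
+					// falsy and every handler of the type runs.)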
+					if ( !event.rnamespace || event.rnamespace.test( handleObj.namespace ) ) {
+
+						event.handleObj = handleObj;
+						event.data = handleObj.data;
+
+						ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle ||
+							handleObj.handler ).apply( matched.elem, args );
+
+						if ( ret !== undefined ) {
+							if ( ( event.result = ret ) === false ) {
+								event.preventDefault();
+								event.stopPropagation();
+							}
+						}
+					}
+				}
+			}
+
+		// Call the postDispatch hook for the mapped type
+		if ( special.postDispatch ) {
+			special.postDispatch.call( this, event );
+		}
+
+		return event.result;
+	},
+
+	handlers: function( event, handlers ) {
+		var i, handleObj, sel, matchedHandlers, matchedSelectors,
+			handlerQueue = [],
+			delegateCount = handlers.delegateCount,
+			cur = event.target;
+
+		// Find delegate handlers
+		if ( delegateCount &&
+
+			// Support: IE <=9
+			// Black-hole SVG <use> instance trees (trac-13180)
+			cur.nodeType &&
+
+			// Support: Firefox <=42
+			// Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861)
+			// https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click
+			// Support: IE 11 only
+			// ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343)
+			!( event.type === "click" && event.button >= 1 ) ) {
+
+			for ( ; cur !== this; cur = cur.parentNode || this ) {
+
+				// Don't check non-elements (#13208)
+				// Don't process clicks on disabled elements (#6911, #8165, #11382, #11764)
+				if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) {
+					matchedHandlers = [];
+					matchedSelectors = {};
+					for ( i = 0; i < delegateCount; i++ ) {
+						handleObj = handlers[ i ];
+
+						// Don't conflict with Object.prototype properties (#13203)
+						sel = handleObj.selector + " ";
+
+						if ( matchedSelectors[ sel ] === undefined ) {
+							matchedSelectors[ sel ] = handleObj.needsContext ?
+								jQuery( sel, this ).index( cur ) > -1 :
+								jQuery.find( sel, this, null, [ cur ] ).length;
+						}
+						if ( matchedSelectors[ sel ] ) {
+							matchedHandlers.push( handleObj );
+						}
+					}
+					if ( matchedHandlers.length ) {
+						handlerQueue.push( { elem: cur, handlers: matchedHandlers } );
+					}
+				}
+			}
+		}
+
+		// Add the remaining (directly-bound) handlers
+		cur = this;
+		if ( delegateCount < handlers.length ) {
+			handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } );
+		}
+
+		return handlerQueue;
+	},
+
+	addProp: function( name, hook ) {
+		Object.defineProperty( jQuery.Event.prototype, name, {
+			enumerable: true,
+			configurable: true,
+
+			get: jQuery.isFunction( hook ) ?
+				function() {
+					if ( this.originalEvent ) {
+						return hook( this.originalEvent );
+					}
+				} :
+				function() {
+					if ( this.originalEvent ) {
+						return this.originalEvent[ name ];
+					}
+				},
+
+			set: function( value ) {
+				Object.defineProperty( this, name, {
+					enumerable: true,
+					configurable: true,
+					writable: true,
+					value: value
+				} );
+			}
+		} );
+	},
+
+	fix: function( originalEvent ) {
+		return originalEvent[ jQuery.expando ] ?
+ originalEvent : + new jQuery.Event( originalEvent ); + }, + + special: { + load: { + + // Prevent triggered image.load events from bubbling to window.load + noBubble: true + }, + focus: { + + // Fire native event if possible so blur/focus sequence is correct + trigger: function() { + if ( this !== safeActiveElement() && this.focus ) { + this.focus(); + return false; + } + }, + delegateType: "focusin" + }, + blur: { + trigger: function() { + if ( this === safeActiveElement() && this.blur ) { + this.blur(); + return false; + } + }, + delegateType: "focusout" + }, + click: { + + // For checkbox, fire native event so checked state will be right + trigger: function() { + if ( this.type === "checkbox" && this.click && jQuery.nodeName( this, "input" ) ) { + this.click(); + return false; + } + }, + + // For cross-browser consistency, don't fire native .click() on links + _default: function( event ) { + return jQuery.nodeName( event.target, "a" ); + } + }, + + beforeunload: { + postDispatch: function( event ) { + + // Support: Firefox 20+ + // Firefox doesn't alert if the returnValue field is not set. + if ( event.result !== undefined && event.originalEvent ) { + event.originalEvent.returnValue = event.result; + } + } + } + } +}; + +jQuery.removeEvent = function( elem, type, handle ) { + + // This "if" is needed for plain objects + if ( elem.removeEventListener ) { + elem.removeEventListener( type, handle ); + } +}; + +jQuery.Event = function( src, props ) { + + // Allow instantiation without the 'new' keyword + if ( !( this instanceof jQuery.Event ) ) { + return new jQuery.Event( src, props ); + } + + // Event object + if ( src && src.type ) { + this.originalEvent = src; + this.type = src.type; + + // Events bubbling up the document may have been marked as prevented + // by a handler lower down the tree; reflect the correct value. + this.isDefaultPrevented = src.defaultPrevented || + src.defaultPrevented === undefined && + + // Support: Android <=2.3 only + src.returnValue === false ? + returnTrue : + returnFalse; + + // Create target properties + // Support: Safari <=6 - 7 only + // Target should not be a text node (#504, #13143) + this.target = ( src.target && src.target.nodeType === 3 ) ? 
+ src.target.parentNode : + src.target; + + this.currentTarget = src.currentTarget; + this.relatedTarget = src.relatedTarget; + + // Event type + } else { + this.type = src; + } + + // Put explicitly provided properties onto the event object + if ( props ) { + jQuery.extend( this, props ); + } + + // Create a timestamp if incoming event doesn't have one + this.timeStamp = src && src.timeStamp || jQuery.now(); + + // Mark it as fixed + this[ jQuery.expando ] = true; +}; + +// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding +// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html +jQuery.Event.prototype = { + constructor: jQuery.Event, + isDefaultPrevented: returnFalse, + isPropagationStopped: returnFalse, + isImmediatePropagationStopped: returnFalse, + isSimulated: false, + + preventDefault: function() { + var e = this.originalEvent; + + this.isDefaultPrevented = returnTrue; + + if ( e && !this.isSimulated ) { + e.preventDefault(); + } + }, + stopPropagation: function() { + var e = this.originalEvent; + + this.isPropagationStopped = returnTrue; + + if ( e && !this.isSimulated ) { + e.stopPropagation(); + } + }, + stopImmediatePropagation: function() { + var e = this.originalEvent; + + this.isImmediatePropagationStopped = returnTrue; + + if ( e && !this.isSimulated ) { + e.stopImmediatePropagation(); + } + + this.stopPropagation(); + } +}; + +// Includes all common event props including KeyEvent and MouseEvent specific props +jQuery.each( { + altKey: true, + bubbles: true, + cancelable: true, + changedTouches: true, + ctrlKey: true, + detail: true, + eventPhase: true, + metaKey: true, + pageX: true, + pageY: true, + shiftKey: true, + view: true, + "char": true, + charCode: true, + key: true, + keyCode: true, + button: true, + buttons: true, + clientX: true, + clientY: true, + offsetX: true, + offsetY: true, + pointerId: true, + pointerType: true, + screenX: true, + screenY: true, + targetTouches: true, + toElement: true, + touches: true, + + which: function( event ) { + var button = event.button; + + // Add which for key events + if ( event.which == null && rkeyEvent.test( event.type ) ) { + return event.charCode != null ? event.charCode : event.keyCode; + } + + // Add which for click: 1 === left; 2 === middle; 3 === right + if ( !event.which && button !== undefined && rmouseEvent.test( event.type ) ) { + if ( button & 1 ) { + return 1; + } + + if ( button & 2 ) { + return 3; + } + + if ( button & 4 ) { + return 2; + } + + return 0; + } + + return event.which; + } +}, jQuery.event.addProp ); + +// Create mouseenter/leave events using mouseover/out and event-time checks +// so that event delegation works in jQuery. +// Do the same for pointerenter/pointerleave and pointerover/pointerout +// +// Support: Safari 7 only +// Safari sends mouseenter too often; see: +// https://bugs.chromium.org/p/chromium/issues/detail?id=470258 +// for the description of the bug (it existed in older Chrome versions as well). +jQuery.each( { + mouseenter: "mouseover", + mouseleave: "mouseout", + pointerenter: "pointerover", + pointerleave: "pointerout" +}, function( orig, fix ) { + jQuery.event.special[ orig ] = { + delegateType: fix, + bindType: fix, + + handle: function( event ) { + var ret, + target = this, + related = event.relatedTarget, + handleObj = event.handleObj; + + // For mouseenter/leave call the handler if related is outside the target. 
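+			// (That is, moving between the target and one of its own descendants
+			// fires native mouseover/mouseout but must not fire the emulated
+			// enter/leave; hence the jQuery.contains( target, related ) check below.)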
+			// NB: No relatedTarget if the mouse left/entered the browser window
+			if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) {
+				event.type = handleObj.origType;
+				ret = handleObj.handler.apply( this, arguments );
+				event.type = fix;
+			}
+			return ret;
+		}
+	};
+} );
+
+jQuery.fn.extend( {
+
+	on: function( types, selector, data, fn ) {
+		return on( this, types, selector, data, fn );
+	},
+	one: function( types, selector, data, fn ) {
+		return on( this, types, selector, data, fn, 1 );
+	},
+	off: function( types, selector, fn ) {
+		var handleObj, type;
+		if ( types && types.preventDefault && types.handleObj ) {
+
+			// ( event ) dispatched jQuery.Event
+			handleObj = types.handleObj;
+			jQuery( types.delegateTarget ).off(
+				handleObj.namespace ?
+					handleObj.origType + "." + handleObj.namespace :
+					handleObj.origType,
+				handleObj.selector,
+				handleObj.handler
+			);
+			return this;
+		}
+		if ( typeof types === "object" ) {
+
+			// ( types-object [, selector] )
+			for ( type in types ) {
+				this.off( type, selector, types[ type ] );
+			}
+			return this;
+		}
+		if ( selector === false || typeof selector === "function" ) {
+
+			// ( types [, fn] )
+			fn = selector;
+			selector = undefined;
+		}
+		if ( fn === false ) {
+			fn = returnFalse;
+		}
+		return this.each( function() {
+			jQuery.event.remove( this, types, fn, selector );
+		} );
+	}
+} );
+
+
+var
+
+	/* eslint-disable max-len */
+
+	// See https://github.com/eslint/eslint/issues/3229
+	rxhtmlTag = /<(?!area|br|col|embed|hr|img|input|link|meta|param)(([a-z][^\/\0>\x20\t\r\n\f]*)[^>]*)\/>/gi,
+
+	/* eslint-enable */
+
+	// Support: IE <=10 - 11, Edge 12 - 13
+	// In IE/Edge using regex groups here causes severe slowdowns.
+	// See https://connect.microsoft.com/IE/feedback/details/1736512/
+	rnoInnerhtml = /<script|<style|<link/i,
+
+	// checked="checked" or checked
+	rchecked = /checked\s*(?:[^=]|=\s*.checked.)/i,
+	rscriptTypeMasked = /^true\/(.*)/,
+	rcleanScript = /^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g;
+
+function manipulationTarget( elem, content ) {
+	if ( jQuery.nodeName( elem, "table" ) &&
+		jQuery.nodeName( content.nodeType !== 11 ? content : content.firstChild, "tr" ) ) {
+
+		return elem.getElementsByTagName( "tbody" )[ 0 ] || elem;
+	}
+
+	return elem;
+}
+
+// Replace/restore the type attribute of script elements for safe DOM manipulation
+function disableScript( elem ) {
+	elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type;
+	return elem;
+}
+function restoreScript( elem ) {
+	var match = rscriptTypeMasked.exec( elem.type );
+
+	if ( match ) {
+		elem.type = match[ 1 ];
+	} else {
+		elem.removeAttribute( "type" );
+	}
+
+	return elem;
+}
+
+function cloneCopyEvent( src, dest ) {
+	var i, l, type, pdataOld, pdataCur, udataOld, udataCur, events;
+
+	if ( dest.nodeType !== 1 ) {
+		return;
+	}
+
+	// 1. Copy private data: events, handlers, etc.
+	if ( dataPriv.hasData( src ) ) {
+		pdataOld = dataPriv.access( src );
+		pdataCur = dataPriv.set( dest, pdataOld );
+		events = pdataOld.events;
+
+		if ( events ) {
+			delete pdataCur.handle;
+			pdataCur.events = {};
+
+			for ( type in events ) {
+				for ( i = 0, l = events[ type ].length; i < l; i++ ) {
+					jQuery.event.add( dest, type, events[ type ][ i ] );
+				}
+			}
+		}
+	}
+
+	// 2. Copy user data
+	if ( dataUser.hasData( src ) ) {
+		udataOld = dataUser.access( src );
+		udataCur = jQuery.extend( {}, udataOld );
+
+		dataUser.set( dest, udataCur );
+	}
+}
+
+// Fix IE bugs, see support tests
+function fixInput( src, dest ) {
+	var nodeName = dest.nodeName.toLowerCase();
+
+	// Fails to persist the checked state of a cloned checkbox or radio button.
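+	// (Assumed failure mode being patched: in the affected IE versions,
+	// elem.cloneNode( true ) can yield a clone whose live checked property, or
+	// whose defaultValue for inputs/textareas, no longer matches the source
+	// node, so both are re-copied from src below.)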
+ if ( nodeName === "input" && rcheckableType.test( src.type ) ) { + dest.checked = src.checked; + + // Fails to return the selected option to the default selected state when cloning options + } else if ( nodeName === "input" || nodeName === "textarea" ) { + dest.defaultValue = src.defaultValue; + } +} + +function domManip( collection, args, callback, ignored ) { + + // Flatten any nested arrays + args = concat.apply( [], args ); + + var fragment, first, scripts, hasScripts, node, doc, + i = 0, + l = collection.length, + iNoClone = l - 1, + value = args[ 0 ], + isFunction = jQuery.isFunction( value ); + + // We can't cloneNode fragments that contain checked, in WebKit + if ( isFunction || + ( l > 1 && typeof value === "string" && + !support.checkClone && rchecked.test( value ) ) ) { + return collection.each( function( index ) { + var self = collection.eq( index ); + if ( isFunction ) { + args[ 0 ] = value.call( this, index, self.html() ); + } + domManip( self, args, callback, ignored ); + } ); + } + + if ( l ) { + fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored ); + first = fragment.firstChild; + + if ( fragment.childNodes.length === 1 ) { + fragment = first; + } + + // Require either new content or an interest in ignored elements to invoke the callback + if ( first || ignored ) { + scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); + hasScripts = scripts.length; + + // Use the original fragment for the last item + // instead of the first because it can end up + // being emptied incorrectly in certain situations (#8070). + for ( ; i < l; i++ ) { + node = fragment; + + if ( i !== iNoClone ) { + node = jQuery.clone( node, true, true ); + + // Keep references to cloned scripts for later restoration + if ( hasScripts ) { + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( scripts, getAll( node, "script" ) ); + } + } + + callback.call( collection[ i ], node, i ); + } + + if ( hasScripts ) { + doc = scripts[ scripts.length - 1 ].ownerDocument; + + // Reenable scripts + jQuery.map( scripts, restoreScript ); + + // Evaluate executable scripts on first document insertion + for ( i = 0; i < hasScripts; i++ ) { + node = scripts[ i ]; + if ( rscriptType.test( node.type || "" ) && + !dataPriv.access( node, "globalEval" ) && + jQuery.contains( doc, node ) ) { + + if ( node.src ) { + + // Optional AJAX dependency, but won't run scripts if not present + if ( jQuery._evalUrl ) { + jQuery._evalUrl( node.src ); + } + } else { + DOMEval( node.textContent.replace( rcleanScript, "" ), doc ); + } + } + } + } + } + } + + return collection; +} + +function remove( elem, selector, keepData ) { + var node, + nodes = selector ? 
+		jQuery.filter( selector, elem ) : elem,
+		i = 0;
+
+	for ( ; ( node = nodes[ i ] ) != null; i++ ) {
+		if ( !keepData && node.nodeType === 1 ) {
+			jQuery.cleanData( getAll( node ) );
+		}
+
+		if ( node.parentNode ) {
+			if ( keepData && jQuery.contains( node.ownerDocument, node ) ) {
+				setGlobalEval( getAll( node, "script" ) );
+			}
+			node.parentNode.removeChild( node );
+		}
+	}
+
+	return elem;
+}
+
+jQuery.extend( {
+	htmlPrefilter: function( html ) {
+		return html.replace( rxhtmlTag, "<$1></$2>" );
+	},
+
+	clone: function( elem, dataAndEvents, deepDataAndEvents ) {
+		var i, l, srcElements, destElements,
+			clone = elem.cloneNode( true ),
+			inPage = jQuery.contains( elem.ownerDocument, elem );
+
+		// Fix IE cloning issues
+		if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) &&
+				!jQuery.isXMLDoc( elem ) ) {
+
+			// We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2
+			destElements = getAll( clone );
+			srcElements = getAll( elem );
+
+			for ( i = 0, l = srcElements.length; i < l; i++ ) {
+				fixInput( srcElements[ i ], destElements[ i ] );
+			}
+		}
+
+		// Copy the events from the original to the clone
+		if ( dataAndEvents ) {
+			if ( deepDataAndEvents ) {
+				srcElements = srcElements || getAll( elem );
+				destElements = destElements || getAll( clone );
+
+				for ( i = 0, l = srcElements.length; i < l; i++ ) {
+					cloneCopyEvent( srcElements[ i ], destElements[ i ] );
+				}
+			} else {
+				cloneCopyEvent( elem, clone );
+			}
+		}
+
+		// Preserve script evaluation history
+		destElements = getAll( clone, "script" );
+		if ( destElements.length > 0 ) {
+			setGlobalEval( destElements, !inPage && getAll( elem, "script" ) );
+		}
+
+		// Return the cloned set
+		return clone;
+	},
+
+	cleanData: function( elems ) {
+		var data, elem, type,
+			special = jQuery.event.special,
+			i = 0;
+
+		for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) {
+			if ( acceptData( elem ) ) {
+				if ( ( data = elem[ dataPriv.expando ] ) ) {
+					if ( data.events ) {
+						for ( type in data.events ) {
+							if ( special[ type ] ) {
+								jQuery.event.remove( elem, type );
+
+							// This is a shortcut to avoid jQuery.event.remove's overhead
+							} else {
+								jQuery.removeEvent( elem, type, data.handle );
+							}
+						}
+					}
+
+					// Support: Chrome <=35 - 45+
+					// Assign undefined instead of using delete, see Data#remove
+					elem[ dataPriv.expando ] = undefined;
+				}
+				if ( elem[ dataUser.expando ] ) {
+
+					// Support: Chrome <=35 - 45+
+					// Assign undefined instead of using delete, see Data#remove
+					elem[ dataUser.expando ] = undefined;
+				}
+			}
+		}
+	}
+} );
+
+jQuery.fn.extend( {
+	detach: function( selector ) {
+		return remove( this, selector, true );
+	},
+
+	remove: function( selector ) {
+		return remove( this, selector );
+	},
+
+	text: function( value ) {
+		return access( this, function( value ) {
+			return value === undefined ?
+ jQuery.text( this ) : + this.empty().each( function() { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + this.textContent = value; + } + } ); + }, null, value, arguments.length ); + }, + + append: function() { + return domManip( this, arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.appendChild( elem ); + } + } ); + }, + + prepend: function() { + return domManip( this, arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.insertBefore( elem, target.firstChild ); + } + } ); + }, + + before: function() { + return domManip( this, arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this ); + } + } ); + }, + + after: function() { + return domManip( this, arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this.nextSibling ); + } + } ); + }, + + empty: function() { + var elem, + i = 0; + + for ( ; ( elem = this[ i ] ) != null; i++ ) { + if ( elem.nodeType === 1 ) { + + // Prevent memory leaks + jQuery.cleanData( getAll( elem, false ) ); + + // Remove any remaining nodes + elem.textContent = ""; + } + } + + return this; + }, + + clone: function( dataAndEvents, deepDataAndEvents ) { + dataAndEvents = dataAndEvents == null ? false : dataAndEvents; + deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents; + + return this.map( function() { + return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); + } ); + }, + + html: function( value ) { + return access( this, function( value ) { + var elem = this[ 0 ] || {}, + i = 0, + l = this.length; + + if ( value === undefined && elem.nodeType === 1 ) { + return elem.innerHTML; + } + + // See if we can take a shortcut and just use innerHTML + if ( typeof value === "string" && !rnoInnerhtml.test( value ) && + !wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) { + + value = jQuery.htmlPrefilter( value ); + + try { + for ( ; i < l; i++ ) { + elem = this[ i ] || {}; + + // Remove element nodes and prevent memory leaks + if ( elem.nodeType === 1 ) { + jQuery.cleanData( getAll( elem, false ) ); + elem.innerHTML = value; + } + } + + elem = 0; + + // If using innerHTML throws an exception, use the fallback method + } catch ( e ) {} + } + + if ( elem ) { + this.empty().append( value ); + } + }, null, value, arguments.length ); + }, + + replaceWith: function() { + var ignored = []; + + // Make the changes, replacing each non-ignored context element with the new content + return domManip( this, arguments, function( elem ) { + var parent = this.parentNode; + + if ( jQuery.inArray( this, ignored ) < 0 ) { + jQuery.cleanData( getAll( this ) ); + if ( parent ) { + parent.replaceChild( elem, this ); + } + } + + // Force callback invocation + }, ignored ); + } +} ); + +jQuery.each( { + appendTo: "append", + prependTo: "prepend", + insertBefore: "before", + insertAfter: "after", + replaceAll: "replaceWith" +}, function( name, original ) { + jQuery.fn[ name ] = function( selector ) { + var elems, + ret = [], + insert = jQuery( selector ), + last = insert.length - 1, + i = 0; + + for ( ; i <= last; i++ ) { + elems = i === last ? 
this : this.clone( true ); + jQuery( insert[ i ] )[ original ]( elems ); + + // Support: Android <=4.0 only, PhantomJS 1 only + // .get() because push.apply(_, arraylike) throws on ancient WebKit + push.apply( ret, elems.get() ); + } + + return this.pushStack( ret ); + }; +} ); +var rmargin = ( /^margin/ ); + +var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" ); + +var getStyles = function( elem ) { + + // Support: IE <=11 only, Firefox <=30 (#15098, #14150) + // IE throws on elements created in popups + // FF meanwhile throws on frame elements through "defaultView.getComputedStyle" + var view = elem.ownerDocument.defaultView; + + if ( !view || !view.opener ) { + view = window; + } + + return view.getComputedStyle( elem ); + }; + + + +( function() { + + // Executing both pixelPosition & boxSizingReliable tests require only one layout + // so they're executed at the same time to save the second computation. + function computeStyleTests() { + + // This is a singleton, we need to execute it only once + if ( !div ) { + return; + } + + div.style.cssText = + "box-sizing:border-box;" + + "position:relative;display:block;" + + "margin:auto;border:1px;padding:1px;" + + "top:1%;width:50%"; + div.innerHTML = ""; + documentElement.appendChild( container ); + + var divStyle = window.getComputedStyle( div ); + pixelPositionVal = divStyle.top !== "1%"; + + // Support: Android 4.0 - 4.3 only, Firefox <=3 - 44 + reliableMarginLeftVal = divStyle.marginLeft === "2px"; + boxSizingReliableVal = divStyle.width === "4px"; + + // Support: Android 4.0 - 4.3 only + // Some styles come back with percentage values, even though they shouldn't + div.style.marginRight = "50%"; + pixelMarginRightVal = divStyle.marginRight === "4px"; + + documentElement.removeChild( container ); + + // Nullify the div so it wouldn't be stored in the memory and + // it will also be a sign that checks already performed + div = null; + } + + var pixelPositionVal, boxSizingReliableVal, pixelMarginRightVal, reliableMarginLeftVal, + container = document.createElement( "div" ), + div = document.createElement( "div" ); + + // Finish early in limited (non-browser) environments + if ( !div.style ) { + return; + } + + // Support: IE <=9 - 11 only + // Style of cloned element affects source element cloned (#8908) + div.style.backgroundClip = "content-box"; + div.cloneNode( true ).style.backgroundClip = ""; + support.clearCloneStyle = div.style.backgroundClip === "content-box"; + + container.style.cssText = "border:0;width:8px;height:0;top:0;left:-9999px;" + + "padding:0;margin-top:1px;position:absolute"; + container.appendChild( div ); + + jQuery.extend( support, { + pixelPosition: function() { + computeStyleTests(); + return pixelPositionVal; + }, + boxSizingReliable: function() { + computeStyleTests(); + return boxSizingReliableVal; + }, + pixelMarginRight: function() { + computeStyleTests(); + return pixelMarginRightVal; + }, + reliableMarginLeft: function() { + computeStyleTests(); + return reliableMarginLeftVal; + } + } ); +} )(); + + +function curCSS( elem, name, computed ) { + var width, minWidth, maxWidth, ret, + style = elem.style; + + computed = computed || getStyles( elem ); + + // Support: IE <=9 only + // getPropertyValue is only needed for .css('filter') (#12537) + if ( computed ) { + ret = computed.getPropertyValue( name ) || computed[ name ]; + + if ( ret === "" && !jQuery.contains( elem.ownerDocument, elem ) ) { + ret = jQuery.style( elem, name ); + } + + // A tribute to the "awesome hack by Dean Edwards" + // Android 
+		// Browser returns percentage for some values,
+		// but width seems to be reliably pixels.
+		// This is against the CSSOM draft spec:
+		// https://drafts.csswg.org/cssom/#resolved-values
+		if ( !support.pixelMarginRight() && rnumnonpx.test( ret ) && rmargin.test( name ) ) {
+
+			// Remember the original values
+			width = style.width;
+			minWidth = style.minWidth;
+			maxWidth = style.maxWidth;
+
+			// Put in the new values to get a computed value out
+			style.minWidth = style.maxWidth = style.width = ret;
+			ret = computed.width;
+
+			// Revert the changed values
+			style.width = width;
+			style.minWidth = minWidth;
+			style.maxWidth = maxWidth;
+		}
+	}
+
+	return ret !== undefined ?
+
+		// Support: IE <=9 - 11 only
+		// IE returns zIndex value as an integer.
+		ret + "" :
+		ret;
+}
+
+
+function addGetHookIf( conditionFn, hookFn ) {
+
+	// Define the hook, we'll check on the first run if it's really needed.
+	return {
+		get: function() {
+			if ( conditionFn() ) {
+
+				// Hook not needed (or it's not possible to use it due
+				// to missing dependency), remove it.
+				delete this.get;
+				return;
+			}
+
+			// Hook needed; redefine it so that the support test is not executed again.
+			return ( this.get = hookFn ).apply( this, arguments );
+		}
+	};
+}
+
+
+var
+
+	// Swappable if display is none or starts with table
+	// except "table", "table-cell", or "table-caption"
+	// See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display
+	rdisplayswap = /^(none|table(?!-c[ea]).+)/,
+	cssShow = { position: "absolute", visibility: "hidden", display: "block" },
+	cssNormalTransform = {
+		letterSpacing: "0",
+		fontWeight: "400"
+	},
+
+	cssPrefixes = [ "Webkit", "Moz", "ms" ],
+	emptyStyle = document.createElement( "div" ).style;
+
+// Return a css property mapped to a potentially vendor prefixed property
+function vendorPropName( name ) {
+
+	// Shortcut for names that are not vendor prefixed
+	if ( name in emptyStyle ) {
+		return name;
+	}
+
+	// Check for vendor prefixed names
+	var capName = name[ 0 ].toUpperCase() + name.slice( 1 ),
+		i = cssPrefixes.length;
+
+	while ( i-- ) {
+		name = cssPrefixes[ i ] + capName;
+		if ( name in emptyStyle ) {
+			return name;
+		}
+	}
+}
+
+function setPositiveNumber( elem, value, subtract ) {
+
+	// Any relative (+/-) values have already been
+	// normalized at this point
+	var matches = rcssNum.exec( value );
+	return matches ?
+
+		// Guard against undefined "subtract", e.g., when used as in cssHooks
+		Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) :
+		value;
+}
+
+function augmentWidthOrHeight( elem, name, extra, isBorderBox, styles ) {
+	var i,
+		val = 0;
+
+	// If we already have the right measurement, avoid augmentation
+	if ( extra === ( isBorderBox ? "border" : "content" ) ) {
+		i = 4;
+
+	// Otherwise initialize for horizontal or vertical properties
+	} else {
+		i = name === "width" ?
1 : 0; + } + + for ( ; i < 4; i += 2 ) { + + // Both box models exclude margin, so add it if we want it + if ( extra === "margin" ) { + val += jQuery.css( elem, extra + cssExpand[ i ], true, styles ); + } + + if ( isBorderBox ) { + + // border-box includes padding, so remove it if we want content + if ( extra === "content" ) { + val -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); + } + + // At this point, extra isn't border nor margin, so remove border + if ( extra !== "margin" ) { + val -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + } + } else { + + // At this point, extra isn't content, so add padding + val += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); + + // At this point, extra isn't content nor padding, so add border + if ( extra !== "padding" ) { + val += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + } + } + } + + return val; +} + +function getWidthOrHeight( elem, name, extra ) { + + // Start with offset property, which is equivalent to the border-box value + var val, + valueIsBorderBox = true, + styles = getStyles( elem ), + isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; + + // Support: IE <=11 only + // Running getBoundingClientRect on a disconnected node + // in IE throws an error. + if ( elem.getClientRects().length ) { + val = elem.getBoundingClientRect()[ name ]; + } + + // Some non-html elements return undefined for offsetWidth, so check for null/undefined + // svg - https://bugzilla.mozilla.org/show_bug.cgi?id=649285 + // MathML - https://bugzilla.mozilla.org/show_bug.cgi?id=491668 + if ( val <= 0 || val == null ) { + + // Fall back to computed then uncomputed css if necessary + val = curCSS( elem, name, styles ); + if ( val < 0 || val == null ) { + val = elem.style[ name ]; + } + + // Computed unit is not pixels. Stop here and return. + if ( rnumnonpx.test( val ) ) { + return val; + } + + // Check for style in case a browser which returns unreliable values + // for getComputedStyle silently falls back to the reliable elem.style + valueIsBorderBox = isBorderBox && + ( support.boxSizingReliable() || val === elem.style[ name ] ); + + // Normalize "", auto, and prepare for extra + val = parseFloat( val ) || 0; + } + + // Use the active box-sizing model to add/subtract irrelevant styles + return ( val + + augmentWidthOrHeight( + elem, + name, + extra || ( isBorderBox ? "border" : "content" ), + valueIsBorderBox, + styles + ) + ) + "px"; +} + +jQuery.extend( { + + // Add in style property hooks for overriding the default + // behavior of getting and setting a style property + cssHooks: { + opacity: { + get: function( elem, computed ) { + if ( computed ) { + + // We should always get a number back from opacity + var ret = curCSS( elem, "opacity" ); + return ret === "" ? 
"1" : ret; + } + } + } + }, + + // Don't automatically add "px" to these possibly-unitless properties + cssNumber: { + "animationIterationCount": true, + "columnCount": true, + "fillOpacity": true, + "flexGrow": true, + "flexShrink": true, + "fontWeight": true, + "lineHeight": true, + "opacity": true, + "order": true, + "orphans": true, + "widows": true, + "zIndex": true, + "zoom": true + }, + + // Add in properties whose names you wish to fix before + // setting or getting the value + cssProps: { + "float": "cssFloat" + }, + + // Get and set the style property on a DOM Node + style: function( elem, name, value, extra ) { + + // Don't set styles on text and comment nodes + if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { + return; + } + + // Make sure that we're working with the right name + var ret, type, hooks, + origName = jQuery.camelCase( name ), + style = elem.style; + + name = jQuery.cssProps[ origName ] || + ( jQuery.cssProps[ origName ] = vendorPropName( origName ) || origName ); + + // Gets hook for the prefixed version, then unprefixed version + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // Check if we're setting a value + if ( value !== undefined ) { + type = typeof value; + + // Convert "+=" or "-=" to relative numbers (#7345) + if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) { + value = adjustCSS( elem, name, ret ); + + // Fixes bug #9237 + type = "number"; + } + + // Make sure that null and NaN values aren't set (#7116) + if ( value == null || value !== value ) { + return; + } + + // If a number was passed in, add the unit (except for certain CSS properties) + if ( type === "number" ) { + value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? "" : "px" ); + } + + // background-* props affect original clone's values + if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) { + style[ name ] = "inherit"; + } + + // If a hook was provided, use that value, otherwise just set the specified value + if ( !hooks || !( "set" in hooks ) || + ( value = hooks.set( elem, value, extra ) ) !== undefined ) { + + style[ name ] = value; + } + + } else { + + // If a hook was provided get the non-computed value from there + if ( hooks && "get" in hooks && + ( ret = hooks.get( elem, false, extra ) ) !== undefined ) { + + return ret; + } + + // Otherwise just get the value from the style object + return style[ name ]; + } + }, + + css: function( elem, name, extra, styles ) { + var val, num, hooks, + origName = jQuery.camelCase( name ); + + // Make sure that we're working with the right name + name = jQuery.cssProps[ origName ] || + ( jQuery.cssProps[ origName ] = vendorPropName( origName ) || origName ); + + // Try prefixed name followed by the unprefixed name + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // If a hook was provided get the computed value from there + if ( hooks && "get" in hooks ) { + val = hooks.get( elem, true, extra ); + } + + // Otherwise, if a way to get the computed value exists, use that + if ( val === undefined ) { + val = curCSS( elem, name, styles ); + } + + // Convert "normal" to computed value + if ( val === "normal" && name in cssNormalTransform ) { + val = cssNormalTransform[ name ]; + } + + // Make numeric if forced or a qualifier was provided and val looks numeric + if ( extra === "" || extra ) { + num = parseFloat( val ); + return extra === true || isFinite( num ) ? 
num || 0 : val; + } + return val; + } +} ); + +jQuery.each( [ "height", "width" ], function( i, name ) { + jQuery.cssHooks[ name ] = { + get: function( elem, computed, extra ) { + if ( computed ) { + + // Certain elements can have dimension info if we invisibly show them + // but it must have a current display style that would benefit + return rdisplayswap.test( jQuery.css( elem, "display" ) ) && + + // Support: Safari 8+ + // Table columns in Safari have non-zero offsetWidth & zero + // getBoundingClientRect().width unless display is changed. + // Support: IE <=11 only + // Running getBoundingClientRect on a disconnected node + // in IE throws an error. + ( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ? + swap( elem, cssShow, function() { + return getWidthOrHeight( elem, name, extra ); + } ) : + getWidthOrHeight( elem, name, extra ); + } + }, + + set: function( elem, value, extra ) { + var matches, + styles = extra && getStyles( elem ), + subtract = extra && augmentWidthOrHeight( + elem, + name, + extra, + jQuery.css( elem, "boxSizing", false, styles ) === "border-box", + styles + ); + + // Convert to pixels if value adjustment is needed + if ( subtract && ( matches = rcssNum.exec( value ) ) && + ( matches[ 3 ] || "px" ) !== "px" ) { + + elem.style[ name ] = value; + value = jQuery.css( elem, name ); + } + + return setPositiveNumber( elem, value, subtract ); + } + }; +} ); + +jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft, + function( elem, computed ) { + if ( computed ) { + return ( parseFloat( curCSS( elem, "marginLeft" ) ) || + elem.getBoundingClientRect().left - + swap( elem, { marginLeft: 0 }, function() { + return elem.getBoundingClientRect().left; + } ) + ) + "px"; + } + } +); + +// These hooks are used by animate to expand properties +jQuery.each( { + margin: "", + padding: "", + border: "Width" +}, function( prefix, suffix ) { + jQuery.cssHooks[ prefix + suffix ] = { + expand: function( value ) { + var i = 0, + expanded = {}, + + // Assumes a single number if not a string + parts = typeof value === "string" ? value.split( " " ) : [ value ]; + + for ( ; i < 4; i++ ) { + expanded[ prefix + cssExpand[ i ] + suffix ] = + parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; + } + + return expanded; + } + }; + + if ( !rmargin.test( prefix ) ) { + jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; + } +} ); + +jQuery.fn.extend( { + css: function( name, value ) { + return access( this, function( elem, name, value ) { + var styles, len, + map = {}, + i = 0; + + if ( jQuery.isArray( name ) ) { + styles = getStyles( elem ); + len = name.length; + + for ( ; i < len; i++ ) { + map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); + } + + return map; + } + + return value !== undefined ? + jQuery.style( elem, name, value ) : + jQuery.css( elem, name ); + }, name, value, arguments.length > 1 ); + } +} ); + + +function Tween( elem, options, prop, end, easing ) { + return new Tween.prototype.init( elem, options, prop, end, easing ); +} +jQuery.Tween = Tween; + +Tween.prototype = { + constructor: Tween, + init: function( elem, options, prop, end, easing, unit ) { + this.elem = elem; + this.prop = prop; + this.easing = easing || jQuery.easing._default; + this.options = options; + this.start = this.now = this.cur(); + this.end = end; + this.unit = unit || ( jQuery.cssNumber[ prop ] ? "" : "px" ); + }, + cur: function() { + var hooks = Tween.propHooks[ this.prop ]; + + return hooks && hooks.get ? 
+ hooks.get( this ) : + Tween.propHooks._default.get( this ); + }, + run: function( percent ) { + var eased, + hooks = Tween.propHooks[ this.prop ]; + + if ( this.options.duration ) { + this.pos = eased = jQuery.easing[ this.easing ]( + percent, this.options.duration * percent, 0, 1, this.options.duration + ); + } else { + this.pos = eased = percent; + } + this.now = ( this.end - this.start ) * eased + this.start; + + if ( this.options.step ) { + this.options.step.call( this.elem, this.now, this ); + } + + if ( hooks && hooks.set ) { + hooks.set( this ); + } else { + Tween.propHooks._default.set( this ); + } + return this; + } +}; + +Tween.prototype.init.prototype = Tween.prototype; + +Tween.propHooks = { + _default: { + get: function( tween ) { + var result; + + // Use a property on the element directly when it is not a DOM element, + // or when there is no matching style property that exists. + if ( tween.elem.nodeType !== 1 || + tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) { + return tween.elem[ tween.prop ]; + } + + // Passing an empty string as a 3rd parameter to .css will automatically + // attempt a parseFloat and fallback to a string if the parse fails. + // Simple values such as "10px" are parsed to Float; + // complex values such as "rotate(1rad)" are returned as-is. + result = jQuery.css( tween.elem, tween.prop, "" ); + + // Empty strings, null, undefined and "auto" are converted to 0. + return !result || result === "auto" ? 0 : result; + }, + set: function( tween ) { + + // Use step hook for back compat. + // Use cssHook if its there. + // Use .style if available and use plain properties where available. + if ( jQuery.fx.step[ tween.prop ] ) { + jQuery.fx.step[ tween.prop ]( tween ); + } else if ( tween.elem.nodeType === 1 && + ( tween.elem.style[ jQuery.cssProps[ tween.prop ] ] != null || + jQuery.cssHooks[ tween.prop ] ) ) { + jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); + } else { + tween.elem[ tween.prop ] = tween.now; + } + } + } +}; + +// Support: IE <=9 only +// Panic based approach to setting things on disconnected nodes +Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { + set: function( tween ) { + if ( tween.elem.nodeType && tween.elem.parentNode ) { + tween.elem[ tween.prop ] = tween.now; + } + } +}; + +jQuery.easing = { + linear: function( p ) { + return p; + }, + swing: function( p ) { + return 0.5 - Math.cos( p * Math.PI ) / 2; + }, + _default: "swing" +}; + +jQuery.fx = Tween.prototype.init; + +// Back compat <1.8 extension point +jQuery.fx.step = {}; + + + + +var + fxNow, timerId, + rfxtypes = /^(?:toggle|show|hide)$/, + rrun = /queueHooks$/; + +function raf() { + if ( timerId ) { + window.requestAnimationFrame( raf ); + jQuery.fx.tick(); + } +} + +// Animations created synchronously will run synchronously +function createFxNow() { + window.setTimeout( function() { + fxNow = undefined; + } ); + return ( fxNow = jQuery.now() ); +} + +// Generate parameters to create a standard animation +function genFx( type, includeWidth ) { + var which, + i = 0, + attrs = { height: type }; + + // If we include width, step value is 1 to do all cssExpand values, + // otherwise step value is 2 to skip over Left and Right + includeWidth = includeWidth ? 
1 : 0; + for ( ; i < 4; i += 2 - includeWidth ) { + which = cssExpand[ i ]; + attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; + } + + if ( includeWidth ) { + attrs.opacity = attrs.width = type; + } + + return attrs; +} + +function createTween( value, prop, animation ) { + var tween, + collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ), + index = 0, + length = collection.length; + for ( ; index < length; index++ ) { + if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) { + + // We're done with this property + return tween; + } + } +} + +function defaultPrefilter( elem, props, opts ) { + var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display, + isBox = "width" in props || "height" in props, + anim = this, + orig = {}, + style = elem.style, + hidden = elem.nodeType && isHiddenWithinTree( elem ), + dataShow = dataPriv.get( elem, "fxshow" ); + + // Queue-skipping animations hijack the fx hooks + if ( !opts.queue ) { + hooks = jQuery._queueHooks( elem, "fx" ); + if ( hooks.unqueued == null ) { + hooks.unqueued = 0; + oldfire = hooks.empty.fire; + hooks.empty.fire = function() { + if ( !hooks.unqueued ) { + oldfire(); + } + }; + } + hooks.unqueued++; + + anim.always( function() { + + // Ensure the complete handler is called before this completes + anim.always( function() { + hooks.unqueued--; + if ( !jQuery.queue( elem, "fx" ).length ) { + hooks.empty.fire(); + } + } ); + } ); + } + + // Detect show/hide animations + for ( prop in props ) { + value = props[ prop ]; + if ( rfxtypes.test( value ) ) { + delete props[ prop ]; + toggle = toggle || value === "toggle"; + if ( value === ( hidden ? "hide" : "show" ) ) { + + // Pretend to be hidden if this is a "show" and + // there is still data from a stopped show/hide + if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) { + hidden = true; + + // Ignore all other no-op show/hide data + } else { + continue; + } + } + orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop ); + } + } + + // Bail out if this is a no-op like .hide().hide() + propTween = !jQuery.isEmptyObject( props ); + if ( !propTween && jQuery.isEmptyObject( orig ) ) { + return; + } + + // Restrict "overflow" and "display" styles during box animations + if ( isBox && elem.nodeType === 1 ) { + + // Support: IE <=9 - 11, Edge 12 - 13 + // Record all 3 overflow attributes because IE does not infer the shorthand + // from identically-valued overflowX and overflowY + opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; + + // Identify a display type, preferring old show/hide data over the CSS cascade + restoreDisplay = dataShow && dataShow.display; + if ( restoreDisplay == null ) { + restoreDisplay = dataPriv.get( elem, "display" ); + } + display = jQuery.css( elem, "display" ); + if ( display === "none" ) { + if ( restoreDisplay ) { + display = restoreDisplay; + } else { + + // Get nonempty value(s) by temporarily forcing visibility + showHide( [ elem ], true ); + restoreDisplay = elem.style.display || restoreDisplay; + display = jQuery.css( elem, "display" ); + showHide( [ elem ] ); + } + } + + // Animate inline elements as inline-block + if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) { + if ( jQuery.css( elem, "float" ) === "none" ) { + + // Restore the original display value at the end of pure show/hide animations + if ( !propTween ) { + anim.done( function() { + style.display = restoreDisplay; + } ); + if ( 
restoreDisplay == null ) { + display = style.display; + restoreDisplay = display === "none" ? "" : display; + } + } + style.display = "inline-block"; + } + } + } + + if ( opts.overflow ) { + style.overflow = "hidden"; + anim.always( function() { + style.overflow = opts.overflow[ 0 ]; + style.overflowX = opts.overflow[ 1 ]; + style.overflowY = opts.overflow[ 2 ]; + } ); + } + + // Implement show/hide animations + propTween = false; + for ( prop in orig ) { + + // General show/hide setup for this element animation + if ( !propTween ) { + if ( dataShow ) { + if ( "hidden" in dataShow ) { + hidden = dataShow.hidden; + } + } else { + dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } ); + } + + // Store hidden/visible for toggle so `.stop().toggle()` "reverses" + if ( toggle ) { + dataShow.hidden = !hidden; + } + + // Show elements before animating them + if ( hidden ) { + showHide( [ elem ], true ); + } + + /* eslint-disable no-loop-func */ + + anim.done( function() { + + /* eslint-enable no-loop-func */ + + // The final step of a "hide" animation is actually hiding the element + if ( !hidden ) { + showHide( [ elem ] ); + } + dataPriv.remove( elem, "fxshow" ); + for ( prop in orig ) { + jQuery.style( elem, prop, orig[ prop ] ); + } + } ); + } + + // Per-property setup + propTween = createTween( hidden ? dataShow[ prop ] : 0, prop, anim ); + if ( !( prop in dataShow ) ) { + dataShow[ prop ] = propTween.start; + if ( hidden ) { + propTween.end = propTween.start; + propTween.start = 0; + } + } + } +} + +function propFilter( props, specialEasing ) { + var index, name, easing, value, hooks; + + // camelCase, specialEasing and expand cssHook pass + for ( index in props ) { + name = jQuery.camelCase( index ); + easing = specialEasing[ name ]; + value = props[ index ]; + if ( jQuery.isArray( value ) ) { + easing = value[ 1 ]; + value = props[ index ] = value[ 0 ]; + } + + if ( index !== name ) { + props[ name ] = value; + delete props[ index ]; + } + + hooks = jQuery.cssHooks[ name ]; + if ( hooks && "expand" in hooks ) { + value = hooks.expand( value ); + delete props[ name ]; + + // Not quite $.extend, this won't overwrite existing keys. 
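+
+/* The `propFilter` pass here also accepts a `[ value, easing ]` pair per property;
+   that is how a per-property easing ends up in `specialEasing` while the rest of
+   the animation keeps its default. For example:
+
+   jQuery( "#box" ).animate( {
+       width: [ 400, "linear" ],   // eased linearly
+       height: 200                 // eased with the animation default ("swing")
+   }, 600 );
+*/
+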
+ // Reusing 'index' because we have the correct "name" + for ( index in value ) { + if ( !( index in props ) ) { + props[ index ] = value[ index ]; + specialEasing[ index ] = easing; + } + } + } else { + specialEasing[ name ] = easing; + } + } +} + +function Animation( elem, properties, options ) { + var result, + stopped, + index = 0, + length = Animation.prefilters.length, + deferred = jQuery.Deferred().always( function() { + + // Don't match elem in the :animated selector + delete tick.elem; + } ), + tick = function() { + if ( stopped ) { + return false; + } + var currentTime = fxNow || createFxNow(), + remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), + + // Support: Android 2.3 only + // Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497) + temp = remaining / animation.duration || 0, + percent = 1 - temp, + index = 0, + length = animation.tweens.length; + + for ( ; index < length; index++ ) { + animation.tweens[ index ].run( percent ); + } + + deferred.notifyWith( elem, [ animation, percent, remaining ] ); + + if ( percent < 1 && length ) { + return remaining; + } else { + deferred.resolveWith( elem, [ animation ] ); + return false; + } + }, + animation = deferred.promise( { + elem: elem, + props: jQuery.extend( {}, properties ), + opts: jQuery.extend( true, { + specialEasing: {}, + easing: jQuery.easing._default + }, options ), + originalProperties: properties, + originalOptions: options, + startTime: fxNow || createFxNow(), + duration: options.duration, + tweens: [], + createTween: function( prop, end ) { + var tween = jQuery.Tween( elem, animation.opts, prop, end, + animation.opts.specialEasing[ prop ] || animation.opts.easing ); + animation.tweens.push( tween ); + return tween; + }, + stop: function( gotoEnd ) { + var index = 0, + + // If we are going to the end, we want to run all the tweens + // otherwise we skip this part + length = gotoEnd ? 
animation.tweens.length : 0; + if ( stopped ) { + return this; + } + stopped = true; + for ( ; index < length; index++ ) { + animation.tweens[ index ].run( 1 ); + } + + // Resolve when we played the last frame; otherwise, reject + if ( gotoEnd ) { + deferred.notifyWith( elem, [ animation, 1, 0 ] ); + deferred.resolveWith( elem, [ animation, gotoEnd ] ); + } else { + deferred.rejectWith( elem, [ animation, gotoEnd ] ); + } + return this; + } + } ), + props = animation.props; + + propFilter( props, animation.opts.specialEasing ); + + for ( ; index < length; index++ ) { + result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts ); + if ( result ) { + if ( jQuery.isFunction( result.stop ) ) { + jQuery._queueHooks( animation.elem, animation.opts.queue ).stop = + jQuery.proxy( result.stop, result ); + } + return result; + } + } + + jQuery.map( props, createTween, animation ); + + if ( jQuery.isFunction( animation.opts.start ) ) { + animation.opts.start.call( elem, animation ); + } + + jQuery.fx.timer( + jQuery.extend( tick, { + elem: elem, + anim: animation, + queue: animation.opts.queue + } ) + ); + + // attach callbacks from options + return animation.progress( animation.opts.progress ) + .done( animation.opts.done, animation.opts.complete ) + .fail( animation.opts.fail ) + .always( animation.opts.always ); +} + +jQuery.Animation = jQuery.extend( Animation, { + + tweeners: { + "*": [ function( prop, value ) { + var tween = this.createTween( prop, value ); + adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween ); + return tween; + } ] + }, + + tweener: function( props, callback ) { + if ( jQuery.isFunction( props ) ) { + callback = props; + props = [ "*" ]; + } else { + props = props.match( rnothtmlwhite ); + } + + var prop, + index = 0, + length = props.length; + + for ( ; index < length; index++ ) { + prop = props[ index ]; + Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || []; + Animation.tweeners[ prop ].unshift( callback ); + } + }, + + prefilters: [ defaultPrefilter ], + + prefilter: function( callback, prepend ) { + if ( prepend ) { + Animation.prefilters.unshift( callback ); + } else { + Animation.prefilters.push( callback ); + } + } +} ); + +jQuery.speed = function( speed, easing, fn ) { + var opt = speed && typeof speed === "object" ? 
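+
+/* Registration sketch for the `Animation.prefilters` / `Animation.tweeners`
+   registries above. A prefilter may rewrite the property map (or take over the
+   whole animation) before tweens are created; `blink` below is a made-up property
+   name used purely for illustration.
+
+   jQuery.Animation.prefilter( function( elem, props, opts ) {
+       if ( "blink" in props ) {
+           delete props.blink;     // drop a property this animation cannot handle
+       }
+   } );
+*/
+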
jQuery.extend( {}, speed ) : { + complete: fn || !fn && easing || + jQuery.isFunction( speed ) && speed, + duration: speed, + easing: fn && easing || easing && !jQuery.isFunction( easing ) && easing + }; + + // Go to the end state if fx are off or if document is hidden + if ( jQuery.fx.off || document.hidden ) { + opt.duration = 0; + + } else { + if ( typeof opt.duration !== "number" ) { + if ( opt.duration in jQuery.fx.speeds ) { + opt.duration = jQuery.fx.speeds[ opt.duration ]; + + } else { + opt.duration = jQuery.fx.speeds._default; + } + } + } + + // Normalize opt.queue - true/undefined/null -> "fx" + if ( opt.queue == null || opt.queue === true ) { + opt.queue = "fx"; + } + + // Queueing + opt.old = opt.complete; + + opt.complete = function() { + if ( jQuery.isFunction( opt.old ) ) { + opt.old.call( this ); + } + + if ( opt.queue ) { + jQuery.dequeue( this, opt.queue ); + } + }; + + return opt; +}; + +jQuery.fn.extend( { + fadeTo: function( speed, to, easing, callback ) { + + // Show any hidden elements after setting opacity to 0 + return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show() + + // Animate to the value specified + .end().animate( { opacity: to }, speed, easing, callback ); + }, + animate: function( prop, speed, easing, callback ) { + var empty = jQuery.isEmptyObject( prop ), + optall = jQuery.speed( speed, easing, callback ), + doAnimation = function() { + + // Operate on a copy of prop so per-property easing won't be lost + var anim = Animation( this, jQuery.extend( {}, prop ), optall ); + + // Empty animations, or finishing resolves immediately + if ( empty || dataPriv.get( this, "finish" ) ) { + anim.stop( true ); + } + }; + doAnimation.finish = doAnimation; + + return empty || optall.queue === false ? + this.each( doAnimation ) : + this.queue( optall.queue, doAnimation ); + }, + stop: function( type, clearQueue, gotoEnd ) { + var stopQueue = function( hooks ) { + var stop = hooks.stop; + delete hooks.stop; + stop( gotoEnd ); + }; + + if ( typeof type !== "string" ) { + gotoEnd = clearQueue; + clearQueue = type; + type = undefined; + } + if ( clearQueue && type !== false ) { + this.queue( type || "fx", [] ); + } + + return this.each( function() { + var dequeue = true, + index = type != null && type + "queueHooks", + timers = jQuery.timers, + data = dataPriv.get( this ); + + if ( index ) { + if ( data[ index ] && data[ index ].stop ) { + stopQueue( data[ index ] ); + } + } else { + for ( index in data ) { + if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { + stopQueue( data[ index ] ); + } + } + } + + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && + ( type == null || timers[ index ].queue === type ) ) { + + timers[ index ].anim.stop( gotoEnd ); + dequeue = false; + timers.splice( index, 1 ); + } + } + + // Start the next in the queue if the last step wasn't forced. + // Timers currently will call their complete callbacks, which + // will dequeue but only if they were gotoEnd. + if ( dequeue || !gotoEnd ) { + jQuery.dequeue( this, type ); + } + } ); + }, + finish: function( type ) { + if ( type !== false ) { + type = type || "fx"; + } + return this.each( function() { + var index, + data = dataPriv.get( this ), + queue = data[ type + "queue" ], + hooks = data[ type + "queueHooks" ], + timers = jQuery.timers, + length = queue ? 
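+
+/* What the `jQuery.speed` normalization above produces, given the
+   `jQuery.fx.speeds` table defined further down ({ slow: 600, fast: 200,
+   _default: 400 }):
+
+   jQuery.speed( "slow" );   // duration 600, queue "fx"
+   jQuery.speed();           // duration 400 (the _default)
+   // with jQuery.fx.off (or a hidden document) every duration collapses to 0
+*/
+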
queue.length : 0; + + // Enable finishing flag on private data + data.finish = true; + + // Empty the queue first + jQuery.queue( this, type, [] ); + + if ( hooks && hooks.stop ) { + hooks.stop.call( this, true ); + } + + // Look for any active animations, and finish them + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && timers[ index ].queue === type ) { + timers[ index ].anim.stop( true ); + timers.splice( index, 1 ); + } + } + + // Look for any animations in the old queue and finish them + for ( index = 0; index < length; index++ ) { + if ( queue[ index ] && queue[ index ].finish ) { + queue[ index ].finish.call( this ); + } + } + + // Turn off finishing flag + delete data.finish; + } ); + } +} ); + +jQuery.each( [ "toggle", "show", "hide" ], function( i, name ) { + var cssFn = jQuery.fn[ name ]; + jQuery.fn[ name ] = function( speed, easing, callback ) { + return speed == null || typeof speed === "boolean" ? + cssFn.apply( this, arguments ) : + this.animate( genFx( name, true ), speed, easing, callback ); + }; +} ); + +// Generate shortcuts for custom animations +jQuery.each( { + slideDown: genFx( "show" ), + slideUp: genFx( "hide" ), + slideToggle: genFx( "toggle" ), + fadeIn: { opacity: "show" }, + fadeOut: { opacity: "hide" }, + fadeToggle: { opacity: "toggle" } +}, function( name, props ) { + jQuery.fn[ name ] = function( speed, easing, callback ) { + return this.animate( props, speed, easing, callback ); + }; +} ); + +jQuery.timers = []; +jQuery.fx.tick = function() { + var timer, + i = 0, + timers = jQuery.timers; + + fxNow = jQuery.now(); + + for ( ; i < timers.length; i++ ) { + timer = timers[ i ]; + + // Checks the timer has not already been removed + if ( !timer() && timers[ i ] === timer ) { + timers.splice( i--, 1 ); + } + } + + if ( !timers.length ) { + jQuery.fx.stop(); + } + fxNow = undefined; +}; + +jQuery.fx.timer = function( timer ) { + jQuery.timers.push( timer ); + if ( timer() ) { + jQuery.fx.start(); + } else { + jQuery.timers.pop(); + } +}; + +jQuery.fx.interval = 13; +jQuery.fx.start = function() { + if ( !timerId ) { + timerId = window.requestAnimationFrame ? + window.requestAnimationFrame( raf ) : + window.setInterval( jQuery.fx.tick, jQuery.fx.interval ); + } +}; + +jQuery.fx.stop = function() { + if ( window.cancelAnimationFrame ) { + window.cancelAnimationFrame( timerId ); + } else { + window.clearInterval( timerId ); + } + + timerId = null; +}; + +jQuery.fx.speeds = { + slow: 600, + fast: 200, + + // Default speed + _default: 400 +}; + + +// Based off of the plugin by Clint Helfers, with permission. +// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/ +jQuery.fn.delay = function( time, type ) { + time = jQuery.fx ? 
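+
+/* Usage sketch for the `delay` plugin above: it queues a timed no-op on the "fx"
+   queue (or a named queue) and wires `hooks.stop`, so `.stop()` / `.finish()` can
+   clear the pending timeout:
+
+   jQuery( "#msg" ).slideDown( 200 ).delay( 800 ).fadeOut( 400 );
+
+   // Note: .delay() only postpones queued work; it cannot delay a plain .css() call.
+*/
+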
jQuery.fx.speeds[ time ] || time : time; + type = type || "fx"; + + return this.queue( type, function( next, hooks ) { + var timeout = window.setTimeout( next, time ); + hooks.stop = function() { + window.clearTimeout( timeout ); + }; + } ); +}; + + +( function() { + var input = document.createElement( "input" ), + select = document.createElement( "select" ), + opt = select.appendChild( document.createElement( "option" ) ); + + input.type = "checkbox"; + + // Support: Android <=4.3 only + // Default value for a checkbox should be "on" + support.checkOn = input.value !== ""; + + // Support: IE <=11 only + // Must access selectedIndex to make default options select + support.optSelected = opt.selected; + + // Support: IE <=11 only + // An input loses its value after becoming a radio + input = document.createElement( "input" ); + input.value = "t"; + input.type = "radio"; + support.radioValue = input.value === "t"; +} )(); + + +var boolHook, + attrHandle = jQuery.expr.attrHandle; + +jQuery.fn.extend( { + attr: function( name, value ) { + return access( this, jQuery.attr, name, value, arguments.length > 1 ); + }, + + removeAttr: function( name ) { + return this.each( function() { + jQuery.removeAttr( this, name ); + } ); + } +} ); + +jQuery.extend( { + attr: function( elem, name, value ) { + var ret, hooks, + nType = elem.nodeType; + + // Don't get/set attributes on text, comment and attribute nodes + if ( nType === 3 || nType === 8 || nType === 2 ) { + return; + } + + // Fallback to prop when attributes are not supported + if ( typeof elem.getAttribute === "undefined" ) { + return jQuery.prop( elem, name, value ); + } + + // Attribute hooks are determined by the lowercase version + // Grab necessary hook if one is defined + if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { + hooks = jQuery.attrHooks[ name.toLowerCase() ] || + ( jQuery.expr.match.bool.test( name ) ? boolHook : undefined ); + } + + if ( value !== undefined ) { + if ( value === null ) { + jQuery.removeAttr( elem, name ); + return; + } + + if ( hooks && "set" in hooks && + ( ret = hooks.set( elem, value, name ) ) !== undefined ) { + return ret; + } + + elem.setAttribute( name, value + "" ); + return value; + } + + if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { + return ret; + } + + ret = jQuery.find.attr( elem, name ); + + // Non-existent attributes return null, we normalize to undefined + return ret == null ? 
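+
+/* Behavior of `jQuery.attr` above, sketched against a checkbox:
+
+   var $cb = jQuery( "<input type='checkbox' checked>" );
+   $cb.attr( "checked" );         // "checked"  (markup attribute, via boolHook)
+   $cb.prop( "checked" );         // true       (live DOM property)
+   $cb.attr( "checked", null );   // a null value removes the attribute
+   $cb.attr( "nosuch" );          // undefined  (missing attrs normalize to undefined)
+*/
+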
undefined : ret; + }, + + attrHooks: { + type: { + set: function( elem, value ) { + if ( !support.radioValue && value === "radio" && + jQuery.nodeName( elem, "input" ) ) { + var val = elem.value; + elem.setAttribute( "type", value ); + if ( val ) { + elem.value = val; + } + return value; + } + } + } + }, + + removeAttr: function( elem, value ) { + var name, + i = 0, + + // Attribute names can contain non-HTML whitespace characters + // https://html.spec.whatwg.org/multipage/syntax.html#attributes-2 + attrNames = value && value.match( rnothtmlwhite ); + + if ( attrNames && elem.nodeType === 1 ) { + while ( ( name = attrNames[ i++ ] ) ) { + elem.removeAttribute( name ); + } + } + } +} ); + +// Hooks for boolean attributes +boolHook = { + set: function( elem, value, name ) { + if ( value === false ) { + + // Remove boolean attributes when set to false + jQuery.removeAttr( elem, name ); + } else { + elem.setAttribute( name, name ); + } + return name; + } +}; + +jQuery.each( jQuery.expr.match.bool.source.match( /\w+/g ), function( i, name ) { + var getter = attrHandle[ name ] || jQuery.find.attr; + + attrHandle[ name ] = function( elem, name, isXML ) { + var ret, handle, + lowercaseName = name.toLowerCase(); + + if ( !isXML ) { + + // Avoid an infinite loop by temporarily removing this function from the getter + handle = attrHandle[ lowercaseName ]; + attrHandle[ lowercaseName ] = ret; + ret = getter( elem, name, isXML ) != null ? + lowercaseName : + null; + attrHandle[ lowercaseName ] = handle; + } + return ret; + }; +} ); + + + + +var rfocusable = /^(?:input|select|textarea|button)$/i, + rclickable = /^(?:a|area)$/i; + +jQuery.fn.extend( { + prop: function( name, value ) { + return access( this, jQuery.prop, name, value, arguments.length > 1 ); + }, + + removeProp: function( name ) { + return this.each( function() { + delete this[ jQuery.propFix[ name ] || name ]; + } ); + } +} ); + +jQuery.extend( { + prop: function( elem, name, value ) { + var ret, hooks, + nType = elem.nodeType; + + // Don't get/set properties on text, comment and attribute nodes + if ( nType === 3 || nType === 8 || nType === 2 ) { + return; + } + + if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { + + // Fix name and attach hooks + name = jQuery.propFix[ name ] || name; + hooks = jQuery.propHooks[ name ]; + } + + if ( value !== undefined ) { + if ( hooks && "set" in hooks && + ( ret = hooks.set( elem, value, name ) ) !== undefined ) { + return ret; + } + + return ( elem[ name ] = value ); + } + + if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { + return ret; + } + + return elem[ name ]; + }, + + propHooks: { + tabIndex: { + get: function( elem ) { + + // Support: IE <=9 - 11 only + // elem.tabIndex doesn't always return the + // correct value when it hasn't been explicitly set + // https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ + // Use proper attribute retrieval(#12072) + var tabindex = jQuery.find.attr( elem, "tabindex" ); + + if ( tabindex ) { + return parseInt( tabindex, 10 ); + } + + if ( + rfocusable.test( elem.nodeName ) || + rclickable.test( elem.nodeName ) && + elem.href + ) { + return 0; + } + + return -1; + } + } + }, + + propFix: { + "for": "htmlFor", + "class": "className" + } +} ); + +// Support: IE <=11 only +// Accessing the selectedIndex property +// forces the browser to respect setting selected +// on the option +// The getter ensures a default option is selected +// when in an 
optgroup +// eslint rule "no-unused-expressions" is disabled for this code +// since it considers such accessions noop +if ( !support.optSelected ) { + jQuery.propHooks.selected = { + get: function( elem ) { + + /* eslint no-unused-expressions: "off" */ + + var parent = elem.parentNode; + if ( parent && parent.parentNode ) { + parent.parentNode.selectedIndex; + } + return null; + }, + set: function( elem ) { + + /* eslint no-unused-expressions: "off" */ + + var parent = elem.parentNode; + if ( parent ) { + parent.selectedIndex; + + if ( parent.parentNode ) { + parent.parentNode.selectedIndex; + } + } + } + }; +} + +jQuery.each( [ + "tabIndex", + "readOnly", + "maxLength", + "cellSpacing", + "cellPadding", + "rowSpan", + "colSpan", + "useMap", + "frameBorder", + "contentEditable" +], function() { + jQuery.propFix[ this.toLowerCase() ] = this; +} ); + + + + + // Strip and collapse whitespace according to HTML spec + // https://html.spec.whatwg.org/multipage/infrastructure.html#strip-and-collapse-whitespace + function stripAndCollapse( value ) { + var tokens = value.match( rnothtmlwhite ) || []; + return tokens.join( " " ); + } + + +function getClass( elem ) { + return elem.getAttribute && elem.getAttribute( "class" ) || ""; +} + +jQuery.fn.extend( { + addClass: function( value ) { + var classes, elem, cur, curValue, clazz, j, finalValue, + i = 0; + + if ( jQuery.isFunction( value ) ) { + return this.each( function( j ) { + jQuery( this ).addClass( value.call( this, j, getClass( this ) ) ); + } ); + } + + if ( typeof value === "string" && value ) { + classes = value.match( rnothtmlwhite ) || []; + + while ( ( elem = this[ i++ ] ) ) { + curValue = getClass( elem ); + cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); + + if ( cur ) { + j = 0; + while ( ( clazz = classes[ j++ ] ) ) { + if ( cur.indexOf( " " + clazz + " " ) < 0 ) { + cur += clazz + " "; + } + } + + // Only assign if different to avoid unneeded rendering. + finalValue = stripAndCollapse( cur ); + if ( curValue !== finalValue ) { + elem.setAttribute( "class", finalValue ); + } + } + } + } + + return this; + }, + + removeClass: function( value ) { + var classes, elem, cur, curValue, clazz, j, finalValue, + i = 0; + + if ( jQuery.isFunction( value ) ) { + return this.each( function( j ) { + jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) ); + } ); + } + + if ( !arguments.length ) { + return this.attr( "class", "" ); + } + + if ( typeof value === "string" && value ) { + classes = value.match( rnothtmlwhite ) || []; + + while ( ( elem = this[ i++ ] ) ) { + curValue = getClass( elem ); + + // This expression is here for better compressibility (see addClass) + cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); + + if ( cur ) { + j = 0; + while ( ( clazz = classes[ j++ ] ) ) { + + // Remove *all* instances + while ( cur.indexOf( " " + clazz + " " ) > -1 ) { + cur = cur.replace( " " + clazz + " ", " " ); + } + } + + // Only assign if different to avoid unneeded rendering. + finalValue = stripAndCollapse( cur ); + if ( curValue !== finalValue ) { + elem.setAttribute( "class", finalValue ); + } + } + } + } + + return this; + }, + + toggleClass: function( value, stateVal ) { + var type = typeof value; + + if ( typeof stateVal === "boolean" && type === "string" ) { + return stateVal ? 
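+
+/* The class helpers here all funnel through `stripAndCollapse`, so duplicated or
+   oddly-spaced class attributes come out normalized. Function-valued arguments
+   receive the element index and its current class string:
+
+   jQuery( "li" ).addClass( function( i ) {
+       return "item-" + i;                    // a distinct class per element
+   } );
+
+   jQuery( "#panel" ).toggleClass( "open", isOpen );  // forced on/off by a boolean
+*/
+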
this.addClass( value ) : this.removeClass( value ); + } + + if ( jQuery.isFunction( value ) ) { + return this.each( function( i ) { + jQuery( this ).toggleClass( + value.call( this, i, getClass( this ), stateVal ), + stateVal + ); + } ); + } + + return this.each( function() { + var className, i, self, classNames; + + if ( type === "string" ) { + + // Toggle individual class names + i = 0; + self = jQuery( this ); + classNames = value.match( rnothtmlwhite ) || []; + + while ( ( className = classNames[ i++ ] ) ) { + + // Check each className given, space separated list + if ( self.hasClass( className ) ) { + self.removeClass( className ); + } else { + self.addClass( className ); + } + } + + // Toggle whole class name + } else if ( value === undefined || type === "boolean" ) { + className = getClass( this ); + if ( className ) { + + // Store className if set + dataPriv.set( this, "__className__", className ); + } + + // If the element has a class name or if we're passed `false`, + // then remove the whole classname (if there was one, the above saved it). + // Otherwise bring back whatever was previously saved (if anything), + // falling back to the empty string if nothing was stored. + if ( this.setAttribute ) { + this.setAttribute( "class", + className || value === false ? + "" : + dataPriv.get( this, "__className__" ) || "" + ); + } + } + } ); + }, + + hasClass: function( selector ) { + var className, elem, + i = 0; + + className = " " + selector + " "; + while ( ( elem = this[ i++ ] ) ) { + if ( elem.nodeType === 1 && + ( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) { + return true; + } + } + + return false; + } +} ); + + + + +var rreturn = /\r/g; + +jQuery.fn.extend( { + val: function( value ) { + var hooks, ret, isFunction, + elem = this[ 0 ]; + + if ( !arguments.length ) { + if ( elem ) { + hooks = jQuery.valHooks[ elem.type ] || + jQuery.valHooks[ elem.nodeName.toLowerCase() ]; + + if ( hooks && + "get" in hooks && + ( ret = hooks.get( elem, "value" ) ) !== undefined + ) { + return ret; + } + + ret = elem.value; + + // Handle most common string cases + if ( typeof ret === "string" ) { + return ret.replace( rreturn, "" ); + } + + // Handle cases where value is null/undef or number + return ret == null ? "" : ret; + } + + return; + } + + isFunction = jQuery.isFunction( value ); + + return this.each( function( i ) { + var val; + + if ( this.nodeType !== 1 ) { + return; + } + + if ( isFunction ) { + val = value.call( this, i, jQuery( this ).val() ); + } else { + val = value; + } + + // Treat null/undefined as ""; convert numbers to string + if ( val == null ) { + val = ""; + + } else if ( typeof val === "number" ) { + val += ""; + + } else if ( jQuery.isArray( val ) ) { + val = jQuery.map( val, function( value ) { + return value == null ? "" : value + ""; + } ); + } + + hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; + + // If set returns undefined, fall back to normal setting + if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) { + this.value = val; + } + } ); + } +} ); + +jQuery.extend( { + valHooks: { + option: { + get: function( elem ) { + + var val = jQuery.find.attr( elem, "value" ); + return val != null ? 
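+
+/* `.val()` setter normalization above, sketched: null/undefined become "", numbers
+   are stringified, and arrays pass through to the select/checkbox valHooks that
+   follow:
+
+   jQuery( "#qty" ).val( 3 );               // sets "3"
+   jQuery( "#qty" ).val( null );            // sets ""
+   jQuery( "#tags" ).val( [ "a", "b" ] );   // selects the matching <option>s
+*/
+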
+ val : + + // Support: IE <=10 - 11 only + // option.text throws exceptions (#14686, #14858) + // Strip and collapse whitespace + // https://html.spec.whatwg.org/#strip-and-collapse-whitespace + stripAndCollapse( jQuery.text( elem ) ); + } + }, + select: { + get: function( elem ) { + var value, option, i, + options = elem.options, + index = elem.selectedIndex, + one = elem.type === "select-one", + values = one ? null : [], + max = one ? index + 1 : options.length; + + if ( index < 0 ) { + i = max; + + } else { + i = one ? index : 0; + } + + // Loop through all the selected options + for ( ; i < max; i++ ) { + option = options[ i ]; + + // Support: IE <=9 only + // IE8-9 doesn't update selected after form reset (#2551) + if ( ( option.selected || i === index ) && + + // Don't return options that are disabled or in a disabled optgroup + !option.disabled && + ( !option.parentNode.disabled || + !jQuery.nodeName( option.parentNode, "optgroup" ) ) ) { + + // Get the specific value for the option + value = jQuery( option ).val(); + + // We don't need an array for one selects + if ( one ) { + return value; + } + + // Multi-Selects return an array + values.push( value ); + } + } + + return values; + }, + + set: function( elem, value ) { + var optionSet, option, + options = elem.options, + values = jQuery.makeArray( value ), + i = options.length; + + while ( i-- ) { + option = options[ i ]; + + /* eslint-disable no-cond-assign */ + + if ( option.selected = + jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1 + ) { + optionSet = true; + } + + /* eslint-enable no-cond-assign */ + } + + // Force browsers to behave consistently when non-matching value is set + if ( !optionSet ) { + elem.selectedIndex = -1; + } + return values; + } + } + } +} ); + +// Radios and checkboxes getter/setter +jQuery.each( [ "radio", "checkbox" ], function() { + jQuery.valHooks[ this ] = { + set: function( elem, value ) { + if ( jQuery.isArray( value ) ) { + return ( elem.checked = jQuery.inArray( jQuery( elem ).val(), value ) > -1 ); + } + } + }; + if ( !support.checkOn ) { + jQuery.valHooks[ this ].get = function( elem ) { + return elem.getAttribute( "value" ) === null ? "on" : elem.value; + }; + } +} ); + + + + +// Return jQuery for attributes-only inclusion + + +var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/; + +jQuery.extend( jQuery.event, { + + trigger: function( event, data, elem, onlyHandlers ) { + + var i, cur, tmp, bubbleType, ontype, handle, special, + eventPath = [ elem || document ], + type = hasOwn.call( event, "type" ) ? event.type : event, + namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : []; + + cur = tmp = elem = elem || document; + + // Don't do events on text and comment nodes + if ( elem.nodeType === 3 || elem.nodeType === 8 ) { + return; + } + + // focus/blur morphs to focusin/out; ensure we're not firing them right now + if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { + return; + } + + if ( type.indexOf( "." ) > -1 ) { + + // Namespaced trigger; create a regexp to match event type in handle() + namespaces = type.split( "." ); + type = namespaces.shift(); + namespaces.sort(); + } + ontype = type.indexOf( ":" ) < 0 && "on" + type; + + // Caller can pass in a jQuery.Event object, Object, or just an event type string + event = event[ jQuery.expando ] ? + event : + new jQuery.Event( type, typeof event === "object" && event ); + + // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) + event.isTrigger = onlyHandlers ? 
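+
+/* The trigger machinery here supports namespaced types and a handlers-only mode:
+
+   jQuery( "#save" ).trigger( "click.myPlugin" );
+   // fires only handlers bound with the .myPlugin namespace
+
+   jQuery( "#save" ).triggerHandler( "click" );
+   // onlyHandlers mode: no bubbling, no native default action, returns the
+   // last handler's return value
+*/
+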
2 : 3; + event.namespace = namespaces.join( "." ); + event.rnamespace = event.namespace ? + new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) : + null; + + // Clean up the event in case it is being reused + event.result = undefined; + if ( !event.target ) { + event.target = elem; + } + + // Clone any incoming data and prepend the event, creating the handler arg list + data = data == null ? + [ event ] : + jQuery.makeArray( data, [ event ] ); + + // Allow special events to draw outside the lines + special = jQuery.event.special[ type ] || {}; + if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { + return; + } + + // Determine event propagation path in advance, per W3C events spec (#9951) + // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) + if ( !onlyHandlers && !special.noBubble && !jQuery.isWindow( elem ) ) { + + bubbleType = special.delegateType || type; + if ( !rfocusMorph.test( bubbleType + type ) ) { + cur = cur.parentNode; + } + for ( ; cur; cur = cur.parentNode ) { + eventPath.push( cur ); + tmp = cur; + } + + // Only add window if we got to document (e.g., not plain obj or detached DOM) + if ( tmp === ( elem.ownerDocument || document ) ) { + eventPath.push( tmp.defaultView || tmp.parentWindow || window ); + } + } + + // Fire handlers on the event path + i = 0; + while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) { + + event.type = i > 1 ? + bubbleType : + special.bindType || type; + + // jQuery handler + handle = ( dataPriv.get( cur, "events" ) || {} )[ event.type ] && + dataPriv.get( cur, "handle" ); + if ( handle ) { + handle.apply( cur, data ); + } + + // Native handler + handle = ontype && cur[ ontype ]; + if ( handle && handle.apply && acceptData( cur ) ) { + event.result = handle.apply( cur, data ); + if ( event.result === false ) { + event.preventDefault(); + } + } + } + event.type = type; + + // If nobody prevented the default action, do it now + if ( !onlyHandlers && !event.isDefaultPrevented() ) { + + if ( ( !special._default || + special._default.apply( eventPath.pop(), data ) === false ) && + acceptData( elem ) ) { + + // Call a native DOM method on the target with the same name as the event. 
+ // Don't do default actions on window, that's where global variables be (#6170) + if ( ontype && jQuery.isFunction( elem[ type ] ) && !jQuery.isWindow( elem ) ) { + + // Don't re-trigger an onFOO event when we call its FOO() method + tmp = elem[ ontype ]; + + if ( tmp ) { + elem[ ontype ] = null; + } + + // Prevent re-triggering of the same event, since we already bubbled it above + jQuery.event.triggered = type; + elem[ type ](); + jQuery.event.triggered = undefined; + + if ( tmp ) { + elem[ ontype ] = tmp; + } + } + } + } + + return event.result; + }, + + // Piggyback on a donor event to simulate a different one + // Used only for `focus(in | out)` events + simulate: function( type, elem, event ) { + var e = jQuery.extend( + new jQuery.Event(), + event, + { + type: type, + isSimulated: true + } + ); + + jQuery.event.trigger( e, null, elem ); + } + +} ); + +jQuery.fn.extend( { + + trigger: function( type, data ) { + return this.each( function() { + jQuery.event.trigger( type, data, this ); + } ); + }, + triggerHandler: function( type, data ) { + var elem = this[ 0 ]; + if ( elem ) { + return jQuery.event.trigger( type, data, elem, true ); + } + } +} ); + + +jQuery.each( ( "blur focus focusin focusout resize scroll click dblclick " + + "mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave " + + "change select submit keydown keypress keyup contextmenu" ).split( " " ), + function( i, name ) { + + // Handle event binding + jQuery.fn[ name ] = function( data, fn ) { + return arguments.length > 0 ? + this.on( name, null, data, fn ) : + this.trigger( name ); + }; +} ); + +jQuery.fn.extend( { + hover: function( fnOver, fnOut ) { + return this.mouseenter( fnOver ).mouseleave( fnOut || fnOver ); + } +} ); + + + + +support.focusin = "onfocusin" in window; + + +// Support: Firefox <=44 +// Firefox doesn't have focus(in | out) events +// Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787 +// +// Support: Chrome <=48 - 49, Safari <=9.0 - 9.1 +// focus(in | out) events fire after focus & blur events, +// which is spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order +// Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857 +if ( !support.focusin ) { + jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) { + + // Attach a single capturing handler on the document while someone wants focusin/focusout + var handler = function( event ) { + jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) ); + }; + + jQuery.event.special[ fix ] = { + setup: function() { + var doc = this.ownerDocument || this, + attaches = dataPriv.access( doc, fix ); + + if ( !attaches ) { + doc.addEventListener( orig, handler, true ); + } + dataPriv.access( doc, fix, ( attaches || 0 ) + 1 ); + }, + teardown: function() { + var doc = this.ownerDocument || this, + attaches = dataPriv.access( doc, fix ) - 1; + + if ( !attaches ) { + doc.removeEventListener( orig, handler, true ); + dataPriv.remove( doc, fix ); + + } else { + dataPriv.access( doc, fix, attaches ); + } + } + }; + } ); +} +var location = window.location; + +var nonce = jQuery.now(); + +var rquery = ( /\?/ ); + + + +// Cross-browser xml parsing +jQuery.parseXML = function( data ) { + var xml; + if ( !data || typeof data !== "string" ) { + return null; + } + + // Support: IE 9 - 11 only + // IE throws on parseFromString with invalid input. 
+ try { + xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" ); + } catch ( e ) { + xml = undefined; + } + + if ( !xml || xml.getElementsByTagName( "parsererror" ).length ) { + jQuery.error( "Invalid XML: " + data ); + } + return xml; +}; + + +var + rbracket = /\[\]$/, + rCRLF = /\r?\n/g, + rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, + rsubmittable = /^(?:input|select|textarea|keygen)/i; + +function buildParams( prefix, obj, traditional, add ) { + var name; + + if ( jQuery.isArray( obj ) ) { + + // Serialize array item. + jQuery.each( obj, function( i, v ) { + if ( traditional || rbracket.test( prefix ) ) { + + // Treat each array item as a scalar. + add( prefix, v ); + + } else { + + // Item is non-scalar (array or object), encode its numeric index. + buildParams( + prefix + "[" + ( typeof v === "object" && v != null ? i : "" ) + "]", + v, + traditional, + add + ); + } + } ); + + } else if ( !traditional && jQuery.type( obj ) === "object" ) { + + // Serialize object item. + for ( name in obj ) { + buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); + } + + } else { + + // Serialize scalar item. + add( prefix, obj ); + } +} + +// Serialize an array of form elements or a set of +// key/values into a query string +jQuery.param = function( a, traditional ) { + var prefix, + s = [], + add = function( key, valueOrFunction ) { + + // If value is a function, invoke it and use its return value + var value = jQuery.isFunction( valueOrFunction ) ? + valueOrFunction() : + valueOrFunction; + + s[ s.length ] = encodeURIComponent( key ) + "=" + + encodeURIComponent( value == null ? "" : value ); + }; + + // If an array was passed in, assume that it is an array of form elements. + if ( jQuery.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { + + // Serialize the form elements + jQuery.each( a, function() { + add( this.name, this.value ); + } ); + + } else { + + // If traditional, encode the "old" way (the way 1.3.2 or older + // did it), otherwise encode params recursively. + for ( prefix in a ) { + buildParams( prefix, a[ prefix ], traditional, add ); + } + } + + // Return the resulting serialization + return s.join( "&" ); +}; + +jQuery.fn.extend( { + serialize: function() { + return jQuery.param( this.serializeArray() ); + }, + serializeArray: function() { + return this.map( function() { + + // Can add propHook for "elements" to filter or add form elements + var elements = jQuery.prop( this, "elements" ); + return elements ? 
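+
+/* `jQuery.param` / `buildParams` above, sketched (shown URL-decoded for clarity):
+
+   jQuery.param( { a: [ 1, 2 ], b: { c: 3 } } );
+   // => "a[]=1&a[]=2&b[c]=3"    (recursive encoding)
+
+   jQuery.param( { a: [ 1, 2 ] }, true );
+   // => "a=1&a=2"               (traditional, 1.3.2-style flat encoding)
+*/
+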
jQuery.makeArray( elements ) : this; + } ) + .filter( function() { + var type = this.type; + + // Use .is( ":disabled" ) so that fieldset[disabled] works + return this.name && !jQuery( this ).is( ":disabled" ) && + rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && + ( this.checked || !rcheckableType.test( type ) ); + } ) + .map( function( i, elem ) { + var val = jQuery( this ).val(); + + if ( val == null ) { + return null; + } + + if ( jQuery.isArray( val ) ) { + return jQuery.map( val, function( val ) { + return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + } ); + } + + return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + } ).get(); + } +} ); + + +var + r20 = /%20/g, + rhash = /#.*$/, + rantiCache = /([?&])_=[^&]*/, + rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg, + + // #7653, #8125, #8152: local protocol detection + rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, + rnoContent = /^(?:GET|HEAD)$/, + rprotocol = /^\/\//, + + /* Prefilters + * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) + * 2) These are called: + * - BEFORE asking for a transport + * - AFTER param serialization (s.data is a string if s.processData is true) + * 3) key is the dataType + * 4) the catchall symbol "*" can be used + * 5) execution will start with transport dataType and THEN continue down to "*" if needed + */ + prefilters = {}, + + /* Transports bindings + * 1) key is the dataType + * 2) the catchall symbol "*" can be used + * 3) selection will start with transport dataType and THEN go to "*" if needed + */ + transports = {}, + + // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression + allTypes = "*/".concat( "*" ), + + // Anchor tag for parsing the document origin + originAnchor = document.createElement( "a" ); + originAnchor.href = location.href; + +// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport +function addToPrefiltersOrTransports( structure ) { + + // dataTypeExpression is optional and defaults to "*" + return function( dataTypeExpression, func ) { + + if ( typeof dataTypeExpression !== "string" ) { + func = dataTypeExpression; + dataTypeExpression = "*"; + } + + var dataType, + i = 0, + dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || []; + + if ( jQuery.isFunction( func ) ) { + + // For each dataType in the dataTypeExpression + while ( ( dataType = dataTypes[ i++ ] ) ) { + + // Prepend if requested + if ( dataType[ 0 ] === "+" ) { + dataType = dataType.slice( 1 ) || "*"; + ( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func ); + + // Otherwise append + } else { + ( structure[ dataType ] = structure[ dataType ] || [] ).push( func ); + } + } + } + }; +} + +// Base inspection function for prefilters and transports +function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { + + var inspected = {}, + seekingTransport = ( structure === transports ); + + function inspect( dataType ) { + var selected; + inspected[ dataType ] = true; + jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { + var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR ); + if ( typeof dataTypeOrTransport === "string" && + !seekingTransport && !inspected[ dataTypeOrTransport ] ) { + + options.dataTypes.unshift( dataTypeOrTransport ); + inspect( dataTypeOrTransport ); + return false; + } else if ( seekingTransport ) { + return !( selected = dataTypeOrTransport 
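+
+/* Registration sketch for the prefilter machinery above: the dataType key selects
+   when a hook runs, a leading "+" prepends it, and returning a dataType string
+   from a prefilter re-routes processing to that type. The `legacyApi` option below
+   is a made-up name for illustration:
+
+   jQuery.ajaxPrefilter( "json", function( options, originalOptions, jqXHR ) {
+       if ( options.legacyApi ) {
+           options.url = "/v1" + options.url;
+       }
+   } );
+*/
+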
); + } + } ); + return selected; + } + + return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); +} + +// A special extend for ajax options +// that takes "flat" options (not to be deep extended) +// Fixes #9887 +function ajaxExtend( target, src ) { + var key, deep, + flatOptions = jQuery.ajaxSettings.flatOptions || {}; + + for ( key in src ) { + if ( src[ key ] !== undefined ) { + ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; + } + } + if ( deep ) { + jQuery.extend( true, target, deep ); + } + + return target; +} + +/* Handles responses to an ajax request: + * - finds the right dataType (mediates between content-type and expected dataType) + * - returns the corresponding response + */ +function ajaxHandleResponses( s, jqXHR, responses ) { + + var ct, type, finalDataType, firstDataType, + contents = s.contents, + dataTypes = s.dataTypes; + + // Remove auto dataType and get content-type in the process + while ( dataTypes[ 0 ] === "*" ) { + dataTypes.shift(); + if ( ct === undefined ) { + ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" ); + } + } + + // Check if we're dealing with a known content-type + if ( ct ) { + for ( type in contents ) { + if ( contents[ type ] && contents[ type ].test( ct ) ) { + dataTypes.unshift( type ); + break; + } + } + } + + // Check to see if we have a response for the expected dataType + if ( dataTypes[ 0 ] in responses ) { + finalDataType = dataTypes[ 0 ]; + } else { + + // Try convertible dataTypes + for ( type in responses ) { + if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) { + finalDataType = type; + break; + } + if ( !firstDataType ) { + firstDataType = type; + } + } + + // Or just use first one + finalDataType = finalDataType || firstDataType; + } + + // If we found a dataType + // We add the dataType to the list if needed + // and return the corresponding response + if ( finalDataType ) { + if ( finalDataType !== dataTypes[ 0 ] ) { + dataTypes.unshift( finalDataType ); + } + return responses[ finalDataType ]; + } +} + +/* Chain conversions given the request and the original response + * Also sets the responseXXX fields on the jqXHR instance + */ +function ajaxConvert( s, response, jqXHR, isSuccess ) { + var conv2, current, conv, tmp, prev, + converters = {}, + + // Work with a copy of dataTypes in case we need to modify it for conversion + dataTypes = s.dataTypes.slice(); + + // Create converters map with lowercased keys + if ( dataTypes[ 1 ] ) { + for ( conv in s.converters ) { + converters[ conv.toLowerCase() ] = s.converters[ conv ]; + } + } + + current = dataTypes.shift(); + + // Convert to each sequential dataType + while ( current ) { + + if ( s.responseFields[ current ] ) { + jqXHR[ s.responseFields[ current ] ] = response; + } + + // Apply the dataFilter if provided + if ( !prev && isSuccess && s.dataFilter ) { + response = s.dataFilter( response, s.dataType ); + } + + prev = current; + current = dataTypes.shift(); + + if ( current ) { + + // There's only work to do if current dataType is non-auto + if ( current === "*" ) { + + current = prev; + + // Convert response if prev dataType is non-auto and differs from current + } else if ( prev !== "*" && prev !== current ) { + + // Seek a direct converter + conv = converters[ prev + " " + current ] || converters[ "* " + current ]; + + // If none found, seek a pair + if ( !conv ) { + for ( conv2 in converters ) { + + // If conv2 outputs current + tmp = conv2.split( " " ); + if ( tmp[ 1 ] === current ) { + + 
// If prev can be converted to accepted input + conv = converters[ prev + " " + tmp[ 0 ] ] || + converters[ "* " + tmp[ 0 ] ]; + if ( conv ) { + + // Condense equivalence converters + if ( conv === true ) { + conv = converters[ conv2 ]; + + // Otherwise, insert the intermediate dataType + } else if ( converters[ conv2 ] !== true ) { + current = tmp[ 0 ]; + dataTypes.unshift( tmp[ 1 ] ); + } + break; + } + } + } + } + + // Apply converter (if not an equivalence) + if ( conv !== true ) { + + // Unless errors are allowed to bubble, catch and return them + if ( conv && s.throws ) { + response = conv( response ); + } else { + try { + response = conv( response ); + } catch ( e ) { + return { + state: "parsererror", + error: conv ? e : "No conversion from " + prev + " to " + current + }; + } + } + } + } + } + } + + return { state: "success", data: response }; +} + +jQuery.extend( { + + // Counter for holding the number of active queries + active: 0, + + // Last-Modified header cache for next request + lastModified: {}, + etag: {}, + + ajaxSettings: { + url: location.href, + type: "GET", + isLocal: rlocalProtocol.test( location.protocol ), + global: true, + processData: true, + async: true, + contentType: "application/x-www-form-urlencoded; charset=UTF-8", + + /* + timeout: 0, + data: null, + dataType: null, + username: null, + password: null, + cache: null, + throws: false, + traditional: false, + headers: {}, + */ + + accepts: { + "*": allTypes, + text: "text/plain", + html: "text/html", + xml: "application/xml, text/xml", + json: "application/json, text/javascript" + }, + + contents: { + xml: /\bxml\b/, + html: /\bhtml/, + json: /\bjson\b/ + }, + + responseFields: { + xml: "responseXML", + text: "responseText", + json: "responseJSON" + }, + + // Data converters + // Keys separate source (or catchall "*") and destination types with a single space + converters: { + + // Convert anything to text + "* text": String, + + // Text to html (true = no transformation) + "text html": true, + + // Evaluate text as a json expression + "text json": JSON.parse, + + // Parse text as xml + "text xml": jQuery.parseXML + }, + + // For options that shouldn't be deep extended: + // you can add your own custom options here if + // and when you create one that shouldn't be + // deep extended (see ajaxExtend) + flatOptions: { + url: true, + context: true + } + }, + + // Creates a full fledged settings object into target + // with both ajaxSettings and settings fields. + // If target is omitted, writes into ajaxSettings. + ajaxSetup: function( target, settings ) { + return settings ? 
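+
+/* The `converters` table in `ajaxSettings` below keys on "source destination"
+   pairs, so a new dataType is just a settings change. A hedged sketch for a
+   hypothetical "csv" type (the parser is a stand-in, not a jQuery facility):
+
+   jQuery.ajaxSetup( {
+       accepts: { csv: "text/csv" },
+       contents: { csv: /\bcsv\b/ },
+       converters: {
+           "text csv": function( text ) {
+               return text.split( "\n" ).map( function( line ) {
+                   return line.split( "," );
+               } );
+           }
+       }
+   } );
+*/
+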
+ + // Building a settings object + ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : + + // Extending ajaxSettings + ajaxExtend( jQuery.ajaxSettings, target ); + }, + + ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), + ajaxTransport: addToPrefiltersOrTransports( transports ), + + // Main method + ajax: function( url, options ) { + + // If url is an object, simulate pre-1.5 signature + if ( typeof url === "object" ) { + options = url; + url = undefined; + } + + // Force options to be an object + options = options || {}; + + var transport, + + // URL without anti-cache param + cacheURL, + + // Response headers + responseHeadersString, + responseHeaders, + + // timeout handle + timeoutTimer, + + // Url cleanup var + urlAnchor, + + // Request state (becomes false upon send and true upon completion) + completed, + + // To know if global events are to be dispatched + fireGlobals, + + // Loop variable + i, + + // uncached part of the url + uncached, + + // Create the final options object + s = jQuery.ajaxSetup( {}, options ), + + // Callbacks context + callbackContext = s.context || s, + + // Context for global events is callbackContext if it is a DOM node or jQuery collection + globalEventContext = s.context && + ( callbackContext.nodeType || callbackContext.jquery ) ? + jQuery( callbackContext ) : + jQuery.event, + + // Deferreds + deferred = jQuery.Deferred(), + completeDeferred = jQuery.Callbacks( "once memory" ), + + // Status-dependent callbacks + statusCode = s.statusCode || {}, + + // Headers (they are sent all at once) + requestHeaders = {}, + requestHeadersNames = {}, + + // Default abort message + strAbort = "canceled", + + // Fake xhr + jqXHR = { + readyState: 0, + + // Builds headers hashtable if needed + getResponseHeader: function( key ) { + var match; + if ( completed ) { + if ( !responseHeaders ) { + responseHeaders = {}; + while ( ( match = rheaders.exec( responseHeadersString ) ) ) { + responseHeaders[ match[ 1 ].toLowerCase() ] = match[ 2 ]; + } + } + match = responseHeaders[ key.toLowerCase() ]; + } + return match == null ? null : match; + }, + + // Raw string + getAllResponseHeaders: function() { + return completed ? 
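+
+/* The fake `jqXHR` being built here is a promise with XHR-flavoured extras; the
+   `statusCode` option maps HTTP codes to callbacks. URL and handler below are
+   illustrative:
+
+   jQuery.ajax( {
+       url: "/api/item/42",
+       statusCode: {
+           404: function() { console.log( "missing" ); }
+       }
+   } ).done( function( data, statusText, jqXHR ) {
+       console.log( jqXHR.getResponseHeader( "Content-Type" ) );
+   } );
+*/
+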
responseHeadersString : null; + }, + + // Caches the header + setRequestHeader: function( name, value ) { + if ( completed == null ) { + name = requestHeadersNames[ name.toLowerCase() ] = + requestHeadersNames[ name.toLowerCase() ] || name; + requestHeaders[ name ] = value; + } + return this; + }, + + // Overrides response content-type header + overrideMimeType: function( type ) { + if ( completed == null ) { + s.mimeType = type; + } + return this; + }, + + // Status-dependent callbacks + statusCode: function( map ) { + var code; + if ( map ) { + if ( completed ) { + + // Execute the appropriate callbacks + jqXHR.always( map[ jqXHR.status ] ); + } else { + + // Lazy-add the new callbacks in a way that preserves old ones + for ( code in map ) { + statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; + } + } + } + return this; + }, + + // Cancel the request + abort: function( statusText ) { + var finalText = statusText || strAbort; + if ( transport ) { + transport.abort( finalText ); + } + done( 0, finalText ); + return this; + } + }; + + // Attach deferreds + deferred.promise( jqXHR ); + + // Add protocol if not provided (prefilters might expect it) + // Handle falsy url in the settings object (#10093: consistency with old signature) + // We also use the url parameter if available + s.url = ( ( url || s.url || location.href ) + "" ) + .replace( rprotocol, location.protocol + "//" ); + + // Alias method option to type as per ticket #12004 + s.type = options.method || options.type || s.method || s.type; + + // Extract dataTypes list + s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ]; + + // A cross-domain request is in order when the origin doesn't match the current origin. + if ( s.crossDomain == null ) { + urlAnchor = document.createElement( "a" ); + + // Support: IE <=8 - 11, Edge 12 - 13 + // IE throws exception on accessing the href property if url is malformed, + // e.g. 
http://example.com:80x/ + try { + urlAnchor.href = s.url; + + // Support: IE <=8 - 11 only + // Anchor's host property isn't correctly set when s.url is relative + urlAnchor.href = urlAnchor.href; + s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !== + urlAnchor.protocol + "//" + urlAnchor.host; + } catch ( e ) { + + // If there is an error parsing the URL, assume it is crossDomain, + // it can be rejected by the transport if it is invalid + s.crossDomain = true; + } + } + + // Convert data if not already a string + if ( s.data && s.processData && typeof s.data !== "string" ) { + s.data = jQuery.param( s.data, s.traditional ); + } + + // Apply prefilters + inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); + + // If request was aborted inside a prefilter, stop there + if ( completed ) { + return jqXHR; + } + + // We can fire global events as of now if asked to + // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118) + fireGlobals = jQuery.event && s.global; + + // Watch for a new set of requests + if ( fireGlobals && jQuery.active++ === 0 ) { + jQuery.event.trigger( "ajaxStart" ); + } + + // Uppercase the type + s.type = s.type.toUpperCase(); + + // Determine if request has content + s.hasContent = !rnoContent.test( s.type ); + + // Save the URL in case we're toying with the If-Modified-Since + // and/or If-None-Match header later on + // Remove hash to simplify url manipulation + cacheURL = s.url.replace( rhash, "" ); + + // More options handling for requests with no content + if ( !s.hasContent ) { + + // Remember the hash so we can put it back + uncached = s.url.slice( cacheURL.length ); + + // If data is available, append data to url + if ( s.data ) { + cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data; + + // #9682: remove data so that it's not used in an eventual retry + delete s.data; + } + + // Add or update anti-cache param if needed + if ( s.cache === false ) { + cacheURL = cacheURL.replace( rantiCache, "$1" ); + uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce++ ) + uncached; + } + + // Put hash and anti-cache on the URL that will be requested (gh-1732) + s.url = cacheURL + uncached; + + // Change '%20' to '+' if this is encoded form body content (gh-2658) + } else if ( s.data && s.processData && + ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) { + s.data = s.data.replace( r20, "+" ); + } + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. + if ( s.ifModified ) { + if ( jQuery.lastModified[ cacheURL ] ) { + jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); + } + if ( jQuery.etag[ cacheURL ] ) { + jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); + } + } + + // Set the correct header, if data is being sent + if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { + jqXHR.setRequestHeader( "Content-Type", s.contentType ); + } + + // Set the Accepts header for the server, depending on the dataType + jqXHR.setRequestHeader( + "Accept", + s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ? + s.accepts[ s.dataTypes[ 0 ] ] + + ( s.dataTypes[ 0 ] !== "*" ? 
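+
+/* Effect of the cache / ifModified handling above, sketched (timestamp value
+   illustrative):
+
+   jQuery.ajax( { url: "/feed", cache: false } );
+   // GET requests gain an anti-cache param: /feed?_=1513678500000
+
+   jQuery.ajax( { url: "/feed", ifModified: true } );
+   // replays cached Last-Modified / ETag values as If-Modified-Since /
+   // If-None-Match headers; a 304 later surfaces as statusText "notmodified"
+*/
+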
", " + allTypes + "; q=0.01" : "" ) : + s.accepts[ "*" ] + ); + + // Check for headers option + for ( i in s.headers ) { + jqXHR.setRequestHeader( i, s.headers[ i ] ); + } + + // Allow custom headers/mimetypes and early abort + if ( s.beforeSend && + ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) { + + // Abort if not done already and return + return jqXHR.abort(); + } + + // Aborting is no longer a cancellation + strAbort = "abort"; + + // Install callbacks on deferreds + completeDeferred.add( s.complete ); + jqXHR.done( s.success ); + jqXHR.fail( s.error ); + + // Get transport + transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); + + // If no transport, we auto-abort + if ( !transport ) { + done( -1, "No Transport" ); + } else { + jqXHR.readyState = 1; + + // Send global event + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); + } + + // If request was aborted inside ajaxSend, stop there + if ( completed ) { + return jqXHR; + } + + // Timeout + if ( s.async && s.timeout > 0 ) { + timeoutTimer = window.setTimeout( function() { + jqXHR.abort( "timeout" ); + }, s.timeout ); + } + + try { + completed = false; + transport.send( requestHeaders, done ); + } catch ( e ) { + + // Rethrow post-completion exceptions + if ( completed ) { + throw e; + } + + // Propagate others as results + done( -1, e ); + } + } + + // Callback for when everything is done + function done( status, nativeStatusText, responses, headers ) { + var isSuccess, success, error, response, modified, + statusText = nativeStatusText; + + // Ignore repeat invocations + if ( completed ) { + return; + } + + completed = true; + + // Clear timeout if it exists + if ( timeoutTimer ) { + window.clearTimeout( timeoutTimer ); + } + + // Dereference transport for early garbage collection + // (no matter how long the jqXHR object will be used) + transport = undefined; + + // Cache response headers + responseHeadersString = headers || ""; + + // Set readyState + jqXHR.readyState = status > 0 ? 4 : 0; + + // Determine if successful + isSuccess = status >= 200 && status < 300 || status === 304; + + // Get response data + if ( responses ) { + response = ajaxHandleResponses( s, jqXHR, responses ); + } + + // Convert no matter what (that way responseXXX fields are always set) + response = ajaxConvert( s, response, jqXHR, isSuccess ); + + // If successful, handle type chaining + if ( isSuccess ) { + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
+ if ( s.ifModified ) { + modified = jqXHR.getResponseHeader( "Last-Modified" ); + if ( modified ) { + jQuery.lastModified[ cacheURL ] = modified; + } + modified = jqXHR.getResponseHeader( "etag" ); + if ( modified ) { + jQuery.etag[ cacheURL ] = modified; + } + } + + // if no content + if ( status === 204 || s.type === "HEAD" ) { + statusText = "nocontent"; + + // if not modified + } else if ( status === 304 ) { + statusText = "notmodified"; + + // If we have data, let's convert it + } else { + statusText = response.state; + success = response.data; + error = response.error; + isSuccess = !error; + } + } else { + + // Extract error from statusText and normalize for non-aborts + error = statusText; + if ( status || !statusText ) { + statusText = "error"; + if ( status < 0 ) { + status = 0; + } + } + } + + // Set data for the fake xhr object + jqXHR.status = status; + jqXHR.statusText = ( nativeStatusText || statusText ) + ""; + + // Success/Error + if ( isSuccess ) { + deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); + } else { + deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); + } + + // Status-dependent callbacks + jqXHR.statusCode( statusCode ); + statusCode = undefined; + + if ( fireGlobals ) { + globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", + [ jqXHR, s, isSuccess ? success : error ] ); + } + + // Complete + completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); + + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); + + // Handle the global AJAX counter + if ( !( --jQuery.active ) ) { + jQuery.event.trigger( "ajaxStop" ); + } + } + } + + return jqXHR; + }, + + getJSON: function( url, data, callback ) { + return jQuery.get( url, data, callback, "json" ); + }, + + getScript: function( url, callback ) { + return jQuery.get( url, undefined, callback, "script" ); + } +} ); + +jQuery.each( [ "get", "post" ], function( i, method ) { + jQuery[ method ] = function( url, data, callback, type ) { + + // Shift arguments if data argument was omitted + if ( jQuery.isFunction( data ) ) { + type = type || callback; + callback = data; + data = undefined; + } + + // The url can be an options object (which then must have .url) + return jQuery.ajax( jQuery.extend( { + url: url, + type: method, + dataType: type, + data: data, + success: callback + }, jQuery.isPlainObject( url ) && url ) ); + }; +} ); + + +jQuery._evalUrl = function( url ) { + return jQuery.ajax( { + url: url, + + // Make this explicit, since user can override this through ajaxSetup (#11264) + type: "GET", + dataType: "script", + cache: true, + async: false, + global: false, + "throws": true + } ); +}; + + +jQuery.fn.extend( { + wrapAll: function( html ) { + var wrap; + + if ( this[ 0 ] ) { + if ( jQuery.isFunction( html ) ) { + html = html.call( this[ 0 ] ); + } + + // The elements to wrap the target around + wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true ); + + if ( this[ 0 ].parentNode ) { + wrap.insertBefore( this[ 0 ] ); + } + + wrap.map( function() { + var elem = this; + + while ( elem.firstElementChild ) { + elem = elem.firstElementChild; + } + + return elem; + } ).append( this ); + } + + return this; + }, + + wrapInner: function( html ) { + if ( jQuery.isFunction( html ) ) { + return this.each( function( i ) { + jQuery( this ).wrapInner( html.call( this, i ) ); + } ); + } + + return this.each( function() { + var self = jQuery( this ), + contents = self.contents(); + + if ( contents.length ) { + 
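+
+/* The `get`/`post` generators above make the shorthands pure sugar; `onList` is
+   an illustrative callback:
+
+   jQuery.getJSON( "/api/list", { page: 2 }, onList );
+   // is equivalent to:
+   jQuery.ajax( {
+       url: "/api/list",
+       type: "GET",
+       dataType: "json",
+       data: { page: 2 },
+       success: onList
+   } );
+*/
+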
contents.wrapAll( html ); + + } else { + self.append( html ); + } + } ); + }, + + wrap: function( html ) { + var isFunction = jQuery.isFunction( html ); + + return this.each( function( i ) { + jQuery( this ).wrapAll( isFunction ? html.call( this, i ) : html ); + } ); + }, + + unwrap: function( selector ) { + this.parent( selector ).not( "body" ).each( function() { + jQuery( this ).replaceWith( this.childNodes ); + } ); + return this; + } +} ); + + +jQuery.expr.pseudos.hidden = function( elem ) { + return !jQuery.expr.pseudos.visible( elem ); +}; +jQuery.expr.pseudos.visible = function( elem ) { + return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length ); +}; + + + + +jQuery.ajaxSettings.xhr = function() { + try { + return new window.XMLHttpRequest(); + } catch ( e ) {} +}; + +var xhrSuccessStatus = { + + // File protocol always yields status code 0, assume 200 + 0: 200, + + // Support: IE <=9 only + // #1450: sometimes IE returns 1223 when it should be 204 + 1223: 204 + }, + xhrSupported = jQuery.ajaxSettings.xhr(); + +support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); +support.ajax = xhrSupported = !!xhrSupported; + +jQuery.ajaxTransport( function( options ) { + var callback, errorCallback; + + // Cross domain only allowed if supported through XMLHttpRequest + if ( support.cors || xhrSupported && !options.crossDomain ) { + return { + send: function( headers, complete ) { + var i, + xhr = options.xhr(); + + xhr.open( + options.type, + options.url, + options.async, + options.username, + options.password + ); + + // Apply custom fields if provided + if ( options.xhrFields ) { + for ( i in options.xhrFields ) { + xhr[ i ] = options.xhrFields[ i ]; + } + } + + // Override mime type if needed + if ( options.mimeType && xhr.overrideMimeType ) { + xhr.overrideMimeType( options.mimeType ); + } + + // X-Requested-With header + // For cross-domain requests, seeing as conditions for a preflight are + // akin to a jigsaw puzzle, we simply never set it to be sure. + // (it can always be set on a per-request basis or even using ajaxSetup) + // For same-domain requests, won't change header if already provided. + if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) { + headers[ "X-Requested-With" ] = "XMLHttpRequest"; + } + + // Set headers + for ( i in headers ) { + xhr.setRequestHeader( i, headers[ i ] ); + } + + // Callback + callback = function( type ) { + return function() { + if ( callback ) { + callback = errorCallback = xhr.onload = + xhr.onerror = xhr.onabort = xhr.onreadystatechange = null; + + if ( type === "abort" ) { + xhr.abort(); + } else if ( type === "error" ) { + + // Support: IE <=9 only + // On a manual native abort, IE9 throws + // errors on any property access that is not readyState + if ( typeof xhr.status !== "number" ) { + complete( 0, "error" ); + } else { + complete( + + // File: protocol always yields status 0; see #8605, #14207 + xhr.status, + xhr.statusText + ); + } + } else { + complete( + xhrSuccessStatus[ xhr.status ] || xhr.status, + xhr.statusText, + + // Support: IE <=9 only + // IE9 has no XHR2 but throws on binary (trac-11426) + // For XHR2 non-text, let the caller handle it (gh-2498) + ( xhr.responseType || "text" ) !== "text" || + typeof xhr.responseText !== "string" ? 
+ { binary: xhr.response } : + { text: xhr.responseText }, + xhr.getAllResponseHeaders() + ); + } + } + }; + }; + + // Listen to events + xhr.onload = callback(); + errorCallback = xhr.onerror = callback( "error" ); + + // Support: IE 9 only + // Use onreadystatechange to replace onabort + // to handle uncaught aborts + if ( xhr.onabort !== undefined ) { + xhr.onabort = errorCallback; + } else { + xhr.onreadystatechange = function() { + + // Check readyState before timeout as it changes + if ( xhr.readyState === 4 ) { + + // Allow onerror to be called first, + // but that will not handle a native abort + // Also, save errorCallback to a variable + // as xhr.onerror cannot be accessed + window.setTimeout( function() { + if ( callback ) { + errorCallback(); + } + } ); + } + }; + } + + // Create the abort callback + callback = callback( "abort" ); + + try { + + // Do send the request (this may raise an exception) + xhr.send( options.hasContent && options.data || null ); + } catch ( e ) { + + // #14683: Only rethrow if this hasn't been notified as an error yet + if ( callback ) { + throw e; + } + } + }, + + abort: function() { + if ( callback ) { + callback(); + } + } + }; + } +} ); + + + + +// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432) +jQuery.ajaxPrefilter( function( s ) { + if ( s.crossDomain ) { + s.contents.script = false; + } +} ); + +// Install script dataType +jQuery.ajaxSetup( { + accepts: { + script: "text/javascript, application/javascript, " + + "application/ecmascript, application/x-ecmascript" + }, + contents: { + script: /\b(?:java|ecma)script\b/ + }, + converters: { + "text script": function( text ) { + jQuery.globalEval( text ); + return text; + } + } +} ); + +// Handle cache's special case and crossDomain +jQuery.ajaxPrefilter( "script", function( s ) { + if ( s.cache === undefined ) { + s.cache = false; + } + if ( s.crossDomain ) { + s.type = "GET"; + } +} ); + +// Bind script tag hack transport +jQuery.ajaxTransport( "script", function( s ) { + + // This transport only deals with cross domain requests + if ( s.crossDomain ) { + var script, callback; + return { + send: function( _, complete ) { + script = jQuery( " + with w.tag('body'): + if 'js' in self.html: + with w.xml_cleaning_method('none'): + with w.tag('script'): + w.data(self.html['js']) + if isinstance(self.html['table_id'], six.string_types): + html_table_id = self.html['table_id'] + else: + html_table_id = None + if 'table_class' in self.html: + html_table_class = self.html['table_class'] + attrib = {"class": html_table_class} + else: + attrib = {} + with w.tag('table', id=html_table_id, attrib=attrib): + with w.tag('thead'): + with w.tag('tr'): + for col in cols: + if len(col.shape) > 1 and self.html['multicol']: + # Set colspan attribute for multicolumns + w.start('th', colspan=col.shape[1]) + else: + w.start('th') + w.data(col.info.name.strip()) + w.end(indent=False) + col_str_iters = [] + new_cols_escaped = [] + for col, col_escaped in zip(cols, cols_escaped): + if len(col.shape) > 1 and self.html['multicol']: + span = col.shape[1] + for i in range(span): + # Split up multicolumns into separate columns + new_col = Column([el[i] for el in col]) + + new_col_iter_str_vals = self.fill_values(col, new_col.info.iter_str_vals()) + col_str_iters.append(new_col_iter_str_vals) + new_cols_escaped.append(col_escaped) + else: + + col_iter_str_vals = self.fill_values(col, col.info.iter_str_vals()) + col_str_iters.append(col_iter_str_vals) + + new_cols_escaped.append(col_escaped) 
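+
+                    # For example, a column of shape (n, 2) was written above
+                    # as a single <th> with colspan=2 and has been split here
+                    # into two one-dimensional Columns, so the row loop below
+                    # emits one <td> per sub-column.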
+
+                    for row in zip(*col_str_iters):
+                        with w.tag('tr'):
+                            for el, col_escaped in zip(row, new_cols_escaped):
+                                # Potentially disable HTML escaping for column
+                                method = ('escape_xml' if col_escaped else 'bleach_clean')
+                                with w.xml_cleaning_method(method, **raw_html_clean_kwargs):
+                                    w.start('td')
+                                    w.data(el.strip())
+                                    w.end(indent=False)
+
+        # Fixes XMLWriter's insertion of unwanted line breaks
+        return [''.join(lines)]
+
+    def fill_values(self, col, col_str_iters):
+        """
+        Return an iterator of the values with replacements based on fill_values
+        """
+        # check if the col is a masked column and has fill values
+        is_masked_column = hasattr(col, 'mask')
+        has_fill_values = hasattr(col, 'fill_values')
+
+        for idx, col_str in enumerate(col_str_iters):
+            if is_masked_column and has_fill_values:
+                if col.mask[idx]:
+                    yield col.fill_values[core.masked]
+                    continue
+
+            if has_fill_values:
+                if col_str in col.fill_values:
+                    yield col.fill_values[col_str]
+                    continue
+
+            yield col_str
diff --git a/astropy/io/ascii/ipac.py b/astropy/io/ascii/ipac.py
new file mode 100644
index 0000000..d178d22
--- /dev/null
+++ b/astropy/io/ascii/ipac.py
@@ -0,0 +1,526 @@
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+"""An extensible ASCII table reader and writer.
+
+ipac.py:
+  Classes to read IPAC table format
+
+:Copyright: Smithsonian Astrophysical Observatory (2011)
+:Author: Tom Aldcroft (aldcroft@head.cfa.harvard.edu)
+"""
+
+from __future__ import absolute_import, division, print_function
+
+import re
+from collections import defaultdict, OrderedDict
+from textwrap import wrap
+from warnings import warn
+
+from ...extern import six
+from ...extern.six.moves import zip
+
+from . import core
+from . import fixedwidth
+from . import basic
+from ...utils.exceptions import AstropyUserWarning
+from ...table.pprint import get_auto_format_func
+
+
+class IpacFormatErrorDBMS(Exception):
+    def __str__(self):
+        return '{0}\nSee {1}'.format(
+            super(Exception, self).__str__(),
+            'http://irsa.ipac.caltech.edu/applications/DDGEN/Doc/DBMSrestriction.html')
+
+
+class IpacFormatError(Exception):
+    def __str__(self):
+        return '{0}\nSee {1}'.format(
+            super(Exception, self).__str__(),
+            'http://irsa.ipac.caltech.edu/applications/DDGEN/Doc/ipac_tbl.html')
+
+
+class IpacHeaderSplitter(core.BaseSplitter):
+    '''Splitter for Ipac Headers.
+
+    This splitter is similar to its parent when reading, but supports a
+    fixed width format (as required for Ipac table headers) for writing.
+    '''
+    process_line = None
+    process_val = None
+    delimiter = '|'
+    delimiter_pad = ''
+    skipinitialspace = False
+    comment = r'\s*\\'
+    write_comment = r'\\'
+    col_starts = None
+    col_ends = None
+
+    def join(self, vals, widths):
+        pad = self.delimiter_pad or ''
+        delimiter = self.delimiter or ''
+        padded_delim = pad + delimiter + pad
+        bookend_left = delimiter + pad
+        bookend_right = pad + delimiter
+
+        vals = [' ' * (width - len(val)) + val for val, width in zip(vals, widths)]
+        return bookend_left + padded_delim.join(vals) + bookend_right
+
+
+class IpacHeader(fixedwidth.FixedWidthHeader):
+    """IPAC table header"""
+    splitter_class = IpacHeaderSplitter
+
+    # Defined ordered list of possible types. Ordering is needed to
+    # distinguish between "d" (double) and "da" (date) as defined by
+    # the IPAC standard for abbreviations. This gets used in get_col_type().
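+    # For example, an abbreviated raw type "d" resolves to core.FloatType
+    # because 'double' is checked before 'date', while "da" matches only
+    # 'date' and resolves to core.StrType.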
+    col_type_list = (('integer', core.IntType),
+                     ('long', core.IntType),
+                     ('double', core.FloatType),
+                     ('float', core.FloatType),
+                     ('real', core.FloatType),
+                     ('char', core.StrType),
+                     ('date', core.StrType))
+    definition = 'ignore'
+    start_line = None
+
+    def process_lines(self, lines):
+        """Generator to yield IPAC header lines, i.e. those starting and ending with
+        delimiter character (with trailing whitespace stripped)"""
+        delim = self.splitter.delimiter
+        for line in lines:
+            line = line.rstrip()
+            if line.startswith(delim) and line.endswith(delim):
+                yield line.strip(delim)
+
+    def update_meta(self, lines, meta):
+        """
+        Extract table-level comments and keywords for IPAC table.  See:
+        http://irsa.ipac.caltech.edu/applications/DDGEN/Doc/ipac_tbl.html#kw
+        """
+        def process_keyword_value(val):
+            """
+            Take a string value and convert to float, int or str, and strip quotes
+            as needed.
+            """
+            val = val.strip()
+            try:
+                val = int(val)
+            except Exception:
+                try:
+                    val = float(val)
+                except Exception:
+                    # Strip leading/trailing quote.  The spec says that a matched pair
+                    # of quotes is required, but this code will allow a non-quoted value.
+                    for quote in ('"', "'"):
+                        if val.startswith(quote) and val.endswith(quote):
+                            val = val[1:-1]
+                            break
+            return val
+
+        table_meta = meta['table']
+        table_meta['comments'] = []
+        table_meta['keywords'] = OrderedDict()
+        keywords = table_meta['keywords']
+
+        re_keyword = re.compile(r'\\'
+                                r'(?P<name> \w+)'
+                                r'\s* = (?P<value> .+) $',
+                                re.VERBOSE)
+        for line in lines:
+            # Keywords and comments start with "\".  Once the first non-slash
+            # line is seen then bail out.
+            if not line.startswith('\\'):
+                break
+
+            m = re_keyword.match(line)
+            if m:
+                name = m.group('name')
+                val = process_keyword_value(m.group('value'))
+
+                # IPAC allows for continuation keywords, e.g.
+                # \SQL = 'WHERE '
+                # \SQL = 'SELECT (25 column names follow in next row.)'
+                if name in keywords and isinstance(val, six.string_types):
+                    prev_val = keywords[name]['value']
+                    if isinstance(prev_val, six.string_types):
+                        val = prev_val + val
+
+                keywords[name] = {'value': val}
+            else:
+                # Comment is required to start with "\ "
+                if line.startswith('\\ '):
+                    val = line[2:].strip()
+                    if val:
+                        table_meta['comments'].append(val)
+
+    def get_col_type(self, col):
+        for (col_type_key, col_type) in self.col_type_list:
+            if col_type_key.startswith(col.raw_type.lower()):
+                return col_type
+        else:
+            raise ValueError('Unknown data type ""{}"" for column "{}"'.format(
+                col.raw_type, col.name))
+
+    def get_cols(self, lines):
+        """
+        Initialize the header Column objects from the table ``lines``.
+
+        Based on the previously set Header attributes find or create the column names.
+        Sets ``self.cols`` with the list of Columns.
+ + Parameters + ---------- + lines : list + List of table lines + + """ + header_lines = self.process_lines(lines) # generator returning valid header lines + header_vals = [vals for vals in self.splitter(header_lines)] + if len(header_vals) == 0: + raise ValueError('At least one header line beginning and ending with ' + 'delimiter required') + elif len(header_vals) > 4: + raise ValueError('More than four header lines were found') + + # Generate column definitions + cols = [] + start = 1 + for i, name in enumerate(header_vals[0]): + col = core.Column(name=name.strip(' -')) + col.start = start + col.end = start + len(name) + if len(header_vals) > 1: + col.raw_type = header_vals[1][i].strip(' -') + col.type = self.get_col_type(col) + if len(header_vals) > 2: + col.unit = header_vals[2][i].strip() or None # Can't strip dashes here + if len(header_vals) > 3: + # The IPAC null value corresponds to the io.ascii bad_value. + # In this case there isn't a fill_value defined, so just put + # in the minimal entry that is sure to convert properly to the + # required type. + # + # Strip spaces but not dashes (not allowed in NULL row per + # https://github.com/astropy/astropy/issues/361) + null = header_vals[3][i].strip() + fillval = '' if issubclass(col.type, core.StrType) else '0' + self.data.fill_values.append((null, fillval, col.name)) + start = col.end + 1 + cols.append(col) + + # Correct column start/end based on definition + if self.ipac_definition == 'right': + col.start -= 1 + elif self.ipac_definition == 'left': + col.end += 1 + + self.names = [x.name for x in cols] + self.cols = cols + + def str_vals(self): + + if self.DBMS: + IpacFormatE = IpacFormatErrorDBMS + else: + IpacFormatE = IpacFormatError + + namelist = self.colnames + if self.DBMS: + countnamelist = defaultdict(int) + for name in self.colnames: + countnamelist[name.lower()] += 1 + doublenames = [x for x in countnamelist if countnamelist[x] > 1] + if doublenames != []: + raise IpacFormatE('IPAC DBMS tables are not case sensitive. 
' + 'This causes duplicate column names: {0}'.format(doublenames)) + + for name in namelist: + m = re.match(r'\w+', name) + if m.end() != len(name): + raise IpacFormatE('{0} - Only alphanumeric characters and _ ' + 'are allowed in column names.'.format(name)) + if self.DBMS and not(name[0].isalpha() or (name[0] == '_')): + raise IpacFormatE('Column name cannot start with numbers: {}'.format(name)) + if self.DBMS: + if name in ['x', 'y', 'z', 'X', 'Y', 'Z']: + raise IpacFormatE('{0} - x, y, z, X, Y, Z are reserved names and ' + 'cannot be used as column names.'.format(name)) + if len(name) > 16: + raise IpacFormatE( + '{0} - Maximum length for column name is 16 characters'.format(name)) + else: + if len(name) > 40: + raise IpacFormatE( + '{0} - Maximum length for column name is 40 characters.'.format(name)) + + dtypelist = [] + unitlist = [] + nullist = [] + for col in self.cols: + col_dtype = col.info.dtype + col_unit = col.info.unit + col_format = col.info.format + + if col_dtype.kind in ['i', 'u']: + dtypelist.append('long') + elif col_dtype.kind == 'f': + dtypelist.append('double') + else: + dtypelist.append('char') + + if col_unit is None: + unitlist.append('') + else: + unitlist.append(str(col.info.unit)) + # This may be incompatible with mixin columns + null = col.fill_values[core.masked] + try: + auto_format_func = get_auto_format_func(col) + format_func = col.info._format_funcs.get(col_format, auto_format_func) + nullist.append((format_func(col_format, null)).strip()) + except Exception: + # It is possible that null and the column values have different + # data types (e.g. number and null = 'null' (i.e. a string). + # This could cause all kinds of exceptions, so a catch all + # block is needed here + nullist.append(str(null).strip()) + + return [namelist, dtypelist, unitlist, nullist] + + def write(self, lines, widths): + '''Write header. + + The width of each column is determined in Ipac.write. Writing the header + must be delayed until that time. + This function is called from there, once the width information is + available.''' + + for vals in self.str_vals(): + lines.append(self.splitter.join(vals, widths)) + return lines + + +class IpacDataSplitter(fixedwidth.FixedWidthSplitter): + delimiter = ' ' + delimiter_pad = '' + bookend = True + + +class IpacData(fixedwidth.FixedWidthData): + """IPAC table data reader""" + comment = r'[|\\]' + start_line = 0 + splitter_class = IpacDataSplitter + fill_values = [(core.masked, 'null')] + + def write(self, lines, widths, vals_list): + """ IPAC writer, modified from FixedWidth writer """ + for vals in vals_list: + lines.append(self.splitter.join(vals, widths)) + return lines + + +class Ipac(basic.Basic): + r"""Read or write an IPAC format table. 
See
+    http://irsa.ipac.caltech.edu/applications/DDGEN/Doc/ipac_tbl.html::
+
+        \\name=value
+        \\ Comment
+        |  column1 |  column2 | column3 |  column4 |     column5      |
+        |  double  |  double  |   int   |  double  |      char        |
+        |   unit   |   unit   |   unit  |   unit   |      unit        |
+        |   null   |   null   |   null  |   null   |      null        |
+         2.0978     29.09056   73765     2.06000    B8IVpMnHg
+
+    Or::
+
+        |-----ra---|----dec---|---sao---|------v---|----sptype--------|
+          2.09708   29.09056     73765    2.06000   B8IVpMnHg
+
+    The comments and keywords defined in the header are available via the output
+    table ``meta`` attribute::
+
+        >>> import os
+        >>> from astropy.io import ascii
+        >>> filename = os.path.join(ascii.__path__[0], 'tests/t/ipac.dat')
+        >>> data = ascii.read(filename)
+        >>> print(data.meta['comments'])
+        ['This is an example of a valid comment']
+        >>> for name, keyword in data.meta['keywords'].items():
+        ...     print(name, keyword['value'])
+        ...
+        intval 1
+        floatval 2300.0
+        date Wed Sp 20 09:48:36 1995
+        key_continue IPAC keywords can continue across lines
+
+    Note that there are different conventions for characters occurring below the
+    position of the ``|`` symbol in IPAC tables.  By default, any character
+    below a ``|`` will be ignored (since this is the current standard),
+    but if you need to read files that assume characters below the ``|``
+    symbols belong to the column before or after the ``|``, you can specify
+    ``definition='left'`` or ``definition='right'`` respectively when reading
+    the table (the default is ``definition='ignore'``).  The following examples
+    demonstrate the different conventions:
+
+    * ``definition='ignore'``::
+
+        |   ra  |  dec  |
+        | float | float |
+          1.2345  6.7890
+
+    * ``definition='left'``::
+
+        |   ra  |  dec  |
+        | float | float |
+          1.2345  6.7890
+
+    * ``definition='right'``::
+
+        |   ra  |  dec  |
+        | float | float |
+          1.2345  6.7890
+
+    IPAC tables can specify a null value in the header that is shown in place
+    of missing or bad data.  On writing, this value defaults to ``null``.
+    To specify a different null value, use the ``fill_values`` option to
+    replace masked values with a string or number of your choice as
+    described in :ref:`io_ascii_write_parameters`::
+
+        >>> from astropy.io.ascii import masked
+        >>> fill = [(masked, 'N/A', 'ra'), (masked, -999, 'sptype')]
+        >>> ascii.write(data, format='ipac', fill_values=fill)
+        \ This is an example of a valid comment
+        ...
+        |          ra|         dec|      sai|          v2|            sptype|
+        |      double|      double|     long|      double|              char|
+        |        unit|        unit|     unit|        unit|              ergs|
+        |         N/A|        null|     null|        null|              -999|
+                  N/A     29.09056      null         2.06               -999
+         2345678901.0 3456789012.0 456789012 4567890123.0 567890123456789012
+
+
+    Parameters
+    ----------
+    definition : str, optional
+        Specify the convention for characters in the data table that occur
+        directly below the pipe (``|``) symbol in the header column definition:
+
+        * 'ignore' - Any character beneath a pipe symbol is ignored (default)
+        * 'right' - Character is associated with the column to the right
+        * 'left' - Character is associated with the column to the left
+
+    DBMS : bool, optional
+        If ``True``, this verifies that written tables adhere (semantically)
+        to the `IPAC/DBMS
+        <http://irsa.ipac.caltech.edu/applications/DDGEN/Doc/DBMSrestriction.html>`_
+        definition of IPAC tables.  If ``False``, it only checks for the (less strict)
+        `IPAC <http://irsa.ipac.caltech.edu/applications/DDGEN/Doc/ipac_tbl.html>`_
+        definition.
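+
+    A minimal usage sketch of both keywords (``example.tbl`` is a placeholder
+    file name, not a file shipped with the package)::
+
+        >>> from astropy.io import ascii
+        >>> t = ascii.read('example.tbl', format='ipac', definition='left')  # doctest: +SKIP
+        >>> ascii.write(t, format='ipac', DBMS=True)  # doctest: +SKIP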
+    """
+    _format_name = 'ipac'
+    _io_registry_format_aliases = ['ipac']
+    _io_registry_can_write = True
+    _description = 'IPAC format table'
+
+    data_class = IpacData
+    header_class = IpacHeader
+
+    def __init__(self, definition='ignore', DBMS=False):
+        super(Ipac, self).__init__()
+        # Usually the header is not defined in __init__, but here it needs a keyword
+        if definition in ['ignore', 'left', 'right']:
+            self.header.ipac_definition = definition
+        else:
+            raise ValueError("definition should be one of ignore/left/right")
+        self.header.DBMS = DBMS
+
+    def write(self, table):
+        """
+        Write ``table`` as list of strings.
+
+        Parameters
+        ----------
+        table : `~astropy.table.Table`
+            Input table data
+
+        Returns
+        -------
+        lines : list
+            List of strings corresponding to ASCII table
+
+        """
+        # Set a default null value for all columns by adding at the end, which
+        # is the position with the lowest priority.
+        # We have to do it this late, because the fill_value
+        # defined in the class can be overwritten by ui.write
+        self.data.fill_values.append((core.masked, 'null'))
+
+        # Check column names before altering
+        self.header.cols = list(six.itervalues(table.columns))
+        self.header.check_column_names(self.names, self.strict_names, self.guessing)
+
+        core._apply_include_exclude_names(table, self.names, self.include_names, self.exclude_names)
+
+        # Now use altered columns
+        new_cols = list(six.itervalues(table.columns))
+        # link information about the columns to the writer object (i.e. self)
+        self.header.cols = new_cols
+        self.data.cols = new_cols
+
+        # Write header and data to lines list
+        lines = []
+        # Write meta information
+        if 'comments' in table.meta:
+            for comment in table.meta['comments']:
+                if len(str(comment)) > 78:
+                    warn('Comment string > 78 characters was automatically wrapped.',
+                         AstropyUserWarning)
+                for line in wrap(str(comment), 80, initial_indent='\\ ', subsequent_indent='\\ '):
+                    lines.append(line)
+        if 'keywords' in table.meta:
+            keydict = table.meta['keywords']
+            for keyword in keydict:
+                try:
+                    val = keydict[keyword]['value']
+                    lines.append('\\{0}={1!r}'.format(keyword.strip(), val))
+                    # meta is not standardized: Catch some common Errors.
+                except TypeError:
+                    warn("Table metadata keyword {0} has been skipped.  "
+                         "IPAC metadata must be in the form {{'keywords':"
+                         "{{'keyword': {{'value': value}} }}".format(keyword),
+                         AstropyUserWarning)
+        ignored_keys = [key for key in table.meta if key not in ('keywords', 'comments')]
+        if any(ignored_keys):
+            warn("Table metadata keyword(s) {0} were not written.  "
+                 "IPAC metadata must be in the form {{'keywords':"
+                 "{{'keyword': {{'value': value}} }}".format(ignored_keys),
+                 AstropyUserWarning
+                 )
+
+        # Usually, this is done in data.write, but since the header is written
+        # first, we need that here.
+        self.data._set_fill_values(self.data.cols)
+
+        # get header and data as strings to find width of each column
+        for i, col in enumerate(table.columns.values()):
+            col.headwidth = max([len(vals[i]) for vals in self.header.str_vals()])
+        # keep data_str_vals because they take some time to make
+        data_str_vals = []
+        col_str_iters = self.data.str_vals()
+        for vals in zip(*col_str_iters):
+            data_str_vals.append(vals)
+
+        for i, col in enumerate(table.columns.values()):
+            # FIXME: In Python 3.4, use max([], default=0).
+ # See: https://docs.python.org/3/library/functions.html#max + if data_str_vals: + col.width = max([len(vals[i]) for vals in data_str_vals]) + else: + col.width = 0 + + widths = [max(col.width, col.headwidth) for col in table.columns.values()] + # then write table + self.header.write(lines, widths) + self.data.write(lines, widths, data_str_vals) + + return lines diff --git a/astropy/io/ascii/latex.py b/astropy/io/ascii/latex.py new file mode 100644 index 0000000..6dee759 --- /dev/null +++ b/astropy/io/ascii/latex.py @@ -0,0 +1,444 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +"""An extensible ASCII table reader and writer. + +latex.py: + Classes to read and write LaTeX tables + +:Copyright: Smithsonian Astrophysical Observatory (2011) +:Author: Tom Aldcroft (aldcroft@head.cfa.harvard.edu) +""" + +from __future__ import absolute_import, division, print_function + +import re + +from ...extern import six +from ...extern.six.moves import zip +from . import core + +latexdicts = {'AA': {'tabletype': 'table', + 'header_start': r'\hline \hline', 'header_end': r'\hline', + 'data_end': r'\hline'}, + 'doublelines': {'tabletype': 'table', + 'header_start': r'\hline \hline', 'header_end': r'\hline\hline', + 'data_end': r'\hline\hline'}, + 'template': {'tabletype': 'tabletype', 'caption': 'caption', + 'tablealign': 'tablealign', + 'col_align': 'col_align', 'preamble': 'preamble', + 'header_start': 'header_start', + 'header_end': 'header_end', 'data_start': 'data_start', + 'data_end': 'data_end', 'tablefoot': 'tablefoot', + 'units': {'col1': 'unit of col1', 'col2': 'unit of col2'}} + } + + +RE_COMMENT = re.compile(r'(?`_ some header + keywords differ from standard LaTeX. + + This header is modified to take that into account. + ''' + header_start = r'\tablehead' + splitter_class = AASTexHeaderSplitter + + def start_line(self, lines): + return find_latex_line(lines, r'\tablehead') + + def write(self, lines): + if 'col_align' not in self.latex: + self.latex['col_align'] = len(self.cols) * 'c' + if 'tablealign' in self.latex: + align = '[' + self.latex['tablealign'] + ']' + else: + align = '' + lines.append(r'\begin{' + self.latex['tabletype'] + r'}{' + self.latex['col_align'] + r'}' + + align) + add_dictval_to_list(self.latex, 'preamble', lines) + if 'caption' in self.latex: + lines.append(r'\tablecaption{' + self.latex['caption'] + '}') + tablehead = ' & '.join([r'\colhead{' + name + '}' for name in self.colnames]) + units = self._get_units() + if 'units' in self.latex: + units.update(self.latex['units']) + if units: + tablehead += r'\\ ' + self.splitter.join([units.get(name, ' ') + for name in self.colnames]) + lines.append(r'\tablehead{' + tablehead + '}') + + +class AASTexData(LatexData): + r'''In a `deluxetable`_ the data is enclosed in `\startdata` and `\enddata` + ''' + data_start = r'\startdata' + data_end = r'\enddata' + + def start_line(self, lines): + return find_latex_line(lines, self.data_start) + 1 + + def write(self, lines): + lines.append(self.data_start) + lines_length_initial = len(lines) + core.BaseData.write(self, lines) + # To remove extra space(s) and // appended which creates an extra new line + # in the end. 
+        if len(lines) > lines_length_initial:
+            # we compile separately because py2.6 doesn't have a flags keyword in re.sub
+            re_final_line = re.compile(r'\s* \\ \\ \s* $', flags=re.VERBOSE)
+            lines[-1] = re.sub(re_final_line, '', lines[-1])
+        lines.append(self.data_end)
+        add_dictval_to_list(self.latex, 'tablefoot', lines)
+        lines.append(r'\end{' + self.latex['tabletype'] + r'}')
+
+
+class AASTex(Latex):
+    '''Write and read AASTeX tables.
+
+    This class implements some AASTeX specific commands.
+    AASTeX is used for the AAS (American Astronomical Society)
+    publications like ApJ, ApJL and AJ.
+
+    It derives from the ``Latex`` reader and accepts the same
+    keywords.  However, the keywords ``header_start``, ``header_end``,
+    ``data_start`` and ``data_end`` in ``latexdict`` have no effect.
+    '''
+
+    _format_name = 'aastex'
+    _io_registry_format_aliases = ['aastex']
+    _io_registry_suffix = ''  # AASTex inherits from Latex, so override this class attr
+    _description = 'AASTeX deluxetable used for AAS journals'
+
+    header_class = AASTexHeader
+    data_class = AASTexData
+
+    def __init__(self, **kwargs):
+        super(AASTex, self).__init__(**kwargs)
+        # check if tabletype was explicitly set by the user
+        if not (('latexdict' in kwargs) and ('tabletype' in kwargs['latexdict'])):
+            self.latex['tabletype'] = 'deluxetable'
diff --git a/astropy/io/ascii/misc.py b/astropy/io/ascii/misc.py
new file mode 100644
index 0000000..b893635
--- /dev/null
+++ b/astropy/io/ascii/misc.py
@@ -0,0 +1,129 @@
+"""A collection of useful miscellaneous functions.
+
+misc.py:
+  Collection of useful miscellaneous functions.
+
+:Author: Hannes Breytenbach (hannes@saao.ac.za)
+"""
+
+from __future__ import absolute_import, division, print_function
+
+import collections
+import itertools
+import operator
+
+from ...extern.six.moves import zip, map, filter
+
+
+def first_true_index(iterable, pred=None, default=None):
+    """find the first index position for which the callable pred returns True"""
+    if pred is None:
+        func = operator.itemgetter(1)
+    else:
+        func = lambda x: pred(x[1])
+    ii = next(filter(func, enumerate(iterable)), default)  # either index-item pair or default
+    return ii[0] if ii else default
+
+
+def first_false_index(iterable, pred=None, default=None):
+    """find the first index position for which the callable pred returns False"""
+    if pred is None:
+        func = operator.not_
+    else:
+        func = lambda x: not pred(x)
+    return first_true_index(iterable, func, default)
+
+
+def sortmore(*args, **kw):
+    """
+    Sorts any number of lists according to optionally given item sorting
+    key function(s) and/or a global sorting key function.
+
+    Parameters
+    ----------
+    One or more lists
+
+    Keywords
+    --------
+    globalkey : None
+        revert to sorting by key function
+    globalkey : callable
+        Sort by evaluated value for all items in the lists
+        (the call signature of this function needs to be such that it accepts
+        an argument tuple of items from each list;
+        e.g. ``globalkey = lambda *l: sum(l)`` will order all the lists by the
+        sum of the items from each list)
+
+    if key: None
+        sorting done by value of first input list
+        (in this case the objects in the first iterable need the comparison
+        methods __lt__ etc...)
+    if key: callable
+        sorting done by value of key(item) for items in first iterable
+    if key: tuple
+        sorting done by value of (key(item_0), ..., key(item_n)) for items in
+        the first n iterables (where n is the length of the key tuple)
+        i.e. the first callable is the primary sorting criterion, and the
+        rest act as tie-breakers.
+
+    Returns
+    -------
+    Sorted lists
+
+    Examples
+    --------
+    Capture sorting indices:
+        l = list('CharacterS')
+        In [1]: sortmore( l, range(len(l)) )
+        Out[1]: (['C', 'S', 'a', 'a', 'c', 'e', 'h', 'r', 'r', 't'],
+                 [0, 9, 2, 4, 5, 7, 1, 3, 8, 6])
+        In [2]: sortmore( l, range(len(l)), key=str.lower )
+        Out[2]: (['a', 'a', 'C', 'c', 'e', 'h', 'r', 'r', 'S', 't'],
+                 [2, 4, 0, 5, 7, 1, 3, 8, 9, 6])
+    """
+
+    first = list(args[0])
+    if not len(first):
+        return args
+
+    globalkey = kw.get('globalkey')
+    key = kw.get('key')
+    if key is None:
+        if globalkey:
+            # if global sort function given and no local (secondary) key given, ==> no tiebreakers
+            key = lambda x: 0
+        else:
+            key = lambda x: x  # if no global sort and no local sort keys given, sort by item values
+    if globalkey is None:
+        globalkey = lambda *x: 0
+
+    if not isinstance(globalkey, collections.Callable):
+        raise ValueError('globalkey needs to be callable')
+
+    if isinstance(key, collections.Callable):
+        k = lambda x: (globalkey(*x), key(x[0]))
+    elif isinstance(key, tuple):
+        key = (k if k else lambda x: 0 for k in key)
+        k = lambda x: (globalkey(*x),) + tuple(f(z) for (f, z) in zip(key, x))
+    else:
+        raise KeyError(
+            "kw arg 'key' should be None, callable, or a sequence of callables, not {}"
+            .format(type(key)))
+
+    res = sorted(list(zip(*args)), key=k)
+    if 'order' in kw:
+        if kw['order'].startswith(('descend', 'reverse')):
+            res = reversed(res)
+
+    return tuple(map(list, zip(*res)))
+
+
+def groupmore(func=None, *its):
+    """Extends the itertools.groupby functionality to arbitrary number of iterators."""
+    if not func:
+        func = lambda x: x
+    its = sortmore(*its, key=func)
+    nfunc = lambda x: func(x[0])
+    zipper = itertools.groupby(zip(*its), nfunc)
+    unzipper = ((key, zip(*groups)) for key, groups in zipper)
+    return unzipper
diff --git a/astropy/io/ascii/rst.py b/astropy/io/ascii/rst.py
new file mode 100644
index 0000000..22cd443
--- /dev/null
+++ b/astropy/io/ascii/rst.py
@@ -0,0 +1,63 @@
+# Licensed under a 3-clause BSD style license
+"""
+:Author: Simon Gibbons (simongibbons@gmail.com)
+"""
+
+from __future__ import absolute_import, division, print_function
+
+from .core import DefaultSplitter
+from .fixedwidth import (FixedWidth,
+                         FixedWidthData,
+                         FixedWidthHeader,
+                         FixedWidthTwoLineDataSplitter)
+
+
+class SimpleRSTHeader(FixedWidthHeader):
+    position_line = 0
+    start_line = 1
+    splitter_class = DefaultSplitter
+    position_char = '='
+
+    def get_fixedwidth_params(self, line):
+        vals, starts, ends = super(SimpleRSTHeader, self).get_fixedwidth_params(line)
+        # The right hand column can be unbounded
+        ends[-1] = None
+        return vals, starts, ends
+
+
+class SimpleRSTData(FixedWidthData):
+    start_line = 3
+    end_line = -1
+    splitter_class = FixedWidthTwoLineDataSplitter
+
+
+class RST(FixedWidth):
+    """
+    Read or write a `reStructuredText simple format table
+    <http://docutils.sourceforge.net/docs/ref/rst/restructuredtext.html#simple-tables>`_.
+
+    Example::
+
+        ==== ===== ======
+        Col1  Col2  Col3
+        ==== ===== ======
+          1    2.3  Hello
+          2    4.5  Worlds
+        ==== ===== ======
+
+    Currently there is no support for reading tables which utilize continuation lines,
+    or for ones which define column spans through the use of an additional
+    line of dashes in the header.
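+
+    A minimal write sketch (the table content here is illustrative and the
+    output goes to ``sys.stdout``)::
+
+        >>> from astropy.io import ascii
+        >>> from astropy.table import Table
+        >>> t = Table([[1, 2], [2.3, 4.5]], names=['Col1', 'Col2'])
+        >>> ascii.write(t, format='rst')  # doctest: +SKIP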
+ """ + _format_name = 'rst' + _description = 'reStructuredText simple table' + data_class = SimpleRSTData + header_class = SimpleRSTHeader + + def __init__(self): + super(RST, self).__init__(delimiter_pad=None, bookend=False) + + def write(self, lines): + lines = super(RST, self).write(lines) + lines = [lines[1]] + lines + [lines[1]] + return lines diff --git a/astropy/io/ascii/setup_package.py b/astropy/io/ascii/setup_package.py new file mode 100644 index 0000000..ed76b12 --- /dev/null +++ b/astropy/io/ascii/setup_package.py @@ -0,0 +1,94 @@ +# Licensed under a 3-clause BSD style license +from __future__ import absolute_import + +import os +from distutils.extension import Extension + +ROOT = os.path.relpath(os.path.dirname(__file__)) + + +def get_extensions(): + sources = [os.path.join(ROOT, 'cparser.pyx'), + os.path.join(ROOT, 'src', 'tokenizer.c')] + ascii_ext = Extension( + name="astropy.io.ascii.cparser", + include_dirs=["numpy"], + sources=sources) + return [ascii_ext] + + +def get_package_data(): + # Installs the testing data files. Unable to get package_data + # to deal with a directory hierarchy of files, so just explicitly list. + return { + 'astropy.io.ascii.tests': ['t/vizier/ReadMe', + 't/vizier/table1.dat', + 't/vizier/table5.dat', + 't/apostrophe.rdb', + 't/apostrophe.tab', + 't/bad.txt', + 't/bars_at_ends.txt', + 't/cds.dat', + 't/cds_malformed.dat', + 't/cds/glob/ReadMe', + 't/cds/glob/lmxbrefs.dat', + 't/cds/multi/ReadMe', + 't/cds/multi/lhs2065.dat', + 't/cds/multi/lp944-20.dat', + 't/cds2.dat', + 't/commented_header.dat', + 't/commented_header2.dat', + 't/continuation.dat', + 't/daophot.dat', + 't/daophot2.dat', + 't/daophot3.dat', + 't/daophot4.dat', + 't/sextractor.dat', + 't/sextractor2.dat', + 't/sextractor3.dat', + 't/daophot.dat.gz', + 't/fill_values.txt', + 't/html.html', + 't/html2.html', + 't/ipac.dat', + 't/ipac.dat.bz2', + 't/ipac.dat.xz', + 't/latex1.tex', + 't/latex1.tex.gz', + 't/latex2.tex', + 't/latex3.tex', + 't/nls1_stackinfo.dbout', + 't/no_data_cds.dat', + 't/no_data_daophot.dat', + 't/no_data_sextractor.dat', + 't/no_data_ipac.dat', + 't/no_data_with_header.dat', + 't/no_data_without_header.dat', + 't/short.rdb', + 't/short.rdb.bz2', + 't/short.rdb.gz', + 't/short.rdb.xz', + 't/short.tab', + 't/simple.txt', + 't/simple2.txt', + 't/simple3.txt', + 't/simple4.txt', + 't/simple5.txt', + 't/space_delim_blank_lines.txt', + 't/space_delim_no_header.dat', + 't/space_delim_no_names.dat', + 't/test4.dat', + 't/test5.dat', + 't/vots_spec.dat', + 't/whitespace.dat', + 't/simple_csv.csv', + 't/simple_csv_missing.csv', + 't/fixed_width_2_line.txt', + 't/cds/description/ReadMe', + 't/cds/description/table.dat', + ] + } + + +def requires_2to3(): + return False diff --git a/astropy/io/ascii/sextractor.py b/astropy/io/ascii/sextractor.py new file mode 100644 index 0000000..741e2de --- /dev/null +++ b/astropy/io/ascii/sextractor.py @@ -0,0 +1,151 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" sextractor.py: + Classes to read SExtractor table format + +Built on daophot.py: +:Copyright: Smithsonian Astrophysical Observatory (2011) +:Author: Tom Aldcroft (aldcroft@head.cfa.harvard.edu) +""" + +from __future__ import absolute_import, division, print_function + +import re + +from . 
import core
+from ...extern.six.moves import range
+
+
+class SExtractorHeader(core.BaseHeader):
+    """Read the header from a file produced by SExtractor."""
+    comment = r'^\s*#\s*\S\D.*'  # Find lines that don't have "# digit"
+
+    def get_cols(self, lines):
+        """
+        Initialize the header Column objects from the table ``lines`` for a SExtractor
+        header.  The SExtractor header is specialized so that we just copy the entire BaseHeader
+        get_cols routine and modify as needed.
+
+        Parameters
+        ----------
+        lines : list
+            List of table lines
+
+        """
+
+        # This assumes that the columns are listed in order, one per line with a
+        # header comment string of the format: "# 1 ID short description [unit]"
+        # However, some may be missing and must be inferred from skipped column numbers
+        columns = {}
+        # E.g. '# 1 ID identification number' (without units) or '# 2 MAGERR magnitude of error [mag]'
+        # Updated along with issue #4603, for more robust parsing of unit
+        re_name_def = re.compile(r"""^\s* \# \s*                # possible whitespace around #
                                     (?P<colnumber> [0-9]+)\s+  # number of the column in table
                                     (?P<colname> [-\w]+)       # name of the column
                                     (?:\s+(?P<coldescr> \w .+) # column description, match any character until...
                                     (?:\[(?P<colunit>.+)\])?.*)?  # match units in brackets
                                     """, re.VERBOSE)
+        dataline = None
+        for line in lines:
+            if not line.startswith('#'):
+                dataline = line  # save for later to infer the actual number of columns
+                break  # End of header lines
+            else:
+                match = re_name_def.search(line)
+                if match:
+                    colnumber = int(match.group('colnumber'))
+                    colname = match.group('colname')
+                    coldescr = match.group('coldescr')
+                    colunit = match.group('colunit')  # If no units are given, colunit = None
+                    columns[colnumber] = (colname, coldescr, colunit)
+        # Handle skipped column numbers
+        colnumbers = sorted(columns)
+        # Handle the case where the last column is array-like by appending a pseudo-column.
+        # If there are more data columns than the largest column number
+        # then add a pseudo-column that will be dropped later.  This allows
+        # the array column logic below to work in all cases.
+        if dataline is not None:
+            n_data_cols = len(dataline.split())
+        else:
+            n_data_cols = colnumbers[-1]  # handles no data, where we have to rely on the last column number
+        # SExtractor column numbers start at 1.
+        columns[n_data_cols + 1] = (None, None, None)
+        colnumbers.append(n_data_cols + 1)
+        if len(columns) > 1:  # only fill in skipped columns when there is a genuine column initially
+            previous_column = 0
+            for n in colnumbers:
+                if n != previous_column + 1:
+                    for c in range(previous_column + 1, n):
+                        column_name = columns[previous_column][0] + "_{}".format(c - previous_column)
+                        column_descr = columns[previous_column][1]
+                        column_unit = columns[previous_column][2]
+                        columns[c] = (column_name, column_descr, column_unit)
+                previous_column = n
+        # Add the columns in order to self.names
+        colnumbers = sorted(columns)[:-1]  # drop the pseudo column
+        self.names = []
+        for n in colnumbers:
+            self.names.append(columns[n][0])
+
+        if not self.names:
+            raise core.InconsistentTableError('No column names found in SExtractor header')
+
+        self.cols = []
+        for n in colnumbers:
+            col = core.Column(name=columns[n][0])
+            col.description = columns[n][1]
+            col.unit = columns[n][2]
+            self.cols.append(col)
+
+
+class SExtractorData(core.BaseData):
+    start_line = 0
+    delimiter = ' '
+    comment = r'\s*#'
+
+
+class SExtractor(core.BaseReader):
+    """Read a SExtractor file.
+
+    SExtractor is a package for faint-galaxy photometry.
+    Bertin & Arnouts 1996, A&A Supp. 317, 393.
+ http://www.astromatic.net/software/sextractor + + Example:: + + # 1 NUMBER + # 2 ALPHA_J2000 + # 3 DELTA_J2000 + # 4 FLUX_RADIUS + # 7 MAG_AUTO [mag] + # 8 X2_IMAGE Variance along x [pixel**2] + # 9 X_MAMA Barycenter position along MAMA x axis [m**(-6)] + # 10 MU_MAX Peak surface brightness above background [mag * arcsec**(-2)] + 1 32.23222 10.1211 0.8 1.2 1.4 18.1 1000.0 0.00304 -3.498 + 2 38.12321 -88.1321 2.2 2.4 3.1 17.0 1500.0 0.00908 1.401 + + Note the skipped numbers since flux_radius has 3 columns. The three FLUX_RADIUS + columns will be named FLUX_RADIUS, FLUX_RADIUS_1, FLUX_RADIUS_2 + Also note that a post-ID description (e.g. "Variance along x") is + optional and that units may be specified at the end of a line in brackets. + """ + _format_name = 'sextractor' + _io_registry_can_write = False + _description = 'SExtractor format table' + + header_class = SExtractorHeader + data_class = SExtractorData + inputter_class = core.ContinuationLinesInputter + + def read(self, table): + """ + Read input data (file-like object, filename, list of strings, or + single string) into a Table and return the result. + """ + out = super(SExtractor, self).read(table) + # remove the comments + if 'comments' in out.meta: + del out.meta['comments'] + return out + + def write(self, table): + raise NotImplementedError diff --git a/astropy/io/ascii/src/tokenizer.c b/astropy/io/ascii/src/tokenizer.c new file mode 100644 index 0000000..ab50da8 --- /dev/null +++ b/astropy/io/ascii/src/tokenizer.c @@ -0,0 +1,1062 @@ +// Licensed under a 3-clause BSD style license - see LICENSE.rst + +#include "tokenizer.h" + +tokenizer_t *create_tokenizer(char delimiter, char comment, char quotechar, char expchar, + int fill_extra_cols, int strip_whitespace_lines, + int strip_whitespace_fields, int use_fast_converter) +{ + // Create the tokenizer in memory + tokenizer_t *tokenizer = (tokenizer_t *) malloc(sizeof(tokenizer_t)); + + // Initialize the tokenizer fields + tokenizer->source = NULL; + tokenizer->source_len = 0; + tokenizer->source_pos = 0; + tokenizer->delimiter = delimiter; + tokenizer->comment = comment; + tokenizer->quotechar = quotechar; + tokenizer->expchar = expchar; + tokenizer->output_cols = NULL; + tokenizer->col_ptrs = NULL; + tokenizer->output_len = NULL; + tokenizer->num_cols = 0; + tokenizer->num_rows = 0; + tokenizer->fill_extra_cols = fill_extra_cols; + tokenizer->state = START_LINE; + tokenizer->code = NO_ERROR; + tokenizer->iter_col = 0; + tokenizer->curr_pos = NULL; + tokenizer->strip_whitespace_lines = strip_whitespace_lines; + tokenizer->strip_whitespace_fields = strip_whitespace_fields; + tokenizer->use_fast_converter = use_fast_converter; + tokenizer->comment_lines = (char *) malloc(INITIAL_COMMENT_LEN); + tokenizer->comment_pos = 0; + tokenizer->comment_lines_len = 0; + + // This is a bit of a hack -- buf holds an empty string to represent + // empty field values + tokenizer->buf = calloc(2, sizeof(char)); + + return tokenizer; +} + + +void delete_data(tokenizer_t *tokenizer) +{ + // Don't free tokenizer->source because it points to part of + // an already freed Python object + int i; + + if (tokenizer->output_cols) + { + for (i = 0; i < tokenizer->num_cols; ++i) + { + free(tokenizer->output_cols[i]); + } + } + + free(tokenizer->output_cols); + free(tokenizer->col_ptrs); + free(tokenizer->output_len); + + // Set pointers to 0 so we don't use freed memory when reading over again + tokenizer->output_cols = 0; + tokenizer->col_ptrs = 0; + tokenizer->output_len = 0; +} + + +void 
delete_tokenizer(tokenizer_t *tokenizer)
+{
+    delete_data(tokenizer);
+    free(tokenizer->comment_lines);
+    free(tokenizer->buf);
+    free(tokenizer);
+}
+
+
+void resize_col(tokenizer_t *self, int index)
+{
+    // Temporarily store the position in output_cols[index] to
+    // which col_ptrs[index] points
+    long diff = self->col_ptrs[index] - self->output_cols[index];
+
+    // Double the size of the column string
+    self->output_cols[index] = (char *) realloc(self->output_cols[index], 2 *
+                                                self->output_len[index] * sizeof(char));
+
+    // Set the second (newly allocated) half of the column string to all zeros
+    memset(self->output_cols[index] + self->output_len[index] * sizeof(char), 0,
+           self->output_len[index] * sizeof(char));
+
+    self->output_len[index] *= 2;
+    // realloc() might move the address in memory, so we have to move
+    // col_ptrs[index] to an offset of the new address
+    self->col_ptrs[index] = self->output_cols[index] + diff;
+}
+
+
+void resize_comments(tokenizer_t *self)
+{
+    // Grow the comments string to just past the current write position
+    self->comment_lines = (char *) realloc(self->comment_lines,
+                                           self->comment_pos + 1);
+    // Set the newly allocated part of the comments string to all zeros
+    memset(self->comment_lines + self->comment_lines_len * sizeof(char), 0,
+           (self->comment_pos + 1 - self->comment_lines_len) * sizeof(char));
+
+    self->comment_lines_len = self->comment_pos + 1;
+}
+
+/*
+  Resize the column string if necessary and then append c to the
+  end of the column string, incrementing the column position pointer.
+*/
+static inline void push(tokenizer_t *self, char c, int col)
+{
+    if (self->col_ptrs[col] - self->output_cols[col] >=
+        self->output_len[col])
+    {
+        resize_col(self, col);
+    }
+
+    *self->col_ptrs[col]++ = c;
+}
+
+
+/*
+  Resize the comment string if necessary and then append c to the
+  end of the comment string.
+*/
+static inline void push_comment(tokenizer_t *self, char c)
+{
+    if (self->comment_pos >= self->comment_lines_len)
+    {
+        resize_comments(self);
+    }
+    self->comment_lines[self->comment_pos++] = c;
+}
+
+
+static inline void end_comment(tokenizer_t *self)
+{
+    // Signal empty comment by inserting \x01
+    if (self->comment_pos == 0 || self->comment_lines[self->comment_pos - 1] == '\x00')
+    {
+        push_comment(self, '\x01');
+    }
+    push_comment(self, '\x00');
+}
+
+
+#define PUSH(c) push(self, c, col)
+
+
+/* Set the state to START_FIELD and begin with the assumption that
+   the field is entirely whitespace in order to handle the possibility
+   that the comment character is found before any non-whitespace even
+   if whitespace stripping is disabled.
+*/
+#define BEGIN_FIELD() \
+    self->state = START_FIELD; \
+    whitespace = 1
+
+
+/*
+  First, backtrack to eliminate trailing whitespace if strip_whitespace_fields
+  is true.  If the field is empty, push '\x01' as a marker.
+  Append a null byte to the end of the column string as a field delimiting marker.
+  Increment the variable col if we are tokenizing data.
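+
+  For example, with strip_whitespace_fields set, a field ending "a b  " has
+  its trailing blanks overwritten, leaving "a b\x00" in the column string,
+  while a field that is empty after stripping is stored as "\x01\x00".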
+*/ +static inline void end_field(tokenizer_t *self, int *col, int header) +{ + if (self->strip_whitespace_fields && + self->col_ptrs[*col] != self->output_cols[*col]) + { + --self->col_ptrs[*col]; + while (*self->col_ptrs[*col] == ' ' || *self->col_ptrs[*col] == '\t') + { + *self->col_ptrs[*col]-- = '\x00'; + } + ++self->col_ptrs[*col]; + } + if (self->col_ptrs[*col] == self->output_cols[*col] || + self->col_ptrs[*col][-1] == '\x00') + { + push(self, '\x01', *col); + } + push(self, '\x00', *col); + if (!header) { + ++*col; + } +} + + +#define END_FIELD() end_field(self, &col, header) + + +// Set the error code to c for later retrieval and return c +#define RETURN(c) \ + do { \ + self->code = c; \ + return c; \ + } while (0) + + +/* + If we are tokenizing the header, end after the first line. + Handle the possibility of insufficient columns appropriately; + if fill_extra_cols=1, then append empty fields, but otherwise + return an error. Increment our row count and possibly end if + all the necessary rows have already been parsed. +*/ +static inline int end_line(tokenizer_t *self, int col, int header, int end, + tokenizer_state *old_state) +{ + if (header) + { + ++self->source_pos; + RETURN(NO_ERROR); + } + else if (self->fill_extra_cols) + { + while (col < self->num_cols) + { + PUSH('\x01'); + END_FIELD(); + } + } + else if (col < self->num_cols) + { + RETURN(NOT_ENOUGH_COLS); + } + + ++self->num_rows; + *old_state = START_LINE; + + if (end != -1 && self->num_rows == end) + { + ++self->source_pos; + RETURN(NO_ERROR); + } + return -1; +} + + +#define END_LINE() if (end_line(self, col, header, end, &old_state) != -1) return self->code + + +int skip_lines(tokenizer_t *self, int offset, int header) +{ + int signif_chars = 0; + int comment = 0; + int i = 0; + char c; + + while (i < offset) + { + if (self->source_pos >= self->source_len) + { + if (header) + RETURN(INVALID_LINE); // header line is required + else + RETURN(NO_ERROR); // no data in input + } + + c = self->source[self->source_pos]; + + if (c == '\r' || c == '\n') + { + if (c == '\r' && self->source_pos < self->source_len - 1 && + self->source[self->source_pos + 1] == '\n') + { + ++self->source_pos; // skip \n in \r\n + } + if (!comment && signif_chars > 0) + ++i; + else if (comment && !header) + end_comment(self); + // Start by assuming a line is empty and non-commented + signif_chars = 0; + comment = 0; + } + else if ((c != ' ' && c != '\t') || !self->strip_whitespace_lines) + { + // comment line + if (!signif_chars && self->comment != 0 && c == self->comment) + comment = 1; + else if (comment && !header) + push_comment(self, c); + + // significant character encountered + ++signif_chars; + } + else if (comment && !header) + { + push_comment(self, c); + } + + ++self->source_pos; + } + + RETURN(NO_ERROR); +} + + +int tokenize(tokenizer_t *self, int end, int header, int num_cols) +{ + char c; // input character + int col = 0; // current column ignoring possibly excluded columns + tokenizer_state old_state = START_LINE; // last state the tokenizer was in before CR mode + int parse_newline = 0; // explicit flag to treat current char as a newline + int i = 0; + int whitespace = 1; + delete_data(self); // clear old reading data + self->num_rows = 0; + self->comment_lines_len = INITIAL_COMMENT_LEN; + + if (header) + self->num_cols = 1; // store header output in one column + else + self->num_cols = num_cols; + + // Allocate memory for structures used during tokenization + self->output_cols = (char **) malloc(self->num_cols * sizeof(char 
*)); + self->col_ptrs = (char **) malloc(self->num_cols * sizeof(char *)); + self->output_len = (int *) malloc(self->num_cols * sizeof(int)); + + for (i = 0; i < self->num_cols; ++i) + { + self->output_cols[i] = (char *) calloc(1, INITIAL_COL_SIZE * + sizeof(char)); + // Make each col_ptrs pointer point to the beginning of the + // column string + self->col_ptrs[i] = self->output_cols[i]; + self->output_len[i] = INITIAL_COL_SIZE; + } + + if (end == 0) + RETURN(NO_ERROR); // don't read if end == 0 + + self->state = START_LINE; + + // Loop until all of self->source has been read + while (self->source_pos < self->source_len + 1) + { + if (self->source_pos == self->source_len || parse_newline) + c = '\n'; + else + c = self->source[self->source_pos]; + + if (c == '\r') + c = '\n'; + + parse_newline = 0; + + switch (self->state) + { + case START_LINE: + if (c == '\n') + break; + else if ((c == ' ' || c == '\t') && self->strip_whitespace_lines) + break; + else if (self->comment != 0 && c == self->comment) + { + // comment line; ignore + self->state = COMMENT; + break; + } + // initialize variables for the beginning of line parsing + col = 0; + BEGIN_FIELD(); + // parse in mode START_FIELD + + case START_FIELD: + // strip whitespace before field begins + if ((c == ' ' || c == '\t') && self->strip_whitespace_fields) + break; + else if (!self->strip_whitespace_lines && self->comment != 0 && + c == self->comment) + { + // comment line, not caught earlier because of no stripping + self->state = COMMENT; + break; + } + else if (c == self->delimiter) // field ends before it begins + { + if (col >= self->num_cols) + RETURN(TOO_MANY_COLS); + END_FIELD(); + BEGIN_FIELD(); + break; + } + else if (c == '\n') + { + if (self->strip_whitespace_lines) + { + // Move on if the delimiter is whitespace, e.g. + // '1 2 3 '->['1','2','3'] + if (self->delimiter == ' ' || self->delimiter == '\t') + ; + // Register an empty field if non-whitespace delimiter, + // e.g. '1,2, '->['1','2',''] + else + { + if (col >= self->num_cols) + RETURN(TOO_MANY_COLS); + END_FIELD(); + } + } + + else if (!self->strip_whitespace_lines) + { + // In this case we don't want to left-strip the field, + // so we backtrack + size_t tmp = self->source_pos; + --self->source_pos; + + while (self->source_pos >= 0 && + self->source[self->source_pos] != self->delimiter + && self->source[self->source_pos] != '\n' + && self->source[self->source_pos] != '\r') + { + --self->source_pos; + } + + // backtracked to line beginning + if (self->source_pos == -1 + || self->source[self->source_pos] == '\n' + || self->source[self->source_pos] == '\r') + { + self->source_pos = tmp; + } + else + { + ++self->source_pos; + + if (self->source_pos == tmp) + // no whitespace, just an empty field + ; + + else + while (self->source_pos < tmp) + { + // append whitespace characters + PUSH(self->source[self->source_pos]); + ++self->source_pos; + } + + if (col >= self->num_cols) + RETURN(TOO_MANY_COLS); + END_FIELD(); // whitespace counts as a field + } + } + + END_LINE(); + self->state = START_LINE; + break; + } + else if (c == self->quotechar) // start parsing quoted field + { + self->state = START_QUOTED_FIELD; + break; + } + else + { + if (col >= self->num_cols) + RETURN(TOO_MANY_COLS); + // Valid field character, parse again in FIELD mode + self->state = FIELD; + } + + case FIELD: + if (self->comment != 0 && c == self->comment && whitespace && col == 0) + { + // No whitespace stripping, but the comment char is found + // before any data, e.g. 
' # a b c'
+                self->state = COMMENT;
+            }
+            else if (c == self->delimiter)
+            {
+                // End of field, look for new field
+                END_FIELD();
+                BEGIN_FIELD();
+            }
+            else if (c == '\n')
+            {
+                // Line ending, stop parsing both field and line
+                END_FIELD();
+                END_LINE();
+                self->state = START_LINE;
+            }
+            else
+            {
+                if (c != ' ' && c != '\t')
+                    whitespace = 0; // field is not all whitespace
+                PUSH(c);
+            }
+            break;
+
+        case START_QUOTED_FIELD:
+            if ((c == ' ' || c == '\t') && self->strip_whitespace_fields)
+            {
+                // ignore initial whitespace
+                break;
+            }
+            else if (c == self->quotechar) // empty quotes
+            {
+                self->state = FIELD; // parse the rest of the field normally
+            }
+            else
+            {
+                // Valid field character, parse again in QUOTED_FIELD mode
+                self->state = QUOTED_FIELD;
+            }
+
+        case QUOTED_FIELD_NEWLINE:
+            if (self->state == QUOTED_FIELD)
+                ; // fall through
+            // Ignore initial whitespace if strip_whitespace_lines and
+            // newlines regardless
+            else if (((c == ' ' || c == '\t') && self->strip_whitespace_lines)
+                     || c == '\n')
+                break;
+            else if (c == self->quotechar)
+            {
+                self->state = FIELD;
+                break;
+            }
+            else
+            {
+                // Once data begins, parse it as a normal quoted field
+                self->state = QUOTED_FIELD;
+            }
+
+        case QUOTED_FIELD:
+            if (c == self->quotechar) // Parse rest of field normally, e.g. "ab"c
+                self->state = FIELD;
+            else if (c == '\n')
+                self->state = QUOTED_FIELD_NEWLINE;
+            else
+            {
+                PUSH(c);
+            }
+            break;
+
+        case COMMENT:
+            if (c == '\n')
+            {
+                self->state = START_LINE;
+                if (!header)
+                    end_comment(self);
+            }
+            else if (!header)
+                push_comment(self, c);
+            break; // keep looping until we find a newline
+
+        }
+
+        ++self->source_pos;
+    }
+
+    RETURN(0);
+}
+
+
+// Lower-case a single C locale character
+static inline int ascii_tolower(int c)
+{
+    if (c >= 'A' && c <= 'Z')
+    {
+        return c + ('a' - 'A');
+    }
+
+    return c;
+}
+
+
+static int ascii_strncasecmp(const char *str1, const char *str2, size_t n)
+{
+    int char1, char2;
+
+    do
+    {
+        char1 = ascii_tolower(*(str1++));
+        char2 = ascii_tolower(*(str2++));
+        n--;
+    } while (n && char1 != '\0' && char1 == char2);
+
+    return (char1 - char2);
+}
+
+
+long str_to_long(tokenizer_t *self, char *str)
+{
+    char *tmp;
+    long ret;
+    errno = 0;
+    ret = strtol(str, &tmp, 10);
+
+    if (tmp == str || *tmp != '\0')
+        self->code = CONVERSION_ERROR;
+    else if (errno == ERANGE)
+        self->code = OVERFLOW_ERROR;
+
+    return ret;
+}
+
+
+double str_to_double(tokenizer_t *self, char *str)
+{
+    char *tmp;
+    double val;
+    errno = 0;
+
+    if (self->use_fast_converter)
+    {
+        val = xstrtod(str, &tmp, '.', self->expchar, ',', 1);
+
+        if (*tmp)
+        {
+            goto conversion_error;
+        }
+        else if (errno == ERANGE)
+        {
+            self->code = OVERFLOW_ERROR;
+        }
+        else if (errno == EDOM) // xstrtod signalling invalid exponents
+        {
+            self->code = CONVERSION_ERROR;
+        }
+
+        return val;
+    }
+
+    else
+    {
+        val = strtod(str, &tmp);
+
+        if (errno == EINVAL || tmp == str || *tmp != '\0')
+        {
+            goto conversion_error;
+        }
+        else if (errno == ERANGE)
+        {
+            self->code = OVERFLOW_ERROR;
+        }
+        else if (errno == EDOM)
+        {
+            self->code = CONVERSION_ERROR;
+        }
+
+        return val;
+    }
+
+conversion_error:
+    // Handle inf and nan values for xstrtod and platforms whose strtod
+    // doesn't support this
+    val = 1.0;
+    tmp = str;
+
+    if (*tmp == '+')
+    {
+        tmp++;
+    }
+    else if (*tmp == '-')
+    {
+        tmp++;
+        val = -1.0;
+    }
+
+    if (0 == ascii_strncasecmp(tmp, "nan", 3))
+    {
+        // Handle optional nan type specifier; this is ignored
+        tmp += 3;
+        val = NAN;
+    }
+    else if (0 == ascii_strncasecmp(tmp, "inf", 3))
+    {
+        tmp += 3;
+        if (0 ==
ascii_strncasecmp(tmp, "inity", 5)) + { + tmp += 5; + } + val *= INFINITY; + } + + if (tmp == str || *tmp != '\0') + { + self->code = CONVERSION_ERROR; + val = 0; + } + + return val; +} + +// --------------------------------------------------------------------------- +// Implementation of xstrtod + +// +// strtod.c +// +// Convert string to double +// +// Copyright (C) 2002 Michael Ringgaard. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// 1. Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// 3. Neither the name of the project nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +// OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +// OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +// SUCH DAMAGE. +// +// ----------------------------------------------------------------------- +// Modifications by Warren Weckesser, March 2011: +// * Rename strtod() to xstrtod(). +// * Added decimal and sci arguments. +// * Skip trailing spaces. +// * Commented out the other functions. 
+// Modifications by Richard T Guy, August 2013: +// * Add tsep argument for thousands separator +// Modifications by Michael Mueller, August 2014: +// * Cache powers of 10 in memory to avoid rounding errors +// * Stop parsing decimals after 17 significant figures +// Modifications by Derek Homeier, August 2015: +// * Recognise alternative exponent characters passed in 'sci'; try automatic +// detection of allowed Fortran formats with sci='A' +// * Require exactly 3 digits in exponent for Fortran-type format '8.7654+321' +// Modifications by Derek Homeier, September-December 2016: +// * Fixed some corner cases of very large or small exponents; proper return +// * do not increment num_digits until nonzero digit read in +// + +double xstrtod(const char *str, char **endptr, char decimal, + char expchar, char tsep, int skip_trailing) +{ + double number; + int exponent; + int negative; + char *p = (char *) str; + char exp; + char sci; + int num_digits; + int num_decimals; + int max_digits = 17; + int num_exp = 3; + int non_zero; + int n; + // Cache powers of 10 in memory + static double e[] = {1., 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10, + 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, 1e20, + 1e21, 1e22, 1e23, 1e24, 1e25, 1e26, 1e27, 1e28, 1e29, 1e30, + 1e31, 1e32, 1e33, 1e34, 1e35, 1e36, 1e37, 1e38, 1e39, 1e40, + 1e41, 1e42, 1e43, 1e44, 1e45, 1e46, 1e47, 1e48, 1e49, 1e50, + 1e51, 1e52, 1e53, 1e54, 1e55, 1e56, 1e57, 1e58, 1e59, 1e60, + 1e61, 1e62, 1e63, 1e64, 1e65, 1e66, 1e67, 1e68, 1e69, 1e70, + 1e71, 1e72, 1e73, 1e74, 1e75, 1e76, 1e77, 1e78, 1e79, 1e80, + 1e81, 1e82, 1e83, 1e84, 1e85, 1e86, 1e87, 1e88, 1e89, 1e90, + 1e91, 1e92, 1e93, 1e94, 1e95, 1e96, 1e97, 1e98, 1e99, 1e100, + 1e101, 1e102, 1e103, 1e104, 1e105, 1e106, 1e107, 1e108, 1e109, 1e110, + 1e111, 1e112, 1e113, 1e114, 1e115, 1e116, 1e117, 1e118, 1e119, 1e120, + 1e121, 1e122, 1e123, 1e124, 1e125, 1e126, 1e127, 1e128, 1e129, 1e130, + 1e131, 1e132, 1e133, 1e134, 1e135, 1e136, 1e137, 1e138, 1e139, 1e140, + 1e141, 1e142, 1e143, 1e144, 1e145, 1e146, 1e147, 1e148, 1e149, 1e150, + 1e151, 1e152, 1e153, 1e154, 1e155, 1e156, 1e157, 1e158, 1e159, 1e160, + 1e161, 1e162, 1e163, 1e164, 1e165, 1e166, 1e167, 1e168, 1e169, 1e170, + 1e171, 1e172, 1e173, 1e174, 1e175, 1e176, 1e177, 1e178, 1e179, 1e180, + 1e181, 1e182, 1e183, 1e184, 1e185, 1e186, 1e187, 1e188, 1e189, 1e190, + 1e191, 1e192, 1e193, 1e194, 1e195, 1e196, 1e197, 1e198, 1e199, 1e200, + 1e201, 1e202, 1e203, 1e204, 1e205, 1e206, 1e207, 1e208, 1e209, 1e210, + 1e211, 1e212, 1e213, 1e214, 1e215, 1e216, 1e217, 1e218, 1e219, 1e220, + 1e221, 1e222, 1e223, 1e224, 1e225, 1e226, 1e227, 1e228, 1e229, 1e230, + 1e231, 1e232, 1e233, 1e234, 1e235, 1e236, 1e237, 1e238, 1e239, 1e240, + 1e241, 1e242, 1e243, 1e244, 1e245, 1e246, 1e247, 1e248, 1e249, 1e250, + 1e251, 1e252, 1e253, 1e254, 1e255, 1e256, 1e257, 1e258, 1e259, 1e260, + 1e261, 1e262, 1e263, 1e264, 1e265, 1e266, 1e267, 1e268, 1e269, 1e270, + 1e271, 1e272, 1e273, 1e274, 1e275, 1e276, 1e277, 1e278, 1e279, 1e280, + 1e281, 1e282, 1e283, 1e284, 1e285, 1e286, 1e287, 1e288, 1e289, 1e290, + 1e291, 1e292, 1e293, 1e294, 1e295, 1e296, 1e297, 1e298, 1e299, 1e300, + 1e301, 1e302, 1e303, 1e304, 1e305, 1e306, 1e307, 1e308}; + // Cache additional negative powers of 10 + /* static double m[] = {1e-309, 1e-310, 1e-311, 1e-312, 1e-313, 1e-314, + 1e-315, 1e-316, 1e-317, 1e-318, 1e-319, 1e-320, + 1e-321, 1e-322, 1e-323}; */ + errno = 0; + + // Skip leading whitespace + while (isspace(*p)) p++; + + // Handle optional sign + negative = 0; + switch (*p) + { + 
case '-': negative = 1; // Fall through to increment position
+    case '+': p++;
+    }
+
+    number = 0.;
+    exponent = 0;
+    num_digits = 0;
+    num_decimals = 0;
+    non_zero = 0;
+
+    // Process string of digits
+    while (isdigit(*p))
+    {
+        if (num_digits < max_digits)
+        {
+            number = number * 10. + (*p - '0');
+            non_zero += (*p != '0');
+            if (non_zero) num_digits++;
+        }
+        else
+            ++exponent;
+
+        p++;
+        p += (tsep != '\0' && *p == tsep);
+    }
+
+    // Process decimal part
+    if (*p == decimal)
+    {
+        p++;
+
+        while (num_digits < max_digits && isdigit(*p))
+        {
+            number = number * 10. + (*p - '0');
+            non_zero += (*p != '0');
+            if (non_zero) num_digits++;
+            num_decimals++;
+            p++;
+        }
+
+        if (num_digits >= max_digits) // consume extra decimal digits
+            while (isdigit(*p))
+                ++p;
+
+        exponent -= num_decimals;
+    }
+
+    if (num_digits == 0)
+    {
+        errno = ERANGE;
+        number = 0.0;
+    }
+
+    // Correct for sign
+    if (negative) number = -number;
+
+    // Process an exponent string
+    sci = toupper(expchar);
+    if (sci == 'A')
+    {
+        // check for possible Fortran exponential notations, including
+        // triple-digits with no character
+        exp = toupper(*p);
+        if (exp == 'E' || exp == 'D' || exp == 'Q' || *p == '+' || *p == '-')
+        {
+            // Handle optional sign
+            negative = 0;
+            switch (exp)
+            {
+            case '-':
+                negative = 1; // Fall through to increment pos
+            case '+':
+                p++;
+                break;
+            case 'E':
+            case 'D':
+            case 'Q':
+                switch (*++p)
+                {
+                case '-':
+                    negative = 1; // Fall through to increment pos
+                case '+':
+                    p++;
+                }
+            }
+
+            // Process string of digits
+            n = 0;
+            while (isdigit(*p))
+            {
+                n = n * 10 + (*p - '0');
+                num_exp--;
+                p++;
+            }
+            // Trigger error if not exactly three digits
+            if (num_exp != 0 && (exp == '+' || exp == '-'))
+            {
+                errno = EDOM;
+                number = 0.0;
+            }
+
+            if (negative)
+                exponent -= n;
+            else
+                exponent += n;
+        }
+    }
+    else if (toupper(*p) == sci)
+    {
+        // Handle optional sign
+        negative = 0;
+        switch (*++p)
+        {
+        case '-':
+            negative = 1; // Fall through to increment pos
+        case '+':
+            p++;
+        }
+
+        // Process string of digits
+        n = 0;
+        while (isdigit(*p))
+        {
+            n = n * 10 + (*p - '0');
+            p++;
+        }
+
+        if (negative)
+            exponent -= n;
+        else
+            exponent += n;
+    }
+
+    // largest representable float64 is 1.7977e+308, closest to 0 ~4.94e-324,
+    // but applying the exponent in two steps gives slightly better precision
+    if (number != 0.0) {
+        if (exponent > 305)
+        {
+            if (exponent > 308) // leading zeros already subtracted from exp
+                number *= HUGE_VAL;
+            else
+            {
+                number *= e[exponent-300];
+                number *= 1.e300;
+            }
+        }
+        else if (exponent < -308) // subnormal
+        {
+            if (exponent < -616) // prevent invalid array access
+                number = 0.;
+            else
+            {
+                number /= e[-308-exponent];
+                number *= 1.e-308;
+            }
+            // trigger warning if resolution is > ~1.e-15;
+            // strtod does so for |number| <~ 2.25e-308
+            // if (number > -4.94e-309 && number < 4.94e-309)
+            errno = ERANGE;
+        }
+        else if (exponent > 0)
+            number *= e[exponent];
+        else if (exponent < 0)
+            number /= e[-exponent];
+
+        if (number == HUGE_VAL || number == -HUGE_VAL)
+            errno = ERANGE;
+    }
+
+    if (skip_trailing) {
+        // Skip trailing whitespace
+        while (isspace(*p)) p++;
+    }
+
+    if (endptr) *endptr = p;
+    return number;
+}
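+
+// Editorial sketch of the exponent handling above, guarded out of the build
+// (XSTRTOD_USAGE_EXAMPLE is a hypothetical flag, not part of astropy). With
+// expchar 'A' the function auto-detects Fortran-style exponents; the
+// bare-sign form must carry exactly three exponent digits, otherwise errno
+// is set to EDOM.
+#ifdef XSTRTOD_USAGE_EXAMPLE
+static void xstrtod_exponent_example(void)
+{
+    char *end;
+
+    // ordinary scientific notation: 1500.0
+    double a = xstrtod("1.5e3", &end, '.', 'E', '\0', 1);
+
+    // Fortran 'D' exponent, auto-detected: 876.54
+    double b = xstrtod("8.7654D2", &end, '.', 'A', '\0', 1);
+
+    // bare-sign Fortran form with the required three digits: 8765.4
+    double c = xstrtod("8.7654+003", &end, '.', 'A', '\0', 1);
+
+    // "8.7654+21" has only two exponent digits: result 0.0, errno == EDOM
+    (void) a; (void) b; (void) c;
+}
+#endif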
+
+
+void start_iteration(tokenizer_t *self, int col)
+{
+    // Begin looping over the column string with index col
+    self->iter_col = col;
+    // Start at the initial pointer position
+    self->curr_pos = self->output_cols[col];
+}
+
+
+char *next_field(tokenizer_t *self, int *size)
+{
+    char *tmp = self->curr_pos;
+
+    // pass through the entire field until reaching the '\x00' separator
+    while (*self->curr_pos != '\x00')
+        ++self->curr_pos;
+
+    ++self->curr_pos; // next field begins after the separator
+
+    if (*tmp == '\x01') // empty field; this is a hack
+    {
+        if (size)
+            *size = 0;
+        return self->buf;
+    }
+
+    else
+    {
+        if (size)
+            *size = self->curr_pos - tmp - 1;
+        return tmp;
+    }
+}
+
+
+char *get_line(char *ptr, size_t *len, size_t map_len)
+{
+    size_t pos = 0;
+
+    while (pos < map_len)
+    {
+        if (ptr[pos] == '\r')
+        {
+            *len = pos;
+            // Windows line break (\r\n)
+            if (pos != map_len - 1 && ptr[pos + 1] == '\n')
+                return ptr + pos + 2; // skip newline character
+            else // Carriage return line break
+                return ptr + pos + 1;
+        }
+
+        else if (ptr[pos] == '\n')
+        {
+            *len = pos;
+            return ptr + pos + 1;
+        }
+
+        ++pos;
+    }
+
+    // done with input
+    return 0;
+}
+
+
+void reset_comments(tokenizer_t *self)
+{
+    free(self->comment_lines);
+    self->comment_pos = 0;
+    self->comment_lines_len = INITIAL_COMMENT_LEN;
+    self->comment_lines = (char *) malloc(INITIAL_COMMENT_LEN);
+}
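+
+
+// Editorial sketch, guarded out of the build (GET_LINE_USAGE_EXAMPLE is a
+// hypothetical flag, not part of astropy): illustrates the get_line()
+// contract. *len receives the length of the current line without its line
+// break, and the return value points at the following line (NULL when no
+// further line break exists within the first map_len bytes).
+#ifdef GET_LINE_USAGE_EXAMPLE
+static void get_line_usage_example(void)
+{
+    char buf[] = "a,b\r\nc,d\nend";
+    size_t len;
+
+    // len == 3 ("a,b"); the Windows "\r\n" break is skipped entirely
+    char *next = get_line(buf, &len, sizeof(buf) - 1);
+
+    // len == 3 ("c,d"); next now points at the final, unterminated "end"
+    next = get_line(next, &len, sizeof(buf) - 1 - (next - buf));
+
+    // scanning "end" finds no line break, so get_line() returns NULL
+    (void) next;
+}
+#endif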
diff --git a/astropy/io/ascii/src/tokenizer.h b/astropy/io/ascii/src/tokenizer.h
new file mode 100644
index 0000000..6a1ddce
--- /dev/null
+++ b/astropy/io/ascii/src/tokenizer.h
@@ -0,0 +1,113 @@
+// Licensed under a 3-clause BSD style license - see LICENSE.rst
+
+#ifndef TOKENIZER_H
+#define TOKENIZER_H
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+#include <float.h>
+#include <ctype.h>
+#include <errno.h>
+#include <stdint.h>
+
+#ifdef _MSC_VER
+    #define inline __inline
+    #ifndef NAN
+        static const unsigned long __nan[2] = {0xffffffff, 0x7fffffff};
+        #define NAN (*(const double *) __nan)
+    #endif
+    #ifndef INFINITY
+        static const unsigned long __infinity[2] = {0x00000000, 0x7ff00000};
+        #define INFINITY (*(const double *) __infinity)
+    #endif
+#else
+    #ifndef INFINITY
+        #define INFINITY (1.0/0.0)
+    #endif
+    #ifndef NAN
+        #define NAN (INFINITY-INFINITY)
+    #endif
+#endif
+
+typedef enum
+{
+    START_LINE = 0,
+    START_FIELD,
+    START_QUOTED_FIELD,
+    FIELD,
+    QUOTED_FIELD,
+    QUOTED_FIELD_NEWLINE,
+    COMMENT,
+} tokenizer_state;
+
+typedef enum
+{
+    NO_ERROR,
+    INVALID_LINE,
+    TOO_MANY_COLS,
+    NOT_ENOUGH_COLS,
+    CONVERSION_ERROR,
+    OVERFLOW_ERROR
+} err_code;
+
+typedef struct
+{
+    char *source;           // single string containing all of the input
+    size_t source_len;      // length of the input
+    size_t source_pos;      // current index in source for tokenization
+    char delimiter;         // delimiter character
+    char comment;           // comment character
+    char quotechar;         // quote character
+    char expchar;           // exponential character in scientific notation
+    char **output_cols;     // array of output strings for each column
+    char **col_ptrs;        // array of pointers to current output position for each col
+    int *output_len;        // length of each output column string
+    int num_cols;           // number of table columns
+    int num_rows;           // number of table rows
+    int fill_extra_cols;    // represents whether or not to fill rows with too few values
+    tokenizer_state state;  // current state of the tokenizer
+    err_code code;          // represents the latest error that has occurred
+    int iter_col;           // index of the column being iterated over
+    char *curr_pos;         // current iteration position
+    char *buf;              // buffer for empty data
+    int strip_whitespace_lines;  // whether to strip whitespace at the beginning and end of lines
+    int strip_whitespace_fields; // whether to strip whitespace at the beginning and end of fields
+    int use_fast_converter;      // whether to use the fast converter for floats
+    char *comment_lines;    // single null-delimited string containing comment lines
+    int comment_lines_len;  // length of comment_lines in memory
+    int comment_pos;        // current index in comment_lines
+} tokenizer_t;
+
+/*
+Example input/output
+--------------------
+
+source: "A,B,C\n10,5.,6\n1,2,3"
+output_cols: ["A\x0010\x001", "B\x005.\x002", "C\x006\x003"]
+*/
+
+#define INITIAL_COL_SIZE 500
+#define INITIAL_COMMENT_LEN 50
+
+tokenizer_t *create_tokenizer(char delimiter, char comment, char quotechar, char expchar,
+                              int fill_extra_cols, int strip_whitespace_lines,
+                              int strip_whitespace_fields, int use_fast_converter);
+void delete_tokenizer(tokenizer_t *tokenizer);
+void delete_data(tokenizer_t *tokenizer);
+void resize_col(tokenizer_t *self, int index);
+void resize_comments(tokenizer_t *self);
+int skip_lines(tokenizer_t *self, int offset, int header);
+int tokenize(tokenizer_t *self, int end, int header, int num_cols);
+long str_to_long(tokenizer_t *self, char *str);
+double str_to_double(tokenizer_t *self, char *str);
+double xstrtod(const char *str, char **endptr, char decimal,
+               char expchar, char tsep, int skip_trailing);
+void start_iteration(tokenizer_t *self, int col);
+char *next_field(tokenizer_t *self, int *size);
+long file_len(FILE *fhandle);
+char *get_line(char *ptr, size_t *len, size_t map_len);
+void reset_comments(tokenizer_t *self);
+
+#endif
diff --git a/astropy/io/ascii/tests/__init__.py b/astropy/io/ascii/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/astropy/io/ascii/tests/common.py b/astropy/io/ascii/tests/common.py
new file mode 100644
index 0000000..0dbb560
--- /dev/null
+++ b/astropy/io/ascii/tests/common.py
@@ -0,0 +1,109 @@
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+
+from __future__ import absolute_import
+
+import os
+
+
+import numpy as np
+
+
+__all__ = ['raises', 'assert_equal', 'assert_almost_equal',
+           'assert_true', 'setup_function', 'teardown_function',
+           'has_isnan']
+
+CWD = os.getcwd()
+TEST_DIR = os.path.dirname(__file__)
+
+has_isnan = True
+try:
+    from math import isnan  # pylint: disable=W0611
+except ImportError:
+    try:
+        from numpy import isnan  # pylint: disable=W0611
+    except ImportError:
+        has_isnan = False
+        print('Tests requiring isnan will fail')
+
+
+def setup_function(function):
+    os.chdir(TEST_DIR)
+
+
+def teardown_function(function):
+    os.chdir(CWD)
+
+
+# Compatibility functions to convert from nose to py.test
+def assert_equal(a, b):
+    assert a == b
+
+
+def assert_almost_equal(a, b, **kwargs):
+    assert np.allclose(a, b, **kwargs)
+
+
+def assert_true(a):
+    assert a
+
+
+def make_decorator(func):
+    """
+    Wraps a test decorator so as to properly replicate metadata
+    of the decorated function, including nose's additional stuff
+    (namely, setup and teardown).
+    """
+    def decorate(newfunc):
+        if hasattr(func, 'compat_func_name'):
+            name = func.compat_func_name
+        else:
+            name = func.__name__
+        newfunc.__dict__ = func.__dict__
+        newfunc.__doc__ = func.__doc__
+        newfunc.__module__ = func.__module__
+        if not hasattr(newfunc, 'compat_co_firstlineno'):
+            try:
+                newfunc.compat_co_firstlineno = func.func_code.co_firstlineno
+            except AttributeError:
+                newfunc.compat_co_firstlineno = func.__code__.co_firstlineno
+        try:
+            newfunc.__name__ = name
+        except TypeError:
+            # can't set func name in 2.3
+            newfunc.compat_func_name = name
+        return newfunc
+    return decorate
+
+
+def raises(*exceptions):
+    """Test must raise one of expected exceptions to pass.
+ + Example use:: + + @raises(TypeError, ValueError) + def test_raises_type_error(): + raise TypeError("This test passes") + + @raises(Exception) + def test_that_fails_by_passing(): + pass + + If you want to test many assertions about exceptions in a single test, + you may want to use `assert_raises` instead. + """ + valid = ' or '.join([e.__name__ for e in exceptions]) + + def decorate(func): + name = func.__name__ + + def newfunc(*arg, **kw): + try: + func(*arg, **kw) + except exceptions: + pass + else: + message = "{}() did not raise {}".format(name, valid) + raise AssertionError(message) + newfunc = make_decorator(func)(newfunc) + return newfunc + return decorate diff --git a/astropy/io/ascii/tests/t/apostrophe.rdb b/astropy/io/ascii/tests/t/apostrophe.rdb new file mode 100644 index 0000000..570491e --- /dev/null +++ b/astropy/io/ascii/tests/t/apostrophe.rdb @@ -0,0 +1,6 @@ +# first comment +agasc_id n_noids n_obs +11S N N +jean's 1 1 + # second comment +335416352 3 8 diff --git a/astropy/io/ascii/tests/t/apostrophe.tab b/astropy/io/ascii/tests/t/apostrophe.tab new file mode 100644 index 0000000..66c4b59 --- /dev/null +++ b/astropy/io/ascii/tests/t/apostrophe.tab @@ -0,0 +1,3 @@ +agasc_id n_noids n_obs +jean's 1 1 +335416352 3 8 diff --git a/astropy/io/ascii/tests/t/bad.txt b/astropy/io/ascii/tests/t/bad.txt new file mode 100644 index 0000000..467e347 --- /dev/null +++ b/astropy/io/ascii/tests/t/bad.txt @@ -0,0 +1,6 @@ +# Extra column in last line + "test 1a" test2 test3 test4 + # fun1 fun2 fun3 fun4 + top1 top2 top3 top4 +hat1 hat2 hat3 hat4 hat5 + diff --git a/astropy/io/ascii/tests/t/bars_at_ends.txt b/astropy/io/ascii/tests/t/bars_at_ends.txt new file mode 100644 index 0000000..ae651c7 --- /dev/null +++ b/astropy/io/ascii/tests/t/bars_at_ends.txt @@ -0,0 +1,4 @@ +|obsid | redshift | X | Y | object | rad| +|3102 | 0.32 | 4167 | 4085 | Q1250+568-A | 9| +|3102 | 0.32 | 4706 | 3916 | Q1250+568-B | 14 | +|877 | 0.22 | 4378 | 3892 | 'Source 82' | 12.5 | diff --git a/astropy/io/ascii/tests/t/cds.dat b/astropy/io/ascii/tests/t/cds.dat new file mode 100644 index 0000000..7631695 --- /dev/null +++ b/astropy/io/ascii/tests/t/cds.dat @@ -0,0 +1,38 @@ + + + + +Title: Spitzer Observations of NGC 1333: A Study of Structure and Evolution + in a Nearby Embedded Cluster +Authors: Gutermuth R.A., Myers P.C., Megeath S.T., Allen L.E., Pipher J.L., + Muzerolle J., Porras A., Winston E., Fazio G. +Table: Spitzer-identified YSOs: Addendum +================================================================================ +Byte-by-byte Description of file: datafile3.txt +-------------------------------------------------------------------------------- + Bytes Format Units Label Explanations +-------------------------------------------------------------------------------- + 1- 3 I3 --- Index Running identification number + 5- 6 I2 h RAh Hour of Right Ascension (J2000) + 8- 9 I2 min RAm Minute of Right Ascension (J2000) + 11- 15 F5.2 s RAs Second of Right Ascension (J2000) + - continuation of description + 17 A1 --- DE- Sign of the Declination (J2000) + 18- 19 I2 deg DEd Degree of Declination (J2000) + 21- 22 I2 arcmin DEm Arcminute of Declination (J2000) + 24- 27 F4.1 arcsec DEs Arcsecond of Declination (J2000) + 29- 68 A40 --- Match Literature match + 70- 75 A6 --- Class Source classification (1) + 77-80 F4.2 mag AK ? The K band extinction (2) + 82-86 F5.2 --- Fit ? 
Fit of IRAC photometry (3) +-------------------------------------------------------------------------------- +Note (1): Asterisks mark "deeply embedded" sources with questionable IRAC + colors or incomplete IRAC photometry and relatively bright + MIPS 24 micron photometry. +Note (2): Only provided for sources with valid JHK_S_ photometry. +Note (3): Defined as the slope of the linear least squares fit to the + 3.6 - 8.0 micron SEDs in log{lambda} F_{lambda} vs log{lambda} space. + Extinction is not accounted for in these values. High extinction can + bias Fit to higher values. +-------------------------------------------------------------------------------- + 1 03 28 39.09 +31 06 01.9 I* 1.35 diff --git a/astropy/io/ascii/tests/t/cds/description/ReadMe b/astropy/io/ascii/tests/t/cds/description/ReadMe new file mode 100644 index 0000000..999dec2 --- /dev/null +++ b/astropy/io/ascii/tests/t/cds/description/ReadMe @@ -0,0 +1,67 @@ +J/A+A/511/A56 Abundances of five open clusters (Pancino+, 2010) +================================================================================ +Chemical abundance analysis of the open clusters Cr 110, NGC 2420, NGC 7789, +and M 67 (NGC 2682). + Pancino E., Carrera R., Rossetti, E., Gallart C. + + =2010A&A...511A..56P +================================================================================ +ADC_Keywords: Clusters, open ; Stars, giant ; Equivalent widths ; Spectroscopy +Keywords: stars: abundances - Galaxy: disk - + open clusters and associations: general + +Abstract: + The present number of Galactic open clusters that have high resolution + abundance determinations, not only of [Fe/H], but also of other key + elements, is largely insufficient to enable a clear modeling of the + Galactic disk chemical evolution. To increase the number of Galactic + open clusters with high quality measurements, we obtained high + resolution (R~30000), high quality (S/N~50-100 per pixel), echelle + spectra with the fiber spectrograph FOCES, at Calar Alto, Spain, for + three red clump stars in each of five Open Clusters. We used the + classical equivalent width analysis method to obtain accurate + abundances of sixteen elements: Al, Ba, Ca, Co, Cr, Fe, La, Mg, Na, + Nd, Ni, Sc, Si, Ti, V, and Y. We also derived the oxygen abundance + using spectral synthesis of the 6300{AA} forbidden line. + +Description: + Atomic data and equivalent widths for 15 red clump giants in 5 open + clusters: Cr 110, NGC 2099, NGC 2420, M 67, NGC 7789. + +File Summary: +-------------------------------------------------------------------------------- + FileName Lrecl Records Explanations +-------------------------------------------------------------------------------- +ReadMe 80 . This file +table1.dat 103 15 Observing logs and programme stars information +table5.dat 56 5265 Atomic data and equivalent widths +-------------------------------------------------------------------------------- + +See also: + J/A+A/455/271 : Abundances of red giants in NGC 6441 (Gratton+, 2006) + J/A+A/464/953 : Abundances of red giants in NGC 6441 (Gratton+, 2007) + J/A+A/505/117 : Abund. of red giants in 15 globular clusters (Carretta+, 2009) + +Byte-by-byte Description of file: table.dat +-------------------------------------------------------------------------------- + Bytes Format Units Label Explanations +-------------------------------------------------------------------------------- + 1- 7 A7 --- Cluster Cluster name + 9- 12 I4 --- Star + 14- 20 F7.2 0.1nm Wave wave + ? 
Wavelength in Angstroms + 22- 23 A2 --- El a + 24 I1 --- ion ?=0 + - Ionization stage (1 for neutral element) + 26- 30 F5.2 eV chiEx Excitation potential + 32- 37 F6.2 --- loggf Logarithm of the oscillator strength + 39- 43 F5.1 0.1pm EW ?=-9.9 Equivalent width (in mA) + 46- 49 F4.1 0.1pm e_EW ?=-9.9 rms uncertainty on EW + 51- 56 F6.3 --- Q ?=-9.999 DAOSPEC quality parameter Q + (large values are bad) +-------------------------------------------------------------------------------- + +Acknowledgements: + Elena Pancino, elena.pancino(at)oabo.inaf.it +================================================================================ +(End) Elena Pancino [INAF-OABo, Italy], Patricia Vannier [CDS] 23-Nov-2009 diff --git a/astropy/io/ascii/tests/t/cds/description/table.dat b/astropy/io/ascii/tests/t/cds/description/table.dat new file mode 100644 index 0000000..f58fc29 --- /dev/null +++ b/astropy/io/ascii/tests/t/cds/description/table.dat @@ -0,0 +1,2 @@ +Cr110 2108 6696.79 Al1 4.02 -1.42 29.5 2.2 0.289 +Cr110 2108 6698.67 Al1 3.14 -1.65 58.0 2.0 0.325 diff --git a/astropy/io/ascii/tests/t/cds/glob/ReadMe b/astropy/io/ascii/tests/t/cds/glob/ReadMe new file mode 100644 index 0000000..cc680cb --- /dev/null +++ b/astropy/io/ascii/tests/t/cds/glob/ReadMe @@ -0,0 +1,572 @@ +B/cb Cataclysmic Binaries, LMXBs, and related objects (Ritter+, 2011) +================================================================================ +Catalogue of cataclysmic binaries, low-mass X-ray binaries +and related objects (7th Edition, rev. 7.14, September 2010) + Ritter H., Kolb U. + + =2003A&A...404..301R +================================================================================ +ADC_Keywords: Binaries, cataclysmic ; Binaries, X-ray ; Novae +Keywords: catalogues - stars: novae, cataclysmic variables - + stars: binaries: close + +Description (Release 7.15): + Cataclysmic Binaries are semi-detached binaries consisting of a white + dwarf or a white dwarf precursor primary and a low-mass secondary + which is filling its critical Roche lobe. The secondary is not + necessarily unevolved, it may even be a highly evolved star as for + example in the case of the AM CVn-type stars. + + Low-Mass X-Ray Binaries are semi-detached binaries consisting of + either a neutron star or a black hole primary, and a low-mass + secondary which is filling its critical Roche lobe. + + Related Objects are detached binaries consisting of either a white + dwarf or a white dwarf precursor primary and of a low-mass secondary. + The secondary may also be a highly evolved star. + + The catalogue lists coordinates, apparent magnitudes, orbital + parameters, and stellar parameters of the components and other + characteristic properties of 880 cataclysmic binaries, 98 low-mass + X-ray binaries and 319 related objects with known or suspected orbital + periods together with a comprehensive selection of the relevant recent + literature. In addition the catalogue contains a list of references to + published finding charts for 1259 of the 1297 objects, and a cross- + reference list of alias object designations. Literature published + before 1 July 2010 has, as far as possible, been taken into account. + Updated information will be provided regularly, currently every six + months. 
+ + Old editions include catalogue (5th edition), + (6th edition) and (7th edition); + the successive versions of the 7th edition are available + in dedicated subdirectories (v7.00 tp v7.13) + +File Summary: +-------------------------------------------------------------------------------- + FileName Lrecl Records Explanations +-------------------------------------------------------------------------------- +ReadMe 80 . This file +cbdata.dat 226 880 Catalogue of Cataclysmic Binaries +lmxbdata.dat 228 98 Catalogue of Low-Mass X-Ray Binaries +pcbdata.dat 216 319 Catalogue of Related Objects +findrefs.dat 274 3230 References for finding charts +cbrefs.dat 257 1937 References for cbdata.dat +lmxbrefs.dat 236 291 References for lmxbdata.dat +pcbrefs.dat 302 655 References for pcbdata.dat +whoswho.txt 72 8927 *Names of objects, and references of designations +whoswho1.dat 199 5052 *Alternative names in lexigraphical order +whoswho2.dat 100 3595 *Provisional and Common designations +whoswho5.dat 73 1453 *References to the catalogue acronyms +-------------------------------------------------------------------------------- +Note on whoswho.txt: + contains the 3 parts whoswho1.dat to whoswho5.dat (without the bibcodes) +Note on whoswho1.dat, whoswho2.dat, whoswho5.dat: + formatted files corresponding to whoswho.txt +-------------------------------------------------------------------------------- + +See also: + http://www.MPA-Garching.MPG.DE/RKcat/ : Catalog Home page or + http://physics.open.ac.uk/RKcat/ : Catalog Home page + +Byte-by-byte Description of file: cbdata.dat +-------------------------------------------------------------------------------- + Bytes Format Units Label Explanations +-------------------------------------------------------------------------------- + 1- 12 A12 --- Name Object name (G1) + 14 A1 --- whoswho [*] * indicating that further alternative + designations are in the whoswho1.dat file + 16- 27 A12 --- AltName A frequently used alternative name (G2) + 30- 31 I2 h RAh Right Ascension J2000 (hours) + 33- 34 I2 min RAm Right Ascension J2000 (minutes) + 36- 39 F4.1 s RAs [0,60]? Right Ascension J2000 (seconds) + 41 A1 --- DE- Declination J2000 (sign) + 42- 43 I2 deg DEd Declination J2000 (degrees) + 45- 46 I2 arcmin DEm Declination J2000 (minutes of arc) + 48- 49 I2 arcsec DEs [0,60]? Declination J2000 (seconds of arc) + 51 A1 arcsec epos [0-9] Position accuracy in (G3) + 53- 54 A2 --- Type1 Object type (3) + 55 A1 --- u_Type1 [?:] Uncertainty flag for object type + 57- 58 A2 --- Type2 Object type (3) + 59 A1 --- u_Type2 [?:] Uncertainty flag for object type + 61- 62 A2 --- Type3 Object type (3) + 63 A1 --- u_Type3 [?:] Uncertainty flag for Object type + 65- 66 A2 --- Type4 Object type (3) + 67 A1 --- u_Type4 [?] Uncertainty flag for Object type + 69 A1 --- l_mag1 [><] Limit flag for magnitude mag1 + 70- 73 F4.1 mag mag1 ? Apparent V (or B, b, g, R, I) magnitude + at maximum brightness (4) + 74 A1 --- f_mag1 [:BbgRiIJKprw] uncertainty flag/band for mag1 + (w="white light") + 76 A1 --- l_mag2 [><] Limit flag for magnitude mag2 + 77- 80 F4.1 mag mag2 ? Apparent V (or B, g, R) magnitude + at mideclipse (5) + 81 A1 --- f_mag2 [:?BbgRiKpw] uncertainty flag/band for mag2 + 83 A1 --- l_mag3 [><] Limit flag for magnitude mag3 + 84- 87 F4.1 mag mag3 ? Apparent V (or B, g, R) magnitude + of outbursts (6) + 88 A1 --- f_mag3 [:?BbgpRrw] uncertainty flag/band for mag3 + 90 A1 --- l_mag4 [><] Limit flag for magnitude mag4 + 91- 94 F4.1 mag mag4 ? 
Apparent V (or B, R) magnitude + in superoutburst (7) + 95 A1 --- f_mag4 [:?BgRUIpw] uncertainty flag/band for mag4 + 97-101 A5 d T1 Time interval between two subsequent + outbursts (8) + 103-107 A5 d T2 Time interval between two subsequent + superoutbursts (8) + 109-116 F8.6 d Orb.Per ? Orbital period, in case of object + type DQ: spectroscopic period, if it is + different from the photometric one + 117 A1 --- u_Orb.Per [:*] Uncertainty flag for Orb.Per (9) + 119-126 F8.6 d 2.__Per ? Second period (10) + 127 A1 --- u_2.__Per Uncertainty flag for 2.__Per + 128-137 F10.3 s 3.__Per ? Additional period in the system (11) + 138 A1 --- f_3.__Per [:TQ] Flag for 3.__Per (12) + 139-148 F10.3 s 4.__Per ? Additional period in the system (13) + 149 A1 --- f_4.__Per [:T] ":" uncertainty flag for 4.__Per + "T" flag indicating transient pulsations + 151 A1 --- EB [D21 ] Flag indicating the + occurrence of eclipses (G4) + 152 A1 --- u_EB [?:] Uncertainty flag for EB + 154 I1 --- SB [1,2]? Flag specifying the type of + spectroscopic binary (G5) + 155 A1 --- u_SB [:] Uncertainty flag for SB + 157-163 A7 --- SpType2 Spectral type of the secondary (G6) + 165-171 A7 --- SpType1 Spectral type of the primary (G6) + 174 A1 --- l_M1/M2 Limit flag for M1/M2 + 175-179 F5.2 --- M1/M2 ? Mass ratio M1/M2 + 180 A1 --- u_M1/M2 Uncertainty flag for M1/M2 + 183-186 F4.2 --- e_M1/M2 ? Error of M1/M2 + 188 A1 --- l_Incl Limit flag for the orbital inclination + 189-192 F4.1 deg Incl ? Orbital inclination + 193 A1 --- u_Incl Uncertainty flag for the inclination + 195-198 F4.1 deg e_Incl ? Error of orbital inclination + 200 A1 --- l_M1 Limit flag for primary mass M1 + 201-205 F5.3 solMass M1 ? Primary mass M1 + 206 A1 --- u_M1 Uncertainty flag for primary mass M1 + 208-212 F5.3 solMass e_M1 ? Error of primary mass M1 + 214 A1 --- l_M2 Limit flag for secondary mass M2 + 215-219 F5.3 solMass M2 ? Secondary mass M2 + 220 A1 --- u_M2 Uncertainty flag for secondary mass M2 + 222-226 F5.3 solMass e_M2 ? Error of secondary mass M2 +-------------------------------------------------------------------------------- +Note (3): Object type coarsely characterised using the following abbreviations: + AC = AM CVn star, spectrum devoid of hydrogen lines, subtype of NL + AM = polar = AM Her system, subtype of NL, contains a synchronously + or nearly synchronously rotating, magnetized white dwarf + AS = subtype of AM, with a slowly asynchronously rotating, magnetized + white dwarf + BD = secondary star is a brown dwarf + CP = coherent pulsator, contains a coherently pulsating white dwarf + CV = cataclysmic variable of unspecified subtype + DA = non-magnetic direct accretor + DN = dwarf nova + DQ = DQ Her star, contains a non-synchronously rotating, magnetized + white dwarf; usually not seen in X-rays + EG = extragalactic source + ER = ER UMa star = SU UMa star with an extremely short supercycle + GC = source in a globular cluster + GW = contains a pulsating white dwarf of the GW Vir = PG 1159-035 type + IP = intermediate polar, shows coherent X-ray period from a + non-synchronously spinning, magnetized white dwarf; usually a + strong X-ray source + LA = low accretion rate polar (LARP), i.e. a somewhat detached magnetic + CV/pre-CV + N = classical nova + Na = fast nova (decline from max. by 3mag in less than about 100days) + Nb = slow nova (decline from max. 
by 3mag in more than about 100days) + Nc = extremely slow nova (typical time scale of the decline from + maximum: decades) + NL = nova-like variable + Nr = recurrent nova + NS = system showing negative (nodal) superhumps + PW = precessing white dwarf + SH = non-SU UMa star showing either permanent or transient positive + (apsidal) superhumps + SS = supersoft X-ray source; CV with stationary hydrogen burning on + the white dwarf + SU = SU UMa star, subtype of DN + SW = SW Sex star, subtype of NL + UG = dwarf nova of either U Gem or SS Cyg subtype + UL = ultra-luminous X-ray source + UX = UX UMa star, subtype of NL + VY = VY Scl star (anti dwarf nova), subtype of NL + WZ = WZ Sge star = SU UMa star with an extremely long supercycle + ZC = Z Cam star, subtype of DN + ZZ = white dwarf shows ZZ Ceti-type pulsations + +Note (4): Apparent V magnitude at maximum brightness of: + novae (N,Na,Nb,Nc,Nr) in minimum + DN (UG,ZC,SU) in minimum + NL (UX,AC) in normal state + NL (DQ,IP,AM,VY) in high state. + SS in high state. + +Note (5): In case of eclipses magnitude at mideclipse, of: + novae (N,Na,Nb,Nc,Nr) in minimum + DN (UG,ZC,SU) in minimum + NL (UX,AC) in normal state + NL (DQ,IP,AM,VY) in high state. + SS in high state. + +Note (6): Apparent magnitude at maximum brightness of: + novae (N,Na,Nb,Nc,Nr) in outburst + DN (UG,ZC) in outburst + DN (SU) in normal outburst + DN (WZ) in echo outburst + NL (AM,VY) in low state + NL (DQ,IP) in low state + SS in low state. + +Note (7): Apparent magnitude at maximum brightness of: + DN (ZC) in standstill + DN (SU) in superoutburst + WZ in superoutburst + NL (DQ,IP) in flaring state or outburst + iNL (AM, VY) in low state + SS in low state + +Note (8): Time interval between outbursts is defined: + - for dwarf novae of subtype UG or ZC: the typical time interval + between two subsequent outbursts; + - for dwarf novae of subtype SU: + T1 is the typical time interval between two subsequent normal + outburst, and + T2 is the typical time interval between subsequent superoutbursts. + +Note (9): the * indicates, in case of object type SU, that the orbital + period has been estimated from the known superhump period using the + empirical relation given by B. Stolz and R. Schoembs (1984A&A...132..187S). + +Note (10): The second period is, in case of object type: + DQ or IP: photometric period if it is different from the + spectroscopic one + AM: polarization period = spin period of the white dwarf, if it is + different from the presumed orbital period (subtype AS) + SU: superhump period, wherever possible, at the beginning of a + superoutburst + SH: photometric period, presumably superhump period of either + permanent or transient superhumps + NS: photometric period, period of either permanent or transient + negative superhumps if 2.__Per. < Orb.Per. + +Note (11): This additional period is, in case of object type: + CP: period of coherent pulsation, (transient if f_3.__Per=T) + DQ: spin period of the white dwarf + IP: spin period of the white dwarf, usually detected in X-Rays + SW: probably the spin period of the white dwarf + +Note (12): the flag takes the values: + ':' uncertainty flag + 'T' indicating transient pulsations + 'Q' indicating the occurrence of quasi- periodic oscillations (QPO) + in objects of type N, DN, NL. 
+ +Note (13): This additional period is, in case of object type: + CP: second period of coherent pulsation, (transient if f_4.__Per=T) + DQ: additional period, presumably due to reprocessed X-Rays + IP: additional period, usually seen in the optical and presumably + due to reprocessed X-Rays +-------------------------------------------------------------------------------- + +Byte-by-byte Description of file: lmxbdata.dat +-------------------------------------------------------------------------------- + Bytes Format Units Label Explanations +-------------------------------------------------------------------------------- + 1- 12 A12 --- Name Object name (G1) + 14 A1 --- whoswho [*] * indicating that further alternative + designations are in the whoswho1.dat file + 16- 27 A12 --- AltName A frequently used alternative name (G2) + 30- 31 I2 h RAh Right Ascension J2000 (hours) + 33- 34 I2 min RAm Right Ascension J2000 (minutes) + 36- 39 F4.1 s RAs Right Ascension J2000 (seconds) + 41 A1 --- DE- Declination J2000 (sign) + 42- 43 I2 deg DEd Declination J2000 (degrees) + 45- 46 I2 arcmin DEm Declination J2000 (minutes of arc) + 48- 49 I2 arcsec DEs Declination J2000 (seconds of arc) + 51 A1 arcsec epos [0-9] Position accuracy in (G3) + 53- 54 A2 --- Type1 Object type (3) + 55 A1 --- u_Type1 [?] Uncertainty flag for object type + 57- 58 A2 --- Type2 Object type (3) + 59 A1 --- u_Type2 [?] Uncertainty flag for object type + 61- 62 A2 --- Type3 Object type (3) + 63 A1 --- u_Type3 [?] Uncertainty flag for Object type + 65- 66 A2 --- Type4 Object type (3) + 67 A1 --- u_Type4 [?] Uncertainty flag for Object type + 69 A1 --- l_mag1 [><] Limit flag for magnitude mag1 + 70- 73 F4.1 mag mag1 ? Apparent V (or B, g, R, I, K) magnitude + at maximum brightness, + in case of XT in quiescence + 74 A1 --- f_mag1 [:UBgRIJK] uncertainty flag/band for mag1 + 76 A1 --- l_mag2 [><] Limit flag for magnitude mag2 + 77- 80 F4.1 mag mag2 ? Apparent V (or B, R, I) magnitude + at mid-eclipse (4) + 81 A1 --- f_mag2 [:BRIJK] Uncertainty flag/band for mag2 + 84- 87 F4.1 mag mag3 ? Apparent V (or other) magnitude + at outburst (5) + 88 A1 --- f_mag3 [:BRI] Uncertainty flag/band for mag3 + 90 A1 --- l_mag4 Limit flag of magnitude mag4 + 91- 94 F4.1 mag mag4 ? Apparent V (or other) magnitude at + superoutburst (5) + 96 A1 --- l_LX/Lopt Limit flag on LX/Lopt + 97-103 F7.1 --- LX/Lopt ? The ratio of X-ray to optical luminosity + 106-108 I3 d T1 ? Typical time interval between two subsequent + X-ray active states in case of subtype XT + 110-118 F9.6 d Orb.Per ? Orbital period + 119 A1 --- u_Orb.Per [:*] Uncertainty flag for Orb.Per (6) + 120-128 F9.6 d 2.__Per ? Second period, in case of object type SH: + photometric period, presumably superhump + period of either permanent or transient + superhumps + 129 A1 --- u_2.__Per Uncertainty flag for 2.__Per + 130-140 F11.7 s 3.__Per ? Additional period in the system, in case of + object type + BO: period of burst oscillations = rotation + period of the neutron star; + XP: pulse period of the pulsar + 141 A1 --- u_3.__Per Uncertainty flag for 3.__Per + 142-146 F5.1 s 4.__Per ? Aditional period in the system, in case of + object type XP: optical period, presumably + due to e_processed X-Rays + 152 A1 --- EB [D1 ] Occurrence of eclipses (G4) + 153 A1 --- u_EB [?] Uncertainty flag on EB + 155 I1 --- SB [1,2]? 
Flag specifying the type of + spectroscopic binary (G5) + 158-164 A7 --- SpType2 Spectral type of the secondary (G6) + 167-173 A7 --- SpType1 Spectral type of the primary (G6) + 175 A1 --- l_M1/M2 Limit flag for M1/M2 + 176-180 F5.2 --- M1/M2 ? Mass ratio M1/M2 + 181 A1 --- u_M1/M2 Uncertainty flag for M1/M2 + 182-186 F5.2 --- e_M1/M2 ? Error of M1/M2 + 189 A1 --- l_Incl Limit flag for the orbital inclination + 190-193 F4.1 deg Incl ? Orbital inclination + 194 A1 --- u_Incl Uncertainty flag (:) on Incl + 196-199 F4.1 deg e_Incl ? Error of orbital inclination + 201 A1 --- l_M1 Limit flag on M1 + 202-206 F5.2 solMass M1 ? Primary mass M1 + 207 A1 --- u_M1 Uncertainty flag (:) on M1 + 209-213 F5.2 solMass e_M1 ? Error of primary mass M1 + 215 A1 --- l_M2 Limit flag for secondary mass M2 + 216-221 F6.3 solMass M2 ? Secondary mass M2 + 222 A1 --- u_M2 Uncertainty flag (:) on M2 + 223-228 F6.3 solMass e_M2 ? Error of secondary mass M2 +-------------------------------------------------------------------------------- +Note (3): the object type is coarsely characterised using + the following abbreviations: + AS = atoll source, subtype of the LMXBs + BH = black hole candidate, subtype of the LMXBs + BO = X-ray burster with coherent burst oscillations at the neutron + star spin period + DC = source with an accretion disc corona, subtype of the LMXBs + GC = source in a globular cluster + MQ = microquasar, source of relativistic jets + NS = system showing negative (nodal) superhumps + QN = quiescent neutron star LMXB + RP = primary is also seen as a radio pulsar + SH = system showing either permanent or transient superhumps + SS = supersoft X-ray source + UL = ultra-luminous X-ray source + XB = X-ray burst source + XP = X-ray pulsar + XT = transient X-ray source + ZS = Z-source, subtype of the LMXBs + +Note (4): in case of eclipses magnitude at mideclipse, + in case of XT in quiescence + +Note (5): in case of XL (XB, XT) in outburst + +Note (6): the * indicates, in case of object type SU, that the orbital + period has been estimated from the known superhump period using the + empirical relation given by B. Stolz and R. Schoembs (1984A&A...132..187S). +-------------------------------------------------------------------------------- + +Byte-by-byte Description of file: pcbdata.dat +-------------------------------------------------------------------------------- + Bytes Format Units Label Explanations +-------------------------------------------------------------------------------- + 1- 12 A12 --- Name Object name (G1) + 14 A1 --- whoswho [*] * indicating that further alternative + designations are in the whoswho1.dat file + 16- 27 A12 --- AltName A frequently used alternative name (G2) + 30- 31 I2 h RAh ? Right Ascension J2000 (hours) + 33- 34 I2 min RAm ? Right Ascension J2000 (minutes) + 36- 39 F4.1 s RAs ? Right Ascension J2000 (seconds) + 41 A1 --- DE- ? Declination J2000 (sign) + 42- 43 I2 deg DEd ? Declination J2000 (degrees) + 45- 46 I2 arcmin DEm ? Declination J2000 (minutes of arc) + 48- 49 I2 arcsec DEs ? Declination J2000 (seconds of arc) + 51 A1 arcsec epos [0-9P] Position accuracy (G3) + 53- 54 A2 --- Type1 Object type (3) + 55 A1 --- u_Type1 [?] Uncertainty flag for object type + 57- 58 A2 --- Type2 Object type (3) + 59 A1 --- u_Type2 [?] Uncertainty flag for object type + 61- 62 A2 --- Type3 Object type (3) + 63 A1 --- u_Type3 [?] Uncertainty flag for object type + 65- 66 A2 --- Type4 Object type (3) + 70- 73 F4.1 mag mag1 ? 
Apparent V (or other) magnitude at maximum + brightness outside eclipse + 74 A1 --- f_mag1 [:BbpgRiIK] uncertainty flag/band for mag1 + 76 A1 --- l_mag2 [><] Limit flag for magnitude mag2 + 77- 80 F4.1 mag mag2 ? Apparent V (or other) magnitude at minimum + brightness, in case of eclipses magnitude + at mideclipse. + 81 A1 --- f_mag2 [:BgRiI] uncertainty flag/band for mag2 + 82- 90 F9.6 d Orb.Per Orbital period + 91 A1 --- u_Orb.Per Uncertainty flag for Orb.Per + 92-101 F10.4 s 2.__Per ? Spin period of the accretor (white dwarf + or neutron star). + 103 I1 --- EB [1,2]? Flag indicating the occurrence of + eclipses (G4) + 104 A1 --- u_EB [?] Uncertainty flag for EB + 106 I1 --- SB [1,2]? Flag specifying the type of + spectroscopic binary (G5) + 109-115 A7 --- SpType2 Spectral type of the secondary (G6) + 117-123 A7 --- SpType1 Spectral type of the primary (G6) + 125 A1 --- l_E [><] Limit flag for the orbital eccentricity + 126-129 F4.2 --- E ? Orbital eccentricity + 130 A1 --- u_E Uncertainty flag on orbital eccentricity + 132-135 F4.2 --- e_E ? Error of orbital eccentricity + 137 A1 --- l_M1/M2 Limit flag for mass ratio M1/M2 + 138-141 F4.2 --- M1/M2 ? Mass ratio M1/M2 + 142 A1 --- u_M1/M2 Uncertainty flag on mass ratio M1/M2 + 144-147 F4.2 --- e_M1/M2 ? Error of M1/M2 + 149 A1 --- l_Incl Limit flag for the orbital inclination + 150-153 F4.1 deg Incl ? Orbital inclination + 154 A1 --- u_Incl Uncertainty flag for the inclination + 156-159 F4.1 deg e_Incl ? Error of orbital inclination + 161 A1 --- l_M1 Limit flag for primary mass M1 + 162-166 F5.3 solMass M1 ? Primary mass M1 + 167 A1 --- u_M1 Uncertainty flag for primary mass M1 + 169-173 F5.3 solMass e_M1 ? Error of primary mass M1 + 175 A1 --- l_R1 Limit flag for primary radius R1 + 176-180 F5.3 solRad R1 ? Primary radius + 181 A1 --- u_R1 Uncertainty flag [:] for primary radius R1 + 183-187 F5.3 solRad e_R1 ? Error of primary radius R1 + 189 A1 --- l_M2 Limit flag for secondary mass M2 + 190-194 F5.3 solMass M2 ? Secondary mass M2 + 195 A1 --- u_M2 Uncertainty flag for secondary mass M2 + 197-201 F5.3 solMass e_M2 ? Error of secondary mass M2 + 203 A1 --- l_R2 Limit flag on secondary radius R2 + 204-209 F6.4 solRad R2 ? Secondary radius R2 + 210 A1 --- u_R2 Uncertainty flag [:] for secondary radius R2 + 212-217 F6.4 solRad e_R2 ? 
Error of secondary radius +-------------------------------------------------------------------------------- +Note (3): Object type coarsely characterised using the following abbreviations: + CP = coherent pulsator, contains a coherently pulsating white dwarf or + subdwarf + DD = system consists of two degenerate components + DS = detached system + EC = contains a pulsating sdB star of the EC 14026-2647 type + GC = source in a globular cluster + GP = sdB-star with g-mode pulsations + GW = contains a pulsating white dwarf of the GW Vir = PG 1159-035 type + PN = central star of a planetary nebula + RS = system shows RS CVn-like chromospheric activity + SC = sub-stellar companion +-------------------------------------------------------------------------------- + +Byte-by-byte Description of file: *refs.dat +-------------------------------------------------------------------------------- + Bytes Format Units Label Explanations +-------------------------------------------------------------------------------- + 1- 12 A12 --- Name Object name + 14- 32 A19 --- BibCode BibCode + 34-302 A269 --- Text Text of reference +-------------------------------------------------------------------------------- + +Byte-by-byte Description of file: whoswho1.dat +-------------------------------------------------------------------------------- + Bytes Format Units Label Explanations +-------------------------------------------------------------------------------- + 1 A1 --- B [B] when the name is based on B1950 position + 2- 25 A24 --- Name Object name + 27 A1 --- --- [=] + 29-212 A184 --- AltName Other name, or comment (1) +-------------------------------------------------------------------------------- +Note (1): Catalogue designations involving the equatorial coordinates + are given in the following format: + HHMM+DDMM (catalogue acronyms) if the position is given in B1950 + coordinates -- a 'B' is then present in byte 1. + JHHMM+DDMM (catalogue acronyms) if the position is given in J2000 + coordinates. + Here HHMM is the truncated right ascension in hours (HH) and + minutes (MM), DDMM the truncated declination in degrees (DD) + and arcminutes (MM), and + the sign of the declination. +-------------------------------------------------------------------------------- + +Byte-by-byte Description of file: whoswho2.dat +-------------------------------------------------------------------------------- + Bytes Format Units Label Explanations +-------------------------------------------------------------------------------- + 1 A1 --- B [B] when the name is based on B1950 position + 2- 53 A52 --- cName Common or Provisional designation (G2) + 55- 56 A2 --- --- [->] + 58-101 A44 --- Name Usual name +-------------------------------------------------------------------------------- + +Byte-by-byte Description of file: whoswho5.dat +-------------------------------------------------------------------------------- + Bytes Format Units Label Explanations +-------------------------------------------------------------------------------- + 1- 10 A10 --- Abbr Catalogue abbreviation + 14- 73 A60 --- Text Text of References +-------------------------------------------------------------------------------- + +Global Notes: + +Note (G1): Wherever possible, the designation of the object given in the + General Catalogue of Variable Stars (Cat. ) is used here. + +Note (G2): The acronyms used in lists are detailed in the last part of + the file "whoswho.txt" + +Note (G3): The number indicates the accuracy of position in seconds of arc. 
+ If the positional error is larger than 9arcsec, or unknown, this field + is left blank. The letter [P] indicates an object with a large proper + motion. + +Note (G4): The EB flag means: + EB= : (blank) no eclipses observed. + EB=1: 1 eclipse per orbital revolution observed. + EB=2: 2 eclipses per orbital revolution observed. + EB=D: periodic eclipse-like dips observed. + +Note (G5): The SB flag means: + SB=1: single-line spectroscopic binary + SB=2: double-line spectroscopic binary + +Note (G6): Spectral types are given in the following format: + [Spectral class/Luminosity class], where the usual roman numerals for + the latter are replaced by the corresponding arabic numerals, i.e. + I = 1, II = 2, III = 3, IV = 4, V = 5, VI = 6. +-------------------------------------------------------------------------------- + +History: + * 16-Apr-2003: 7th Edition + * 28-Aug-2003: 7.1 Edition + * 12-Mar-2004: 7.2 Edition + * 01-Sep-2004: 7.3 Edition + * 24-Mar-2005: 7.4 Edition + * 25-Jul-2005: 7.5 Edition + * 01-Feb-2006: 7.6 Edition + * 29-May-2006: 7.6rev1 Edition (no new object) + * 07-Dec-2006: 7.7 Edition + * 17-Aug-2007: 7.8 Edition + * 18-Mar-2008: 7.9 Edition + * 26-Jul-2008: 7.10 Edition + * 06-Apr-2009: 7.11 Edition + * 18-Sep-2009: 7.12 Edition + * 20-Mar-2010: 7.13 Edition + * 05-Nov-2010: 7.14 Edition + * 23-Mar-2011: 7.15 Edition + +References: + Ritter H., 1984A&AS...57..385R (3rd edition) + Ritter H., 1987A&AS...70..335R (4th edition) + Ritter H., 1990A&AS...85.1179R (5th edition) (Catalogue: V/59) + Ritter H., Kolb U., 1995, in "X-ray Binaries", Lewin W.H.G, + van Paradijs J., van den Heuvel E.P. (eds), + Cambridge Univ. Press, p. 578 (Cat. ) +================================================================================ +(End) H. Ritter, U. Kolb [MPA Garching], Francois Ochsenbein [CDS] 05-Nov-2010 diff --git a/astropy/io/ascii/tests/t/cds/glob/lmxbrefs.dat b/astropy/io/ascii/tests/t/cds/glob/lmxbrefs.dat new file mode 100644 index 0000000..17f25f9 --- /dev/null +++ b/astropy/io/ascii/tests/t/cds/glob/lmxbrefs.dat @@ -0,0 +1,291 @@ +LZ Aqr 2002ApJ...581..570T Tomsick, J.A., Heindl, W.A., Chakrabarty, D., Kaaret, P. 2002, ApJ 581, 570 (Orb.Per., Spectr2) +LZ Aqr 2003ApJ...585..443S Shahbaz, T., et al. (7 authors) 2003, ApJ 585, 443 (M1/M2, Incl, M1, M2) +LZ Aqr 2004ApJ...610..933T Tomsick, J.A., Gelino, D.M., Halpern, J.P., Kaaret, P. 2004, ApJ 610, 933 +V1333 Aql 1998IAUC.6806Q...1M Chevalier, C., Ilovaisky, S.A. 1998, IAU Circ. No. 6806 (Orb.Per.) +V1333 Aql 1999A&A...347L..51C Chevalier, C., Ilovaisky, S.A., Leisy, P., Patat, F. 1999, A&A 347, L51 (Spectr2) +V1333 Aql 2007MNRAS.375.1463C Cornelisse, R., et al. (7 authors) 2007, MNRAS 375, 1463 (M1) +V1333 Aql 2008ApJ...674L..41C Casella, P., Altamirano, D., Wijnands, R., van der Klis, M. 2008, ApJ 674, L41 (3. Per.) +V1405 Aql 2001ApJ...549L..85G Galloway, D.K., Chakrabarty, D., Muno, M.P., Savov, P. 2001, ApJ 549, L85 (3. Per.) +V1405 Aql 2001MNRAS.322..827H Homer, L., et al. (7 authors) 2001, MNRAS 322, 827 (Orb.Per., 2. Per.) +V1405 Aql 2006ApJ...647.1341I Iaria, R., Di Salvo, T., Lavagetto, G., Robba, N.R., Burderi, L. 2006, ApJ 647, 1341 +V1405 Aql 2006MNRAS.370..255N Nelemans, G., Jonker, P.G., Steeghs, D. 2006, MNRAS 370, 255 +V1405 Aql 2008ApJ...680.1405H Hu, C.-P., Chou, Y., Chung, Y.-Y. 2008, ApJ 680, 1405 (Orb.Per.) +V1408 Aql 2010MNRAS.402.2671R Russell, D.M., et al. (6 authors) 2010, MNRAS 402, 2671 +V1408 Aql ................... Bayless, A., Robinson, E.L., Mason, P.A., Robertson, P. 
2011, ApJ, in press = arXiv:1004.4904 (Orb.Per.) +V1487 Aql 1994Natur.371...46M Mirabel, I.F., Rodriguez, L.F. 1994, Nat 371, 46 (Incl) +V1487 Aql 2004A&A...414L..13H Harlaftis, E.T., Greiner, J. 2004, A&A 414, L13 (Spectr2, M1/M2, Incl, M1, M2) +V1487 Aql 2007ApJ...668..430D Dhawan, V., Mirabel, I.F., Ribo, M., Rodrigues, I. 2007, ApJ 668, 430 +V1487 Aql 2007ApJ...657..409N Neil, E.T., Bailyn, C.D., Cobb, B.E. 2007, ApJ 657, 409 (Orb.Per., 2. Per.) +V801 Ara 1986ApJ...305..246F Fujimoto, M.Y., Taam, R.E. 1986, ApJ 305, 246 (M1) +V801 Ara 2002ApJ...568..279G Giles, A.B., Hill, K.M., Strohmayer, T.E., Cummings, N. 2002, ApJ 568, 279 (Orb.Per., 3. Per) +V801 Ara 2002ApJ...577..377M Strohmayer, T.E., Markwardt, C.B. 2002, ApJ 577, 377 (3. Per.) +V801 Ara 2006MNRAS.373.1235C Casares, J., et al. (7 authors) 2006, MNRAS 373, 1235 (Orb.Per.) +V821 Ara 2002MNRAS.331.1065C Chaty, S., et al. (7 authors) 2002, MNRAS 331, 1065 (Spectr2) +V821 Ara 2003ApJ...583L..95H Hynes, R.I., Steeghs, D., Casares, J., Charles, P.A., O'Brien, K. 2003, ApJ 583, L95 (Orb.Per., M1/M2, M1) +V821 Ara 2008ApJ...679L.113M Miller, J.M., et al. (9 authors) 2008, ApJ 679, L113 +V821 Ara 2008MNRAS.385.2205M Munoz-Darias, T., Casares, J., Martinez-Pais, I.G. 2008, MNRAS 385, 2205 +V395 Car 1999A&A...344..101S Shahbaz, T., et al. (6 authors) 1999, A&A 344, 101 (Spectr2) +V395 Car 2004ApJ...616L.123S Shahbaz, T., et al. (7 authors) 2004, ApJ 616, L123 (Orb.Per.) +V395 Car 2005MNRAS.356..621J Jonker, P.G., Steeghs, D., Nelemans, G., van der Klis, M. 2005, MNRAS 356, 621 (Orb.Per.) +V395 Car 2007A&A...474..969S Shahbaz, T., Watson, C.A. 2007, A&A 474, 969 +V395 Car 2007ApJ...669L..85S Steeghs, D., Jonker, P.G. 2007, ApJ 669, L85 (M1/M2, Incl, M1, M2) +V822 Cen 2007A&A...470.1033C Casares, J., Bonifacio, P., Gonzalez Hernandez, J.I., Molaro, P., Zoccali, M. 2007, A&A 470, 1033 (Orb.Per., M1/M2, Incl, M1, M2) +V822 Cen 2010ApJ...716.1105K Khargaria, J., Froning, C.S., Robinson, E.L. 2010, ApJ 716, 1105 (Spectr2, Incl, M1) +BW Cir 2004ApJ...613L.133C Casares, J., Zurita, C., Shahbaz, T., Charles, P.A., Fender, R. 2004, ApJ 613, L133 ( Spectr2) +BW Cir 2009ApJS..181..238C Casares, J., et al. (11 authors) 2009, ApJS 181, 238 (Orb.Per., M1/M2, Incl, M1, M2) +V691 CrA 2003MNRAS.339..663J Jonker, P.G., van der Klis, M., Groot, P.J. 2002, MNRAS 339, 663 (Incl) +V691 CrA 2005ApJ...635..502M Munoz-Darias, T., Casares, J., Martinez-Pais, I.G. 2005, ApJ 635, 502 (M1/M2, M1, M2) +V691 CrA 2010ApJ...709..251B Bayless, A.J., Robinson, E.L., Cornell, M.E., Hynes, R.I., Ashcraft, T.A. 2010, ApJ 709, 251 (Orb.Per.) +V691 CrA 2010A&A...515A...1A Burderi, L., et al. (7 authors) 2010, A&A 515, A44 (Orb.Per.) +V691 CrA 2010MNRAS.409..755J Jain, C., Paul, B., Dutta, A. 2010, MNRAS 409, 755 (Orb.Per., 3. Per.) +UW CrB 2008ApJ...685..428M Mason, P.A., Robinson, E.L., Gray, C.L., Hynes, R.I. 2008, ApJ 685, 428 (Orb.Per., 2. Per.) +UW CrB 2009MNRAS.394..892H Hakala, P., Hjalmarsdotter, L., Hannikainen, D., Muhli, P. 2009, MNRAS 394, 892 (Orb.Per., 2. Per.) +UW CrB 2009ApJ...690.1145N Narita, T., Palmieri, J.B., Tow, E.S. 2009, ApJ 690, 1145 +V404 Cyg 1993MNRAS.265..834C Casares, J., Charles, P.A., Naylor, T., Pavlenko, E.P. 1993, MNRAS 265, 834 (2. Per.) +V404 Cyg 1994MNRAS.271L...5C Casares, J., Charles, P.A. 1994, MNRAS 271, L5 (Orb.Per., M1/M2) +V404 Cyg 1994MNRAS.271L..10S Shahbaz, T., et al. (6 authors) 1994, MNRAS 271, L10 (Incl, M2) +V404 Cyg 1996MNRAS.282..977S Shahbaz, T., Bandyopadhyay, R., Charles, P.A., Naylor, T. 
1996, MNRAS 282, 977 (M1) +V404 Cyg 2009ApJ...706L.230M Miller-Jones, J.C.A., et al. (7 authors) 2009, ApJ 706, L230 +V404 Cyg 2010ApJ...716.1105K Khargaria, J., Froning, C.S., Robinson, E.L. 2010, ApJ 716, 1105 (Spectr2, Incl, M1) +V1341 Cyg 1998ApJ...493L..39C Casares, J., Charles, P.A., Kuulkers, E. 1997, ApJ 493, L39 (Spectr2) +V1341 Cyg 2009MNRAS.395.2029E Elebert, P., Callanan, P.J., Torres, M.A.P., Garcia, M.R. 2009, MNRAS 395, 2029 +V1341 Cyg 2010MNRAS.401.2517C Casares, J., Gonzalez Hernandez, J.I., Israelian, G., Rebolo, R. 2010, MNRAS 401, 2517 (Orb.Per., M1/M2, Incl, M1, M2) +V1341 Cyg ................... Sazonov, A.N. 2010, arXiv:1011.3980 +V1727 Cyg 1996MNRAS.282.1437S Shahbaz, T. et al. (7 authors) 1996, MNRAS 282, 1437 (Spectr2) +V1727 Cyg 2007A&A...476..301B Bozzo, E., et al. (11 authors) 2007, A&A 476, 301 (Orb.Per.) +V1727 Cyg 2008A&A...485..773B Bothwell, M.S., Torres, M.A.P., Garcia, M.R., Charles, P.A. 2008, A&A 485, 773 +V1727 Cyg 2009ApJ...706.1069L Lin, J., Nowak, M.A., Chakrabarty, D. 2009, ApJ 706, 1069 (Orb.Per.) +IL Lup 1998ApJ...499..375O Orosz, J.A., Jain, R.K., Bailyn, C.D., McClintock, J.E., Remillard, J.A. 1998, ApJ 499, 375 (Spectr2, M2) +IL Lup ................... Orosz, J.A. 2003, in: A Massive Star Odyssey: From Main Sequence to Supernova, K.A. van der Hucht, A. Herrero, C. Esteban (eds.), IAU Symp. No. 212, ASP, San Francisco, p. 365 (Orb.Per., M1/M2, Incl, M1) +IL Lup 2004ApJ...615..880B Buxton, M.M., Bailyn, C.D. 2004, ApJ 615, 880 +IL Lup 2004ApJ...610..378P Park, S.Q., et al. (11 authors) 2004, ApJ 610, 378 +V616 Mon 2007ApJ...663.1215F Froning, C.S., Robinson, E.L., Bitner, M.A. 2007, ApJ 663, 1215 (Spectr2, Incl) +V616 Mon 2007AJ....133..162H Harrison, T.E., Howell, S.B., Szkody, P., Cordova, F.A. 2007, AJ 133, 162 (Spectr2) +V616 Mon 2008ApJ...673L.159C Cantrell, A.G., Bailyn, C.D., McClintock, J.E., Orosz, J.A. 2008, ApJ 673, L159 +V616 Mon 2008MNRAS.384..849N Neilsen, J., Steeghs, D., Vrtilek, S.D. 2008, MNRAS 384, 849 +V616 Mon 2010ApJ...710.1127C Cantrell, A.G., et al. (9 authors) 2010, ApJ 710, 1127 (Spectr2, M1/M2, Incl, M1, M2) +V616 Mon 2010A&A...516A...1L Gonzalez Hernandez, J.I., Casares, J. 2010, A&A 516, A58 (Orb.Per.) +GR Mus 1987ApJ...313..792M Motch, C., Pedersen, H., Beuermann, K., Pakull, M.W., Courvoisier, T.J.-L. 1987, ApJ 313, 792 (Incl) +GR Mus 2007MNRAS.380.1182B Barnes, A.D., et al. (7 authors) 2007, MNRAS 380, 1182 (M1/M2, M1, M2) +GR Mus 2007MNRAS.377..198B Bhattacharyya, S. 2007, MNRAS 377, 198 (3. Per.) +GR Mus 2009A&A...493..145D Diaz Trigo, M., et al. (6 authors) 2009, A&A 493, 145 (Orb.Per.) +GU Mus 1994ApJ...436..848O Orosz, J., Bailyn, C.D., Remillard, R.A., McClintock, J.E., Foltz, C.B. 1994, ApJ 436, 848 (M1/M2) +GU Mus 1996MNRAS.282..191O O'Donoghue, D., Charles, P.A. 1996, MNRAS 282, 191 (2. Per.) +GU Mus 1996ApJ...468..380O Orosz, J.A., Bailyn, C.D., McClintock, J.E., Remillard, R.A. 1996, ApJ 468, 380 (Orb.Per., M2) +GU Mus 1997NewA....1..299C Casares, J., Martin, E.L., Charles, P.A., Molaro, P., Rebolo, R. 1997, New Astron. 1, 299 (Orb.Per., Spectr2) +GU Mus 2001AJ....122..971G Gelino, D.M., Harrison, T.E., McNamara, B.J. 2001, AJ 122, 971 (Incl, M1) +GU Mus 2002A&A...391..993S Sutaria, F.K., et al. (10 authors) 2002, A&A 391, 993 +GU Mus 2003MNRAS.340..447H Hynes, R.I., et al. (6 authors) 2003, MNRAS 340, 447 +QX Nor 2002ApJ...568..901W Wachter, S., Hoard, D.W., Bailyn, C.D., Corbel, S., Kaaret, P. 2002, ApJ 568, 901 (Orb.Per.) +QX Nor ................... Strohmayer, T., Bildsten, L. 
2006, in: Compact Stellar X-Ray Sources, W.H.G. Lewin, and M. van der Klis (eds.), Cambridge Astrophysics Series 39, Cambridge University Press, Cambridge, p. 113 (3. Per.) +QX Nor 2008A&A...479..177K Keek, L., et al. (6 authors) 2008, A&A 479, 177 +QX Nor 2010ApJ...712..964G Guever, T., Oezel, F., Cabrera-Lavers, A., Wroblewski, P. 2010, ApJ 712, 964 (M1) +V381 Nor 2002ApJ...568..845O Orosz, J.A., et al. (9 authors) 2002, ApJ 568, 845 (Orb.Per., Spectr2, M1/M2, Incl, M1, M2) +V381 Nor 2004MNRAS.353..980K Kubota, A., Done, C. 2004, MNRAS 353, 980 +V2107 Oph 1996ApJ...459..226R Remillard, R.A., Orosz, J.A., McClintock, J.E., Bailyn, C.D. 1996, ApJ 459, 226 (Orb.Per.) +V2107 Oph 1997PASP..109..461F Filippenko, A.V., Matheson, T., Leonard, D.C., Barth, A.J., van Dyk, S.D. 1997, PASP 109, 461 (Incl, M1, M2) +V2107 Oph 1997AJ....114.1170H Harlaftis, E.T., Steeghs, D., Horne, K., Filippenko, A.V. 1997, AJ 114, 1170 (Spectr2, M1/M2) +V2134 Oph 1984ApJ...283..765C Cominsky, L.R., Wood, K.S. 1984, ApJ 283, 765 (Incl) +V2134 Oph 2001A&A...376..532O Oosterbroek, T., Parmar, A.N., Sidoli, L., in 't Zand, J.J.M., Heise, J. 2001, A&A 376, 532 (Orb.Per.) +V2134 Oph 2006MNRAS.372..479C Cackett, E.M., et al. (6 authors) 2006, MNRAS 372, 479 +V2216 Oph 2006MNRAS.368..781K Kong, A.K.H., Charles, P.A., Homer, L., Kuulkers, E., O'Donoghue, D. 2006, MNRAS 368, 781 (Orb.Per.) +V2216 Oph 2007MNRAS.380.1219C Cornelisse, R., et al. (7 authors) 2007, MNRAS 380, 1219 (Orb.Per.) +V2216 Oph 2009ApJ...696.1987H Harris, R.J., et al. (6 authors) 2009, ApJ 696, 1987 (Orb.Per.) +V2293 Oph 1996A&A...314..123M Masetti, N., Bianchini, A., Bonibaker, J., Della Valle, M., Vio, R. 1996, A&A 314, 123 (2. Per.) +V2293 Oph 2002MNRAS.331.1065C Chaty, S., et al. (7 authors) 2002, MNRAS 331, 1065 (Spectr2) +V1055 Ori 2006MNRAS.370..255N Nelemans, G., Jonker, P.G., Steeghs, D. 2006, MNRAS 370, 255 (Orb.Per., 2. Per.) +V1055 Ori 2008PASP..120..848S Shahbaz, T., Watson, C.A., Zurita, C., Villaver, E., Hernandez-Peralta, H. 2008, PASP 120, 848 +V1055 Ori 2008ApJ...672L..37S Strohmayer, T.E., Markwardt, C.B., Kuulkers, E. 2008, ApJ 672, L37 (3. Per.) +V518 Per 2000MNRAS.317..528W Webb, N.A., Naylor, T., Ioannou, Z., Charles, P.A., Shahbaz, T. 2000, MNRAS 317, 528 (Orb.Per., M1/M2) +V518 Per 2003ApJ...599.1254G Gelino, D.M., Harrison, T.E. 2003, ApJ 599, 1254 (Spectr2, Incl, M1, M2) +V518 Per 2007MNRAS.374..657R Reynolds, M.T., Callanan, P.J., Filippenko, A.V. 2007, MNRAS 374, 657 +V518 Per 2009PASJ...61....1I Kato, T., et al. (130 authors) 2009, PASJ 61, S395 (2. Per.) +V4134 Sgr 1985MNRAS.216.1033M Mason, K.O., Parmar, A.N., White, N.E. 1985, MNRAS 216, 1033 (Orb.Per.) +V4134 Sgr 2006ApJ...641..410K Kaaret, P., et al. (7 authors) 2006, ApJ 641, 410 +V4580 Sgr 2008MNRAS.391.1619D Deloye, C.J., Heinke, C.O., Taam, R.E., Jonker, P.G. 2008, MNRAS 391, 1619 +V4580 Sgr 2008MNRAS.389.1851S Di Salvo, T., Burderi, L., Riggio, A., Papitto, A., Menna, M.T. 2008, MNRAS 389, 1851 +V4580 Sgr 2008ApJ...675.1468H Hartman, J.M., et al. (9 authors) 2008, ApJ 675, 1468 (Orb.Per., 3. Per.) +V4580 Sgr 2009A&A...496L..17B Burderi, L., et al. (7 authors) 2009, A&A 496, L17 (Orb.Per., 3. Per.) +V4580 Sgr 2009ApJ...694L..21C Cackett, E.M., et al. (7 authors) 2009, ApJ 694, L21 (Incl) +V4580 Sgr 2009A&A...495L...1C Cornelisse, R., et al. (9 authors) 2009, A&A 495, L1 +V4580 Sgr 2009MNRAS.395..884E Elebert, P., et al. (11 authors) 2009, MNRAS 395, 884 (M1/M2, M1, M2) +V4580 Sgr 2009ApJ...702.1673H Hartman, J.M., et al. (7 authors) 2009, ApJ 702, 1673 (Orb.Per., 3.
Per.) +V4580 Sgr 2009ApJ...691.1035H Heinke, C.O., Jonker, P.G., Wijnands, R., Deloye, C.J., Taam, R.E. 2009, ApJ 691, 1035 +V4580 Sgr 2009MNRAS.400..492I Ibragimov, A., Poutanen, J. 2009, MNRAS 400, 492 (Incl) +V4580 Sgr 2009ApJ...698L..60P Patruno, A., Wijnands, R., van der Klis, M. 2009, ApJ 698, L60 (3. Per.) +V4580 Sgr 2009ApJ...694.1115W Wang, Z., Bassa, C., Cumming, A., Kaspi, V.M. 2009, ApJ 694, 1115 +V4634 Sgr 2005ApJ...634.1261T Thompson, T.W.J., Rothschild, R.E., Tomsick, J.A., Marshall, H.L. 2005, ApJ 634, 1261 (3. Per.) +V4634 Sgr 2010AstL...36..738M Meshcheryakov, A.V., Revnivtsev, M.G., Pavlinsky, M.N., Khamitov, I., Bikmaev, I.F. 2010, Astron. Letters 36, 738 (Orb.Per.) +V4641 Sgr 2001IBVS.5068....1G Goranskij, V.P. 2001, IBVS No. 5068 (Orb.Per.) +V4641 Sgr 2001ApJ...555..489O Orosz, J., et al. (9 authors) 2001, ApJ 555, 489 (Orb.Per.) +V4641 Sgr ................... Orosz, J.A. 2003, in: A Massive Star Odyssey: From Main Sequence to Supernova, K.A. van der Hucht, A. Herrero, C. Esteban (eds.), IAU Symp. No. 212, ASP, San Francisco, p. 365 (M1/M2, Incl, M1, M2) +V4641 Sgr 2006PASJ...58..595S Sadakane, K., et al. (13 authors) 2006, PASJ, 58, 595 +V5511 Sgr 2007MNRAS.375..971P Papitto, A., et al. (6 authors) 2006, MNRAS 375, 971 (Orb.Per., 3. Per.) +V5511 Sgr 2008MNRAS.391..254C Chung, C.T.Y., Galloway, D.K., Melatos, A. 2008, MNRAS 391, 254 (Orb.Per., 3. Per.) +V5511 Sgr 2009ApJ...698L..60P Patruno, A., Wijnands, R., van der Klis, M. 2009, ApJ 698, L60 (3. Per.) +V818 Sco 2003A&A...398L..25M Mirabel, I.F., Rodrigues, I. 2003, A&A 398, L25 +V818 Sco 2003PASP..115..739V Vanderlinde, K.W., Levine, A.M., Rappaport, S.A. 2003, PASP 115, 739 (Orb.Per.) +V926 Sco 1998A&A...332..561A Augusteijn, T., van der Hooft, F., de Jong, J.A., van Kerkwijk, M.H., van Paradijs, J. 1998, A&A 332, 561 (Orb.Per.) +V926 Sco 2006MNRAS.373.1235C Casares, J., et al. (7 authors) 2006, MNRAS 373, 1235 (Orb.Per.) +V1033 Sco 2001ApJ...554.1290G Greene, J., Bailyn, C.D., Orosz, J.A. 2001, ApJ 554, 1290 (Orb.Per.) +V1033 Sco 2003MNRAS.339.1031S Shahbaz, T. 2003, MNRAS 339, 1031 (M1/M2, Incl, M1, M2) +V1033 Sco 2006A&A...457..249F Foellmi, C., Depagne, E., Dall, T.H., Mirabel, I.F. 2006, A&A 457, 249 (Spectr2) +V1033 Sco 2008A&A...478..203G Gonzalez Hernandez, J.I., Rebolo, R., Israelian, G. 2008, A&A 478, 203; Erratum in A&A 499, 891 (2009) (Orb.Per.) +V1101 Sco 1997A&A...325.1035B Barziv, O., et al. (8 authors) 1997, A&A 325, 1035 +V1101 Sco 1997ApJ...490..401W Wachter, S. 1997, ApJ 490, 401 (Orb.Per.) +MM Ser ................... Ponman, T.J. 1981, Space Sci. Rev. 30, 353 (2. Per.) +MM Ser ................... Mason, K.O. 1986, in: Physics of Accretion onto Compact Objects, K.O. Mason, M.G. Watson and N.E. White (eds.), Lecture Notes in Physics 266, Springer Verlag, Heidelberg, p. 29 (Orb.Per.) +MM Ser 2004MNRAS.348..100H Hynes, R.I., et al. (7 authors) 2004, MNRAS 348, 100 +KZ TrA 1981ApJ...244.1001M Middleditch, J., Mason, K.O., Nelson, J.E., White, N.E. 1981, ApJ 244, 1001 (Orb.Per.) +KZ TrA 2007ApJ...660..605K Krauss, M.I., Schulz, N.S., Chakrabarty, D., Juett, A.M., Cottam, J. 2007, ApJ 660, 605 (3. Per.) +KZ TrA 2010ApJ...708.1500C Camero-Arranz, A., Finger, M.H., Ikhsanov, N.R., Wilson-Hodge, C.A., Beklen, E. 2010, ApJ 708, 1500 (3. Per.) +LU TrA ................... Brammer, G., Wachter, S., Hoard, D.W., Smale, A.P. 2001, BAAS 33(4), Abstr. 6.1 (Orb.Per.) +LU TrA 2006MNRAS.370..255N Nelemans, G., Jonker, P.G., Steeghs, D. 2006, MNRAS 370, 255 +KV UMa 2006ApJ...642..438G Gelino, D.M., et al.
(6 authors) 2006, ApJ 642, 438 (Spectr2, M1/M2, Incl, M1, M2) +KV UMa 2008ApJ...679..732H Gonzalez Hernandez, J.I., et al. (8 authors) 2008, ApJ 679, 732 (Orb.Per., Spectr2) +KV UMa 2009MNRAS.399..539C Calvelo, D.E., et al. (7 authors) 2009, MNRAS 399, 539 (M1/M2) +KV UMa 2009PASJ...61....1I Kato, T., et al. (130 authors) 2009, PASJ 61, S395 (2. Per.) +MM Vel 1999PASP..111..969F Filippenko, A.V., et al. (6 authors) 1999, PASP 111, 969 (Orb.Per., M1/M2, Incl, M1, M2) +MM Vel 2003MNRAS.340..447H Hynes, R.I., et al. (6 authors) 2003, MNRAS 340, 447 +MM Vel ................... Macias, P., et al. (8 authors) 2011, BAAS 43, abstract 143.04 +UY Vol 2009MNRAS.399.2055B Bassa, C.G., Jonker, P.G., Steeghs, D., Torres, M.A.P. 2009, MNRAS 399, 2055 (M1) +UY Vol ................... Galloway, D.K., Chakrabarty, D., Lin, J.R. 2009, ATel #2094 (3. Per.) +UY Vol 2010ApJ...711L.148G Galloway, D.K., Chakrabarty, D., Lin, J.R., Hartman, J.M. 2010, ApJ 711, L148 (3. Per.) +UY Vol 2009ApJ...697L..14H Hynes, R.I., Jones, E.D. 2009, ApJ 697, L14 +UY Vol 2009MNRAS.394L.136M Munoz-Darias, T., et al. (7 authors) 2009, MNRAS 394, L136 +UY Vol 2009ApJS..183..156W Wolff, M.T., Ray, P.S., Wood, K.S., Hertz, P.L. 2009, ApJS 183, 156 (Orb.Per.) +QZ Vul 1996PASP..108..762H Harlaftis, E., Horne, K., Filippenko, A.V. 1996, PASP 108, 762 (Spectr2) +QZ Vul 1996MNRAS.282..191O O'Donoghue, D., Charles, P.A. 1996, MNRAS 282, 191 (2. Per.) +QZ Vul ................... Orosz, J.A. 2003, in: A Massive Star Odyssey: From Main Sequence to Supernova, K.A. van der Hucht, A. Herrero, C. Esteban (eds.), IAU Symp. No. 212, ASP, San Francisco, p. 365 (M1/M2, Incl, M1, M2) +QZ Vul 2004AJ....127..481I Ioannou, Z., Robinson, E.L., Welsh, W.F., Haswell, C.A. 2004, AJ 127, 481 (Orb.Per.) +V406 Vul 2001IAUC.7644....1C Filippenko, A.V., Chornock, R. 2001, IAU Circ. No. 7644 (Orb.Per., Spectr2, M1) +V406 Vul 2004PASJ...56....1S Uemura, M., et al. (7 authors) 2004, PASJ 56, S147 (2. Per.) +AC 211 2003A&A...399..211I Ioannou, Z., et al. (7 authors) 2003, A&A 399, 211 (Orb.Per.) +AC 211 2004MNRAS.350..649V van Zyl, L., et al. (6 authors) 2004, MNRAS 350, 649 (M1/M2, Incl, M1, M2) +AC 211 2006ApJS..163..372W Wen, L., Levine, A.M., Corbet, R.H.D., Bradt, H.V. 2006, ApJS 163, 372 (Orb.Per.) +LMC X-2 1991ApJ...373..228C Cowley, A.P., Schmidtke, P.C., Crampton, D., Hutchings, J.B., Bolte, M. 1991, ApJ 373, 228 (2. Per.) +LMC X-2 2007MNRAS.381..194C Cornelisse, R., et al. (7 authors) 2007, MNRAS 381, 194 (Orb.Per.) +M 51 X-7 2002ApJ...581L..93L Liu, J.-F., Bregman, J.N., Irwin, J., Seitzer, P. 2002, ApJ 581, L93 +M 51 X-7 2005ApJ...635..198D Dewangan, G.C., Griffiths, R.E., Choudhury, M., Miyaji, T., Schurch, N.J. 2005, ApJ 635, 198 (Orb.Per.) +NGC 104-W37 2005ApJ...622..556H Heinke, C.O., Grindlay, J.E., Edmonds, P.D. 2005, ApJ 622, 556 (Orb.Per.) +NGC 104-X5 2002ApJ...564L..17E Edmonds, P.D., Heinke, C.O., Grindlay, J.E., Gilliland, R.L. 2002, ApJ 564, L17 (Orb.Per.) +NGC 104-X5 2003ApJ...588..452H Heinke, C.O., Grindlay, J.E., Lloyd, D.A., Edmonds, P.D. 2003, ApJ 588, 452 (Orb.Per.) +NGC 104-X7 2002ApJ...564L..17E Edmonds, P.D., Heinke, C.O., Grindlay, J.E., Gilliland, R.L. 2002, ApJ 564, L17 (Orb.Per.) +NGC 104-X7 2003ApJ...588..452H Heinke, C.O., Grindlay, J.E., Lloyd, D.A., Edmonds, P.D. 2003, ApJ 588, 452 +NGC 7078-X2 2005ApJ...634L.105D Dieball, A., et al. (8 authors) 2005, ApJ 634, L105 (Orb.Per.) +J0029+5934 2008ApJ...680..615J Jonker, P.G., Torres, M.A.P., Steeghs, D. 2008, ApJ 680, 615 +J0029+5934 2008ApJ...672.1079T Torres, M.A.P., et al.
(13 authors) 2008, ApJ 672, 1079 +J0029+5934 2009ApJ...698L..60P Patruno, A., Wijnands, R., van der Klis, M. 2009, ApJ 698, L60 (3. Per.) +J0029+5934 2010A&A...517A...1H Lewis, F., et al. (21 authors) 2010, A&A 517, A72 +J0029+5934 2010ApJ...722..909P Patruno, A. 2010, ApJ 722, 909 (Orb.Per., 3. Per.) +J0029+5934 2011ApJ...726...26H Hartman, J.M., Galloway, D.K., Chakrabarty, D. 2011, ApJ 726, 26 (Orb.Per., 3. Per.) +J0029+5934 ................... Papitto, A., et al. (6 authors) 2011, A&A, subm. = arXiv:1006.1303 (Orb.Per., 3. Per.) +0042+3244 ................... Charles, P., Thorstensen, J., Bowyer, S. 1978, MNRAS 183, 29P +0042+3244 ................... Watson, M.G., Ricketts, M.J. 1978, MNRAS 183, 35P (Orb.Per.) +J0042+4118 ................... Barnard, R., Kolb, U., Osborne, J.P. 2002, in: New Visions of the X-ray Universe in the XMM-Newton and Chandra Era, F. Jensen (ed.), ESA SP-488, (has not been published), = astro-ph/0203475 (Orb.Per.) +J0043+4107 2002ApJ...581L..27T Trudolyubov, S.P., et al. (7 authors) 2002, ApJ 581, L27 (Orb.Per.) +J0043+4107 2006MNRAS.366..287B Barnard, R., et al. (6 authors) 2006, MNRAS 366, 287 +J0043+4112 2004A&A...419.1045M Mangano, V., Israel, G.L., Stella, L. 2004, A&A 419, 1045 (Orb.Per.) +J0055-3738 2003ApJ...590L..13K Kong, A.K.H., Di Stefano, R. 2003, ApJ 590, L13 (Orb.Per.) +0512-4006 2009ApJ...699.1113Z Zurek, D.R., Knigge, C., Maccarone, T.J., Dieball, A., Long, K.S. 2009, ApJ 699, 1113 (Orb.Per.) +0512-4006 2010MNRAS.406.2087M Maccarone, T.J., Long, K.S., Knigge, C., Dieball, A., Zurek, D.R. 2010, MNRAS 406, 2087 +0918-5459 ................... Zhong, J., Wang, Z. 2010, ApJ, subm. = arXiv:1006.3980 (Orb.Per.) +J0929-3123 2002ApJ...576L.137G Galloway, D.K., Chakrabarty, D., Morgan, E.H., Remillard, R.A. 2002, ApJ 576, L137 (Orb.Per., 3. Per.) +J0929-3123 2009A&A...497..445I Iacolina, M.N., Burgay, M., Burderi, L., Possenti, A., Di Salvo, T. 2009, A&A 497, 445 +J0929-3123 2009ApJ...698L..60P Patruno, A., Wijnands, R., van der Klis, M. 2009, ApJ 698, L60 (3. Per.) +J1023+0038 2005AJ....130..759T Thorstensen, J.R., Armstrong, E. 2005, AJ 130, 759 (Spectr2) +J1023+0038 ................... Archibald, A.M., et al. (18 authors) 2009, Sci 324, 1411 (Orb.Per., 3. Per., M1/M2, Incl) +J1023+0038 2009ApJ...703.2017W Wang, Z., et al. (7 authors) 2009, ApJ 703, 2017 +J1047+1234 2006ApJ...650..879F Fabbiano, G., et al. (12 authors) 2006, ApJ 650, 879 (Orb.Per.) +J1227-4853 2008A&A...487..271B Butters, O.W., Norton, A.J., Hakala, P., Mukai, K., Barlow, E.J. 2008, A&A 487, 271 (3. Per.) +J1227-4853 2009MNRAS.395..386P Pretorius, M.L. 2009, MNRAS 395, 386 +J1227-4853 2009PASJ...61L..13S Saitou, K., Tsujimoto, M., Ebisawa, K., Ishida, M. 2009, PASJ 61, L13 +J1227-4853 2010A&A...515A...1A de Martino, D., et al. (10 authors) 2010, A&A 515, A25 (Orb.Per.) +J1242+3232 2007A&A...471L..55C Carpano, S., Pollock, A.M.T., King, A.R., Wilms, J., Ehle, M. 2007, A&A 471, L55 (Orb.Per.) +1323-6152 2005A&A...436..195B Boirin, L., Mendez, M., Diaz Trigo, M., Parmar, A.N., Kaastra, J.S. 2005, A&A 436, 195 +1323-6152 ................... Levine, A.M., Corbet, R. 2006, ATel #940 (Orb.Per.) +1323-6152 2009A&A...500..873B Balucinska-Church, M., Dotani, T., Hirotsu, T., Church, M.J. 2009, A&A 500, 873 +J1538-5542 ................... Kennea, J.A., et al. (12 authors) 2007, ATel #1209 (Orb.Per.) +1543-6224 2004MNRAS.348L...7N Nelemans, G., Jonker, P.G., Marsh, T.R., van der Klis, M. 2004, MNRAS 348, L7 +1543-6224 2004ApJ...616L.139W Wang, Z., Chakrabarty, D. 2004, ApJ 616, L139 (Orb.Per.)
+1624-4904 2001A&A...378..847B Balucinska-Church, M., Barnard, R., Church, M.J., Smale, A.P. 2001, A&A 378, 847 +1624-4904 2001ApJ...550..962S Smale, A.P., Church, M.J., Balucinska-Church, M. 2001, ApJ 550, 962 (Orb.Per.) +J1650-4957 2003ApJ...586..419K Kalemci, E., et al. (8 authors) 2003, ApJ 586, 419 +J1650-4957 2003ApJ...592.1100T Tomsick, J.A., Kalemci, E., Corbel, S., Kaaret, P. 2003, ApJ 592, 1100 +J1650-4957 2004ApJ...616..376O Orosz, J.A., McClintock, J.E., Remillard, R.A., Corbel, S. 2004, ApJ 616, 376 (Orb.Per., Spectr2, Incl, M1) +J1659-1515 ................... Belloni, T.M., Munoz-Darias, T., Kuulkers, E. 2010, ATel #2926 (Orb.Per.) +1705-4402 1987ApJ...323..288L Langmeier, M., Sztajno, M., Hasinger, G., Truemper, J., Gottwald, M. 1987, ApJ 323, 288 (Orb.Per.) +1705-4402 2009ApJ...692...73H Homan, J., Kaplan, D.L., van den Berg, M., Young, A.J. 2009, ApJ 692, 73 +J1710-2807 2009A&A...502..905Y Younes, G., Boirin, L., Sabra, B. 2009, A&A 502, 905 +J1710-2807 ................... Jain, C., Paul, B. 2011, MNRAS, in press = arXiv:1011.6054 (Orb.Per.) +1728-3347 1999ApJ...516L..81S Strohmayer, T.E., Markwardt, C.B. 1999, ApJ 516, L81 (3. Per.) +1728-3347 2001ApJ...551..907V van Straaten, S., van der Klis, M., Kuulkers, E., Mendez, M. 2001, ApJ 551, 907 (3. Per.) +1728-3347 2010ApJ...724..417G Galloway, D.K., Yao, Y., Marshall, H., Misanovic, Z., Weinberg, N. 2010, ApJ 724, 417 (Orb.Per.) +1740-2943 1997A&A...319..184A Marti, J., Mirabel, I.F., Chaty, S., Rodriguez, L.F. 2000, A&A 319, 184 +1740-2943 2002ApJ...578L.129S Smith, D.M., Heindl, W.A., Swank, J.H. 2002, ApJ 578, L129 (Orb.Per.) +J1744-2844 1997IAUC.6530Q...1K Finger, M.H., Robinson, C.R., Harmon, B.A., Vaughan, B.A. 1997, IAU Circ. No. 6530 (Orb.Per., 3. Per.) +J1744-2844 2007MNRAS.380.1511G Gosling, A.J., Bandyopadhyay, R.M., Miller-Jones, J.C.A., Farrell, S.A. 2007, MNRAS 380, 1511 +J1745-2901 2009A&A...495..547D Degenaar, N., Wijnands, R. 2009, A&A 495, 547 +J1745-2901 2009PASJ...61....1I Hyodo, Y., et al. (6 authors) 2009, PASJ 61, S99 (Orb.Per.) +J1745-2900 2005ApJ...622L.113M Muno, M.P., et al. (7 authors) 2005, ApJ 622, L113 +J1745-2900 2005A&A...443..571P Porquet, D., et al. (7 authors) 2005, A&A 443, 571 (Orb.Per.) +1746-3702 2002AJ....123.3255H Homer, L., Anderson, S.F., Margon, B., Downes, R.A., Deutsch, E.W. 2002, AJ 123, 3255 +1746-3702 2004MNRAS.347..334B Balucinska-Church, M., Church, M.J., Smale, A.P. 2004, MNRAS 347, 334 (Orb.Per.) +1746-3702 ................... Levine, A.M., Corbet, R. 2006, ATel #940 (Orb.Per.) +1747-3116 2003A&A...406..233I in 't Zand, J.J.M., et al. (9 authors) 2003, A&A 406, 233 (Orb.Per.) +1747-3116 2003A&A...409..659I in 't Zand, J.J.M., Strohmayer, T.E., Markwardt, C.B., Swank, J. 2003, A&A 409, 659 +J1748-3607 2006ApJ...639L..31B Bhattacharyya, S., Strohmayer, T.E., Markwardt, C.B., Swank, J.H. 2006, ApJ 639, L31 (3. Per.) +J1748-3607 ................... Gavriil, F.P., Strohmayer, T.E., Bhattacharyya, S. 2009, ApJ, subm. = arXiv:0909.1607 (Orb.Per.) +J1748-2446 2011A&A...526L...3P Papitto, A., et al. (8 authors) 2011, A&A 526, L3 (Orb.Per., 3. Per.) +J1748-2446 ................... Strohmayer, T.E., Markwardt, C.B., Pereira, D., Smith, E.A. 2010, ATel #2946 +J1748-2021#1 2007ApJ...669L..29G Gavriil, F.P., Strohmayer, T.E., Swank, J.H., Markwardt, C.B. 2007, ApJ 669, L29 (3. Per.) +J1748-2021#1 2008ApJ...674L..45A Altamirano, D., Casella, P., Patruno, A., Wijnands, R., van der Klis, M. 2008, ApJ 674, L45 (M2) +J1748-2021#1 2009ApJ...690.1856P Patruno, A., et al.
(6 authors) 2009, ApJ 690, 1856 (Orb.Per., 3. Per.) +J1748-2021#2 2010ApJ...714..894H Heinke, C.O., et al. (14 authors) 2009, ApJ 714, 894 +J1748-2021#2 2010ApJ...712L..58A Altamirano, D., et al. (9 authors) 2010, ApJ 712, L58 (Orb.Per., 3. Per., M2) +J1749-2808 2011A&A...525A...1P Ferrigno, C., et al. (10 authors) 2010, A&A 525, A48 (Orb.Per., 3. Per.) +J1749-2808 ................... Markwardt, C.B., Strohmayer, T.E. 2010, ApJ 717, L49 (Orb.Per., 3. Per., Incl, M2) +J1749-2808 2011ApJ...727L..18A Altamirano, D., et al. (15 authors) 2011, ApJ 727, L18 +J1751-3057 2010MNRAS.409.1136A Altamirano, D., et al. (6 authors) 2010, MNRAS 409, 1136 +J1751-3057 2010MNRAS.407.2575P Papitto, A., et al. (8 authors) 2010, MNRAS 407, 2575 (Orb.Per., 3. Per., M2) +J1751-3057 2011A&A...526A...1D Riggio, A., et al. (8 authors) 2011, A&A 526, A95 (Orb.Per., 3. Per.) +J1751-3037 2002ApJ...575L..21M Markwardt, C.B., Swank, J., Strohmayer, T.E., in 't Zand, J.J.M. 2002, ApJ 575, L21, Erratum in ApJ 667, L211 (2007) (Orb.Per., 3. Per., M2) +J1751-3037 2008MNRAS.383..411P Papitto, A., Menna, M.T., Burderi, L., Di Salvo, T., Riggio, A. 2008, MNRAS 383, 411 (Orb.Per., 3. Per.) +J1751-3037 2009ApJ...698L..60P Patruno, A., Wijnands, R., van der Klis, M. 2009, ApJ 698, L60 (3. Per.) +J1753-0127 2008ApJ...681.1458Z Zurita, C., et al. (6 authors) 2008, ApJ 681, 1458 (2. Per.) +J1753-0127 2009MNRAS.392..309D Durant, M., Gandhi, P., Shahbaz, T., Peralta, H.H., Dhillon, V.S. 2009, MNRAS 392, 309 +J1753-0127 2009MNRAS.399..281H Hynes, R.I., O'Brien, K., Mullally, F., Ashcraft, T. 2009, MNRAS 399, 281 +J1753-0127 2009MNRAS.395.1257R Reis, R.C., Fabian, A.C., Ross, R.R., Miller, J.M. 2009, MNRAS 395, 1257 +J1756-2506 2007ApJ...668L.147K Krimm, H.A., et al. (13 authors) 2007, ApJ 668, L147; erratum in ApJ 703, L183 (2009) (Orb.Per., 3. Per., M2) +J1756-2506 2010MNRAS.403.1426P Patruno, A., Altamirano, D., Messenger, C. 2009, MNRAS 403, 1426 (Orb.Per., 3. Per.) +1758-2544 2002ApJ...580L..61R Rothstein, D.M., et al. (6 authors) 2002, ApJ 580, L61 (Spectr2) +1758-2544 2002ApJ...578L.129S Smith, D.M., Heindl, W.A., Swank, J.H. 2002, ApJ 578, L129 (Orb.Per.) +1758-2544 2010A&A...519A...1L Munoz-Arjonilla, A.J., et al. (7 authors) 2010, A&A 519, A15 +J1806-2924 2007MNRAS.382.1751R Riggio, A., et al. (7 authors) 2007, MNRAS 382, 1751 (Orb.Per., 3. Per.) +J1806-2924 2008ApJ...678.1316C Chou, Y., Chung, Y., Hu, C.P., Yang, T.C. 2008, ApJ 678, 1316 (Orb.Per., 3. Per.) +J1806-2924 2008ApJ...678.1273R Riggio, A., et al. (7 authors) 2008, ApJ 678, 1273 +J1806-2924 2009A&A...508..297D D'Avanzo, P., et al. (6 authors) 2009, A&A 508, 297 +J1806-2924 2009ApJ...698L..60P Patruno, A., Wijnands, R., van der Klis, M. 2009, ApJ 698, L60 (3. Per.) +J1806-2924 2010ApJ...717.1253P Patruno, A., Hartman, J.M., Wijnands, R., Chakrabarty, D., van der Klis, M. 2010, ApJ 717, 1253 (Orb.Per., 3. Per.) +1811-1710 1999MNRAS.306..417B Bandyopadhyay, R.M., Shahbaz, T., Charles, P.A., Naylor, T. 1999, MNRAS 306, 417 (Spectr2) +1811-1710 2003ApJ...595.1086C Corbet, R.H.D. 2003, ApJ 595, 1086 (Orb.Per.) +1811-1710 2010ApJ...719..979C Corbet, R.H.D., Pearlman, A.B., Buxton, M., Levine, A.M. 2010, ApJ 719, 979 +1813-1403 1986IAUC.4235....0H Hertz, P., Wood, K.S. 1986, IAU Circ. No. 4235 (Orb.Per.) +1813-1403 2002ApJ...574L.143C Callanan, P.J., et al. (9 authors) 2002, ApJ 574, L143 +1813-1403 2002A&A...382..947K Kuulkers, E., Homan, J., van der Klis, M., Lewin, W.H.G., Mendez, M.
2002, A&A 382, 947 +1820-3023 1997ApJ...482L..69A Anderson, S.F., Margon, B., Deutsch, E.W., Downes, R.A., Allen, R.G. 1997, ApJ 482, L69 (Orb.Per.) +1820-3023 ................... Levine, A.M., Corbet, R. 2006, ATel #940 (Orb.Per.) +1820-3023 2010ApJ...719.1807G Guever, T., Wroblewski, P., Camarota, L., Oezel, F. 2010, ApJ 719, 1807 +1820-3023 2010ApJ...712..653W Wang, Z., Chakrabarty, D. 2010, ApJ 712, 653 (2. Per.) +1822-0002 2005ApJ...627..926J Juett, A.M., Chakrabarty, D. 2005, ApJ 627, 926 +1822-0002 2007MNRAS.376.1886S Shahbaz, T., Watson, C.A., Hernandez-Peralta, H. 2007, MNRAS 376, 1886 (Orb.Per.) +1850-0846 1996MNRAS.282L..37H Homer, L., et al. (6 authors) 1996, MNRAS 282, L37 (Orb.Per.) +1850-0846 2005A&A...443..223S Sidoli, L., La Palombara, N., Oosterbroek, T., Parmar, A.N. 2005, A&A 443, 223 +J1900-2455 2006ApJ...638..963K Kaaret, P., Morgan, E.H., Vanderspek, R., Tomsick, J.A. 2006, ApJ 638, 963 (Orb.Per., 3. Per.) +J1900-2455 2008MNRAS.383.1581E Elebert, P., et al. (7 authors) 2008, MNRAS 383, 1581 +J1900-2455 2009ApJ...698L.174W Watts, A., et al. (11 authors) 2009, ApJ 698, L174 +J1910-5959 2009AcA....59..273K Kaluzny, J., Thompson, I.B. 2009, AA 59, 273 (Orb.Per.) +J1914+0953 ................... Corbet, R.H.D., Hannikainen, D.C., Remillard, R. 2004, ATel #269 (Orb.Per.) +J1914+0953 2004A&A...423L..17H Hannikainen, D.C., et al. (8 authors) 2004, A&A 423, L17 +J1914+0953 2005A&A...432..235R Rodriguez, J., et al. (6 authors) 2004, A&A 432, 235 +J1914+0953 ................... Schultz, J., et al. (8 authors) 2004, in: The INTEGRAL Universe, V. Schoenfelder, G. Lichti, C. Winkler (eds.), ESA SP-552, p. 243 diff --git a/astropy/io/ascii/tests/t/cds/multi/ReadMe b/astropy/io/ascii/tests/t/cds/multi/ReadMe new file mode 100644 index 0000000..c4e9534 --- /dev/null +++ b/astropy/io/ascii/tests/t/cds/multi/ReadMe @@ -0,0 +1,64 @@ +J/MNRAS/301/1031 High resolution spectra of VLM stars (Tinney+ 1998) +================================================================================ +High resolution spectra of Very Low-Mass Stars + Tinney C.G., Reid I.N. + + =1998MNRAS.301.1031T +================================================================================ +ADC_Keywords: Stars, dwarfs ; Stars, late-type ; Spectroscopy + +Description: + A high resolution optical spectral atlas for three very low-mass + stars is provided, along with a high resolution observation of + an atmospheric absorption calibrator. This is the data used to + produce Figures 4-9 in the paper. + + These data were acquired with CASPEC on the ESO3.6m telescope. + The FWHM resolution is 16km/s (e.g. 0.043nm at 800nm), at a dispersion + of 9km/s. Incomplete wavelength coverage produces inter-order gaps + at wavelengths longer than 804.5nm. + +Objects: + --------------------------------------------------------------------- + RA (2000) DE Designation(s) (File) + --------------------------------------------------------------------- + 16 55 35.7 -08 23 36 VB 8 = LHS 429 = Gl 644 C (vb8.dat) + 08 53 36 -03 29 30 LHS 2065 = LP 666-9 (lhs2065.dat) + 03 39 34.6 -35 25 51 LP 944-20 (lp944-20.dat) + 05 45 59.9 -32 18 23 {mu} Col = HR 1996 = HD 38666 (mucol.dat) + --------------------------------------------------------------------- + +File Summary: +--------------------------------------------------------------------- + FileName Lrecl Records Explanations +--------------------------------------------------------------------- +ReadMe 80 .
This file +vb8.dat 26 14390 Spectrum for VB8 +lhs2065.dat 26 14390 Spectrum for LHS2065 +lp944-20.dat 26 14390 Spectrum for LP944-20 +mucol.dat 23 14390 Atmospheric Spectrum for Mu Columbae +--------------------------------------------------------------------- + +Byte-by-byte Description of file: vb8.dat, lhs2065.dat +Byte-by-byte Description of file: lp944-20.dat +------------------------------------------------------------------------- + Bytes Format Units Label Explanations +------------------------------------------------------------------------- + 1- 12 F12.2 0.1nm Lambda Central wavelength of the flux bin + 13- 26 A14.9 mJy Fnu Data in interorder gaps has value 0.0 +------------------------------------------------------------------------- + +Byte-by-byte Description of file: mucol.dat +------------------------------------------------------------------------- + Bytes Format Units Label Explanations +------------------------------------------------------------------------- + 1- 12 F12.2 0.1nm Lambda Central wavelength of the flux bin + 13- 23 F11.6 --- Fnu *Data in interorder gaps has value 0.0 +------------------------------------------------------------------------- +Note on Fnu: + mJy which have been normalised to value 1.0 + in the continuum of the atmospheric standard star +------------------------------------------------------------------------- + +================================================================================ +(End) C.G. Tinney [AAO] 04-Feb-1999 diff --git a/astropy/io/ascii/tests/t/cds/multi/lhs2065.dat b/astropy/io/ascii/tests/t/cds/multi/lhs2065.dat new file mode 100644 index 0000000..29875a7 --- /dev/null +++ b/astropy/io/ascii/tests/t/cds/multi/lhs2065.dat @@ -0,0 +1,18 @@ + 6476.09 0.383329 + 6476.28 0.515559 + 6476.47 0.288042 + 6476.66 0.373343 + 6476.85 0.472194 + 6477.04 0.352547 + 6477.23 0.215444 + 6477.42 0.371470 + 6477.61 0.382175 + 6477.80 0.300221 + 6477.99 0.252524 + 6478.18 0.346887 + 6478.37 0.389587 + 6478.56 0.328543 + 6478.75 0.328281 + 6478.94 0.294363 + 6479.13 0.336826 + 6479.32 0.285937 diff --git a/astropy/io/ascii/tests/t/cds/multi/lp944-20.dat b/astropy/io/ascii/tests/t/cds/multi/lp944-20.dat new file mode 100644 index 0000000..371d7f8 --- /dev/null +++ b/astropy/io/ascii/tests/t/cds/multi/lp944-20.dat @@ -0,0 +1,18 @@ + 6476.09 0.342236 + 6476.28 0.380582 + 6476.47 0.429476 + 6476.66 0.463431 + 6476.85 0.475528 + 6477.04 0.387025 + 6477.23 0.304608 + 6477.42 0.404995 + 6477.61 0.388829 + 6477.80 0.264535 + 6477.99 0.715199 + 6478.18 0.656017 + 6478.37 0.327062 + 6478.56 0.245733 + 6478.75 0.403018 + 6478.94 7.89686E-02 + 6479.13 0.321100 + 6479.32 0.489005 \ No newline at end of file diff --git a/astropy/io/ascii/tests/t/cds2.dat b/astropy/io/ascii/tests/t/cds2.dat new file mode 100644 index 0000000..fc89d3d --- /dev/null +++ b/astropy/io/ascii/tests/t/cds2.dat @@ -0,0 +1,287 @@ +Title: The Taurus Spitzer Survey: New Candidate Taurus Members Selected + Using Sensitive Mid-Infrared Photometry +Authors: Rebull L.M., Padgett D.L., McCabe C.-E., Hillenbrand L.A., + Stapelfeldt K.R., Noriega-Crespo A., Carey S.J., Brooke T., Huard T., + Terebey S., Audard M., Monin J.-L., Fukagawa M., Gudel M., Knapp G.R., + Menard F., Allen L.E., Angione J.R., Baldovin-Saavedra C., Bouvier J., + Briggs K., Dougados C., Evans N.J., Flagey N., Guieu S., Grosso N., + Glauser A.M., Harvey P., Hines D., Latter W.B., Skinner S.L., + Strom S., Tromp J., Wolf S. 
+Table: Spitzer measurements for sample of previously identified Taurus members +================================================================================ +Byte-by-byte Description of file: apjs326455t4_mrt.txt +-------------------------------------------------------------------------------- + Bytes Format Units Label Explanations +-------------------------------------------------------------------------------- + 1- 15 A15 --- SST Spitzer Tau name + 17- 39 A23 --- CName Common name + 41 A1 --- l_3.6mag Limit flag on 3.6mag + 42- 47 F6.2 mag 3.6mag Spitzer/IRAC 3.6 micron band magnitude (1) + 49- 52 F4.2 mag e_3.6mag ? Uncertainty in 3.6mag + 54 A1 --- l_4.5mag Limit flag on 4.5mag + 55- 60 F6.2 mag 4.5mag ? Spitzer/IRAC 4.5 micron band magnitude (1) + 62- 65 F4.2 mag e_4.5mag ? Uncertainty in 4.5mag + 67 A1 --- l_5.8mag Limit flag on 5.8mag + 68- 73 F6.2 mag 5.8mag Spitzer/IRAC 5.8 micron band magnitude (1) + 75- 78 F4.2 mag e_5.8mag ? Uncertainty in 5.8mag + 80 A1 --- l_8mag Limit flag on 8.0mag + 81- 86 F6.2 mag 8mag ? Spitzer/IRAC 8.0 micron band magnitude (1) + 88- 91 F4.2 mag e_8mag ? Uncertainty in 8mag + 93 A1 --- l_24mag Limit flag on 24mag + 94-100 F7.2 mag 24mag ? Spitzer/MIPS 24 micron band magnitude (1) + 102-105 F4.2 mag e_24mag ? Uncertainty in 24mag + 107 A1 --- l_70mag Limit flag on 70mag + 108-114 F7.2 mag 70mag ? Spitzer/MIPS 70 micron band magnitude (1) + 116-119 F4.2 mag e_70mag ? Uncertainty in 70mag + 121 A1 --- l_160mag Limit flag on 160mag + 122-128 F7.2 mag 160mag ? Spitzer/MIPS 160 micron band magnitude (1) + 130-133 F4.2 mag e_160mag ? Uncertainty in 160mag + 135-137 A3 --- ID24/70 Identification in 24/70 micron color-magnitude + diagram + 139-147 A9 --- IDKS/24 Identification in Ks/70 micron + color-magnitude diagram + 149-157 A9 --- ID8/24 Identification in 8/24 micron color-magnitude + diagram + 159-167 A9 --- ID4.5/8 Identification in 4.5/8 micron color-magnitude + diagram + 169-171 A3 --- IDIRAC Identification in IRAC color-color diagram + 173-175 A3 --- Note Additional note (2) +-------------------------------------------------------------------------------- +Note (1): To convert between magnitudes and flux densities, we use + M= 2.5 log(F_zeropt_/F) where the zero-point flux densities for the + seven Spitzer bands are 280.9, 179.7, 115.0, and 64.13 Jy for IRAC + and 7.14, 0.775, and 0.159 Jy for MIPS. IRAC effective wavelengths + are 3.6, 4.5, 5.8, and 8.0 microns; MIPS effective wavelengths are + 24, 70, and 160 microns. +Note (2): + b = MIPS-160 flux density for this object is subject to confusion with a + nearby source or sources. + c = MIPS-160 flux density for this object is compromised by missing and/or + saturated data. + d = MIPS-160 flux density for this object is hard saturated. + e = IRAC flux densities for 043835.4+261041=HV Tau C do not appear in our + automatically-extracted catalog. Flux densities here are those from + Hartmann et al. (2005); since their observations have more redundancy + at IRAC bands, they are able to obtain reliable flux densities for + this object at IRAC bands. MIPS flux densities are determined from + our data. + f = The image morphology around 041426.2+280603 is complex; careful PSF + subtraction and modeling will be required to apportion flux densities + among the three local maxima seen in close proximity in the IRAC + images, which may or may not be three physically distinct sources. 
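[The fixtures above exercise both CDS conventions: a standalone ReadMe describing separate data files (the multi/ReadMe case earlier) and, in this file, a header and data sharing one table. A minimal, hedged sketch of working with them via astropy.io.ascii follows; the file paths are the test-fixture locations from this diff, the formula and zero points come from Note (1) above, and the helper name mag_to_fnu_jy is purely illustrative. The data rows that the byte-by-byte description refers to continue immediately below.]

    from astropy.io import ascii

    # Header (ReadMe) and data in separate files: point the CDS reader at
    # the ReadMe via the `readme` keyword (paths are the fixture locations
    # added by this diff).
    spec = ascii.read('astropy/io/ascii/tests/t/cds/multi/lhs2065.dat',
                      format='cds',
                      readme='astropy/io/ascii/tests/t/cds/multi/ReadMe')

    # Note (1): M = 2.5 log10(F_zeropt / F), so F = F_zeropt * 10**(-M/2.5).
    # Zero points in Jy for IRAC 3.6/4.5/5.8/8.0 and MIPS 24/70/160 microns,
    # copied from Note (1); the band keys are an assumption of this sketch.
    ZEROPT_JY = {'3.6': 280.9, '4.5': 179.7, '5.8': 115.0, '8.0': 64.13,
                 '24': 7.14, '70': 0.775, '160': 0.159}

    def mag_to_fnu_jy(mag, band):
        """Flux density in Jy for a Spitzer magnitude (illustrative helper)."""
        return ZEROPT_JY[band] * 10 ** (-mag / 2.5)

[For single-file tables such as this one, format='cds' alone should suffice, since the header is then parsed from the top of the data file itself.]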
+-------------------------------------------------------------------------------- +041314.1+281910 LkCa 1 8.54 0.05 8.50 0.05 8.41 0.05 8.43 0.05 8.28 0.08 > 1.30 no no no no +041327.2+281624 Anon 1 7.23 0.05 7.24 0.05 7.17 0.05 7.08 0.05 6.95 0.05 > 1.27 no no no no +041353.2+281123 IRAS04108+2803 A 9.02 0.05 8.37 0.05 7.67 0.05 6.57 0.05 3.44 0.04 > 0.19 > -2.05 yes yes yes yes b +041354.7+281132 IRAS04108+2803 B 9.38 0.05 8.03 0.05 6.96 0.05 5.78 0.05 1.38 0.04 -1.84 0.22 > -1.94 yes yes yes yes yes b +041357.3+291819 IRAS04108+2910 7.48 0.05 6.84 0.05 6.26 0.05 5.54 0.05 3.13 0.04 1.15 0.22 > -3.39 yes yes yes yes yes +041411.8+281153 J04141188+2811535 10.93 0.06 10.40 0.05 10.12 0.07 8.99 0.06 5.76 0.01 > 1.03 yes yes yes yes +041412.2+280837 IRAS04111+2800G 13.19 0.06 11.90 0.06 11.19 0.06 10.39 0.06 3.47 0.04 -0.33 0.22 yes yes-faint yes-faint yes +041412.9+281212 V773 Tau ABC < 6.62 < 6.10 5.13 0.05 4.38 0.05 1.69 0.04 0.27 0.22 yes yes yes +041413.5+281249 FM Tau 8.09 0.05 7.67 0.05 7.36 0.05 6.42 0.05 2.92 0.04 1.07 0.22 yes yes yes yes yes +041414.5+282758 FN Tau 7.59 0.05 7.17 0.05 6.71 0.05 5.75 0.05 2.03 0.04 -0.25 0.22 yes yes yes yes yes +041417.0+281057 CW Tau < 6.62 < 6.10 5.08 0.05 4.51 0.05 1.75 0.04 -0.42 0.22 yes yes yes +041417.6+280609 CIDA-1 8.67 0.05 8.13 0.05 7.59 0.05 6.71 0.05 3.53 0.04 1.28 0.22 yes yes yes yes yes +041426.2+280603 IRAS04113+2758 A < 6.62 < 6.10 4.63 0.05 3.79 0.05 < 0.45 -2.54 0.22 -4.35 0.34 f +041430.5+280514 MHO-3 7.22 0.05 6.49 0.05 5.75 0.05 4.53 0.05 < 0.45 -1.06 0.22 -4.15 0.34 yes yes +041447.3+264626 FP Tau 8.11 0.05 7.86 0.05 7.60 0.05 7.27 0.05 4.25 0.04 1.22 0.22 yes yes yes yes yes +041447.8+264811 CX Tau 8.48 0.05 8.13 0.05 7.68 0.05 6.63 0.05 3.35 0.04 1.23 0.22 yes yes yes yes yes +041447.9+275234 LkCa 3 AB 7.28 0.05 7.33 0.05 7.27 0.05 7.23 0.05 7.07 0.05 > 1.17 no no no no +041449.2+281230 FO Tau AB 7.53 0.05 7.17 0.05 6.72 0.05 5.92 0.05 2.83 0.04 0.79 0.22 yes yes yes yes yes +041505.1+280846 CIDA-2 8.90 0.05 8.79 0.05 8.71 0.05 8.68 0.05 8.45 0.11 > 1.31 no no no no +041514.7+280009 KPNO-1 13.23 0.13 12.72 0.22 12.94 0.09 12.81 0.10 > 10.61 > 0.90 no no +041524.0+291043 J04152409+2910434 11.86 0.05 11.79 0.05 11.66 0.06 11.48 0.06 > 10.06 > 1.14 no no +041612.1+275638 J04161210+2756385 9.38 0.05 9.04 0.05 8.71 0.05 8.30 0.05 5.37 0.04 1.55 0.22 yes yes yes yes yes +041618.8+275215 J04161885+2752155 10.88 0.05 10.78 0.05 10.67 0.06 10.68 0.06 > 9.95 > 1.26 no no +041628.1+280735 LkCa 4 8.18 0.05 8.17 0.05 8.04 0.05 8.05 0.05 7.94 0.07 > 1.20 no no no no +041639.1+285849 J04163911+2858491 10.50 0.05 10.14 0.05 9.86 0.05 9.41 0.05 7.22 0.05 > 1.24 yes yes yes yes +041733.7+282046 CY Tau 7.87 0.05 7.53 0.05 7.27 0.05 6.72 0.05 4.42 0.04 1.87 0.22 > -1.41 yes yes yes yes yes +041738.9+283300 LkCa 5 8.93 0.05 8.80 0.05 8.66 0.09 > 1.61 no +041749.5+281331 KPNO-10 10.82 0.05 10.36 0.05 9.81 0.05 8.89 0.05 5.96 0.04 1.92 0.22 yes yes yes yes yes +041749.6+282936 V410 X-ray 1 8.41 0.05 7.85 0.05 7.42 0.05 6.47 0.05 3.78 0.04 2.95 0.22 yes yes yes yes yes +041807.9+282603 V410 X-ray 3 10.04 0.05 9.94 0.05 9.90 0.06 9.80 0.05 9.27 0.21 > 1.79 yes yes-faint no no +041817.1+282841 V410 Anon 13 10.23 0.05 9.94 0.05 9.49 0.05 8.81 0.05 6.04 0.04 > -0.57 yes yes yes yes +041822.3+282437 V410 Anon 24 9.84 0.05 9.54 0.05 9.34 0.05 9.38 0.05 9.29 0.10 > 1.60 > 1.50 yes no no no +041829.0+282619 V410 Anon 25 8.87 0.05 8.64 0.05 8.46 0.05 8.39 0.05 8.15 0.08 > 1.67 > 0.12 yes no no no +041830.3+274320 KPNO-11 10.71 0.05 10.59 0.05 10.60 0.06 
10.50 0.06 > 10.01 > 1.46 no no +041831.1+282716 V410 Tau ABC 7.36 0.05 7.34 0.05 7.34 0.05 7.25 0.05 7.14 0.06 > 1.65 no no no no +041831.1+281629 DD Tau AB < 6.62 < 6.10 5.29 0.05 4.48 0.05 1.75 0.04 -0.04 0.22 yes yes yes +041831.5+281658 CZ Tau AB 8.46 0.05 7.63 0.05 6.62 0.05 5.00 0.05 1.96 0.04 1.45 0.22 yes yes yes yes yes +041832.0+283115 IRAS04154+2823 7.57 0.05 7.07 0.05 6.12 0.05 5.52 0.05 1.91 0.04 -0.38 0.22 yes yes yes yes yes +041834.4+283030 V410 X-ray 2 8.35 0.05 8.09 0.05 7.80 0.05 7.55 0.05 3.41 0.04 0.31 0.22 yes yes yes yes no +041840.2+282424 V410 X-ray 4 8.91 0.05 8.64 0.05 8.44 0.05 8.43 0.05 8.09 0.08 > 1.60 > -2.59 yes no no no +041840.6+281915 V892 Tau < 6.62 < 6.10 3.61 0.05 < 3.52 < 0.45 < -2.30 < -4.90 c d +041841.3+282725 LR1 9.50 0.05 8.92 0.05 8.44 0.05 7.95 0.05 4.65 0.04 0.38 0.22 > -0.29 yes yes yes yes yes +041842.5+281849 V410 X-ray 7 8.73 0.05 8.61 0.05 8.35 0.05 8.09 0.07 5.15 0.01 > -0.30 > -2.88 yes yes yes no +041845.0+282052 V410 Anon 20 11.01 0.05 10.74 0.05 10.55 0.06 10.57 0.06 > 10.20 > 0.49 > -3.20 no no +041847.0+282007 Hubble 4 7.09 0.05 7.04 0.05 6.95 0.05 6.96 0.05 6.78 0.01 > 0.18 > -4.19 no no no no +041851.1+281433 KPNO-2 12.25 0.05 12.11 0.06 12.02 0.06 11.84 0.07 > 9.59 > 1.59 no no +041851.4+282026 CoKu Tau/1 10.22 0.05 9.02 0.05 7.72 0.05 5.87 0.05 1.07 0.04 -0.98 0.22 < -2.55 yes yes yes yes yes c +041858.1+281223 IRAS04158+2805 9.23 0.05 8.54 0.05 7.85 0.06 6.84 0.05 2.73 0.04 -0.07 0.22 -2.51 0.22 yes yes yes yes yes +041901.1+281942 V410 X-ray 6 8.76 0.05 8.67 0.05 8.54 0.05 8.26 0.05 3.82 0.04 0.69 0.22 yes yes yes yes no +041901.2+280248 KPNO-12 13.97 0.06 13.61 0.06 13.23 0.08 12.75 0.08 > 10.13 > 1.74 > -0.54 no no +041901.9+282233 V410 Tau X-ray 5a 9.64 0.05 9.55 0.05 9.43 0.05 9.39 0.05 8.88 0.12 > 1.59 > -1.28 yes yes no no +041912.8+282933 FQ Tau AB 8.78 0.05 8.42 0.05 8.12 0.05 7.41 0.05 4.85 0.04 2.16 0.22 yes yes yes yes yes +041915.8+290626 BP Tau 7.27 0.05 6.90 0.05 6.65 0.05 5.71 0.05 2.52 0.04 0.71 0.22 yes yes yes yes yes +041926.2+282614 V819 Tau 8.20 0.05 8.29 0.05 8.11 0.05 8.06 0.05 6.29 0.05 yes yes no no +041935.4+282721 FR Tau 9.42 0.05 8.93 0.05 8.25 0.05 7.27 0.05 4.84 0.04 3.14 0.22 yes yes yes yes yes +041941.2+274948 LkCa 7 AB 8.11 0.05 8.11 0.05 8.04 0.05 7.99 0.05 7.75 0.06 > 1.94 no no no no +041942.5+271336 IRAS04166+2706 12.84 0.06 11.32 0.05 10.49 0.06 9.75 0.06 2.93 0.04 -1.92 0.22 -4.53 0.34 yes yes-faint yes-faint yes +041958.4+270957 IRAS04169+2702 8.41 0.05 7.15 0.05 6.29 0.05 5.33 0.05 0.66 0.04 < -2.30 -5.40 0.34 yes yes yes yes +042025.5+270035 J04202555+2700355 10.99 0.05 10.77 0.05 10.44 0.06 9.74 0.05 6.13 0.04 2.49 0.22 yes yes yes-faint yes yes +042039.1+271731 2MASS J04203918+2717317 9.42 0.05 9.39 0.05 9.35 0.05 9.29 0.05 8.83 0.10 > 1.50 no no no no +042107.9+270220 CFHT-19 7.54 0.05 6.66 0.05 6.01 0.05 5.10 0.05 1.61 0.04 -1.18 0.22 < -3.27 yes yes yes yes yes c +042110.3+270137 IRAS04181+2654B 9.03 0.05 8.24 0.05 7.60 0.05 6.70 0.05 2.69 0.04 -0.47 0.22 < -3.97 yes yes yes yes yes b c +042111.4+270109 IRAS04181+2654A 8.60 0.05 7.56 0.05 6.71 0.05 5.71 0.05 1.64 0.04 -1.04 0.22 -4.21 0.34 yes yes yes yes yes b +042134.5+270138 J04213459+2701388 9.86 0.05 9.65 0.05 9.35 0.05 8.98 0.05 7.18 0.05 > 1.64 yes yes yes yes +042146.3+265929 CFHT-10 11.54 0.05 11.32 0.05 11.05 0.06 10.45 0.06 7.26 0.05 > 1.45 yes no yes-faint yes +042154.5+265231 J04215450+2652315 13.22 0.06 13.12 0.06 12.90 0.07 12.80 0.08 10.50 0.22 > 1.66 yes-faint no no no +042155.6+275506 DE Tau 7.07 0.05 6.73 
0.05 6.40 0.05 5.78 0.05 2.58 0.04 -0.19 0.22 yes yes yes yes yes +042157.4+282635 RY Tau < 6.62 < 6.10 3.60 0.05 < 3.52 < 0.45 < -2.30 -4.24 0.34 +042158.8+281806 HD283572 6.86 0.05 6.86 0.05 6.81 0.05 6.78 0.05 6.76 0.05 > 1.24 no no no no +042200.6+265732 FS Tau B 9.66 0.05 8.40 0.05 7.23 0.05 5.95 0.05 1.58 0.04 -0.68 0.22 < -4.14 yes yes yes yes yes b c +042202.1+265730 FS Tau Aab 6.75 0.05 6.30 0.05 5.81 0.05 4.99 0.05 1.33 0.04 > 0.05 yes yes yes yes +042203.1+282538 LkCa 21 8.26 0.05 8.22 0.05 8.14 0.05 8.06 0.05 8.06 0.09 > 1.23 no no no no +042216.4+254911 CFHT-14 11.48 0.05 11.34 0.05 11.28 0.06 11.23 0.06 > 9.51 > 1.16 no no +042216.7+265457 CFHT-21 7.77 0.05 7.26 0.05 6.85 0.05 6.30 0.05 3.29 0.04 1.18 0.22 yes yes yes yes yes +042224.0+264625 2MASS J04222404+2646258 9.52 0.05 9.40 0.05 9.34 0.05 9.33 0.05 9.07 0.12 > 1.56 no no no no +042307.7+280557 IRAS04200+2759 8.43 0.05 7.81 0.05 7.28 0.05 6.44 0.05 3.23 0.04 0.76 0.22 yes yes yes yes yes +042339.1+245614 FT Tau 7.93 0.05 7.46 0.05 7.19 0.05 6.29 0.05 3.15 0.04 0.28 0.22 yes yes yes yes yes +042426.4+264950 CFHT-9 11.16 0.05 10.88 0.05 10.51 0.06 9.83 0.05 6.78 0.05 > 0.77 yes yes-faint yes yes +042444.5+261014 IRAS04216+2603 8.08 0.05 7.57 0.05 7.14 0.05 6.32 0.05 3.53 0.04 0.16 0.22 -2.47 0.22 yes yes yes yes yes +042445.0+270144 J1-4423 10.21 0.05 10.15 0.05 10.06 0.06 10.11 0.06 > 9.49 > 1.05 no no +042449.0+264310 RXJ0424.8 7.73 0.05 7.70 0.05 7.69 0.05 7.65 0.05 7.40 0.06 > 1.02 no no no no +042457.0+271156 IP Tau 7.77 0.05 7.45 0.05 7.24 0.05 6.60 0.05 3.48 0.04 0.74 0.22 yes yes yes yes yes +042517.6+261750 J1-4872 AB 8.21 0.05 8.20 0.05 8.08 0.05 8.06 0.05 7.77 0.07 > 1.10 no no no no +042629.3+262413 KPNO-3 11.41 0.05 10.99 0.05 10.49 0.06 9.72 0.05 6.86 0.05 > 1.09 yes yes-faint yes yes +042630.5+244355 J04263055+2443558 12.57 0.05 12.21 0.06 11.76 0.06 11.08 0.06 8.87 0.15 > 1.09 yes no no yes +042653.5+260654 FV Tau AB < 6.62 < 6.10 5.23 0.05 4.56 0.05 1.54 0.04 -0.45 0.22 yes yes yes +042654.4+260651 FV Tau/c AB 8.01 0.05 7.58 0.05 7.05 0.05 6.29 0.05 3.88 0.04 > 0.72 > -1.64 yes yes yes yes +042656.2+244335 IRAS04239+2436 7.61 0.05 6.32 0.05 5.38 0.05 4.50 0.05 < 0.45 -2.25 0.22 -4.63 0.34 yes yes +042657.3+260628 KPNO-13 8.75 0.05 8.33 0.05 7.99 0.06 7.35 0.05 5.32 0.04 > 0.93 > -2.25 yes yes yes yes +042702.6+260530 DG Tau B 8.77 0.05 5.88 0.05 5.24 0.05 0.78 0.04 -2.24 0.22 -5.12 0.34 yes yes yes b +042702.8+254222 DF Tau AB < 6.62 < 6.10 5.08 0.05 4.50 0.05 2.19 0.04 0.70 0.22 yes yes yes +042704.6+260616 DG Tau A < 6.62 < 6.10 4.67 0.05 3.55 0.05 < 0.45 < -2.30 < -4.46 b c +042727.9+261205 KPNO-4 12.57 0.05 12.37 0.06 12.21 0.06 12.08 0.06 10.66 0.28 > 1.05 yes no no no +042745.3+235724 CFHT-15 13.24 0.06 13.15 0.06 13.25 0.07 13.05 0.10 > 10.55 > 1.06 no no +042757.3+261918 IRAS04248+2612 AB 9.83 0.06 9.10 0.05 8.28 0.05 7.10 0.05 2.27 0.04 -1.52 0.22 -4.39 0.34 yes yes yes yes yes +042838.9+265135 LDN 1521F-IRS 15.33 0.08 14.25 0.07 13.45 0.10 12.04 0.07 6.16 0.04 0.57 0.22 -4.28 0.34 yes yes-faint no no +042842.6+271403 J04284263+2714039 AB 9.76 0.05 9.53 0.05 9.21 0.05 8.83 0.05 6.21 0.05 > 0.88 yes yes yes yes +042900.6+275503 J04290068+2755033 12.30 0.05 11.99 0.05 11.60 0.06 10.92 0.06 8.06 0.07 > 0.88 yes no no yes +042904.9+264907 IRAS04260+2642 10.08 0.05 9.40 0.05 8.83 0.05 8.07 0.05 3.60 0.04 0.06 0.22 yes yes yes yes yes +042920.7+263340 J1-507 8.56 0.05 8.55 0.05 8.47 0.05 8.46 0.05 8.29 0.10 > 1.04 no no no no +042921.6+270125 IRAS04263+2654 8.06 0.05 7.67 0.05 7.31 0.05 6.69 0.05 3.41 
0.04 1.11 0.22 yes yes yes yes yes +042923.7+243300 GV Tau AB < 6.62 < 6.10 < 3.49 < 3.52 < 0.45 < -2.30 < -1.49 c d +042929.7+261653 FW Tau ABC 9.09 0.05 9.01 0.05 8.88 0.05 8.88 0.05 7.52 0.06 > 1.07 yes yes no no +042930.0+243955 IRAS04264+2433 10.21 0.05 9.43 0.05 8.60 0.05 6.72 0.05 1.12 0.04 -1.37 0.22 > -1.94 yes yes yes yes yes +042941.5+263258 DH Tau AB 7.63 0.05 7.33 0.05 7.20 0.05 6.86 0.05 3.37 0.04 0.82 0.22 yes yes yes yes yes +042942.4+263249 DI Tau AB 8.21 0.05 8.22 0.05 8.14 0.05 8.11 0.05 > 0.72 no no +042945.6+263046 KPNO-5 11.05 0.05 11.02 0.05 10.94 0.06 10.83 0.06 > 9.71 > 0.90 no no +042951.5+260644 IQ Tau 6.81 0.05 6.37 0.05 6.07 0.05 5.53 0.05 2.82 0.04 0.32 0.22 yes yes yes yes yes +042959.5+243307 CFHT-20 9.02 0.05 8.55 0.05 8.32 0.05 7.84 0.05 4.91 0.04 2.06 0.22 yes yes yes yes yes +043007.2+260820 KPNO-6 13.12 0.06 12.77 0.06 12.42 0.06 11.58 0.06 9.20 0.19 > 1.01 yes-faint no no yes +043023.6+235912 CFHT-16 13.23 0.06 13.15 0.06 13.04 0.08 12.99 0.09 > 10.54 > 1.00 no no +043029.6+242645 FX Tau AB 7.22 0.05 6.96 0.05 6.69 0.05 5.97 0.05 3.03 0.04 1.09 0.22 yes yes yes yes yes +043044.2+260124 DK Tau AB < 6.62 < 6.10 5.52 0.05 4.78 0.05 1.85 0.04 0.08 0.22 0.57 0.22 yes yes yes +043050.2+230008 IRAS04278+2253 < 6.62 < 6.10 < 3.49 < 3.52 < 0.45 -1.87 0.22 -3.88 0.34 +043051.3+244222 ZZ Tau AB 8.08 0.05 7.90 0.05 7.61 0.05 6.94 0.05 4.53 0.04 > 0.72 > -4.46 yes yes yes yes b +043051.7+244147 ZZ Tau IRS 8.11 0.05 7.38 0.05 6.73 0.05 5.78 0.05 2.01 0.04 -0.99 0.22 -3.61 0.22 yes yes yes yes yes b +043057.1+255639 KPNO-7 12.62 0.05 12.28 0.05 11.99 0.06 11.25 0.06 8.62 0.12 > 1.23 yes no no yes +043114.4+271017 JH56 8.72 0.05 8.75 0.05 8.66 0.05 8.60 0.05 6.76 0.02 > 0.75 yes yes no no +043119.0+233504 J04311907+2335047 11.66 0.05 11.53 0.05 11.56 0.06 11.46 0.06 > 10.59 > 0.86 no no +043123.8+241052 V927 Tau AB 8.52 0.05 8.47 0.05 8.38 0.05 8.38 0.05 8.19 0.09 > 0.91 no no no no +043126.6+270318 CFHT-13 12.90 0.06 12.75 0.06 12.72 0.07 12.70 0.07 10.72 0.29 > 0.68 yes no no no +043150.5+242418 HK Tau AB 7.71 0.05 7.35 0.05 7.10 0.05 6.58 0.05 2.31 0.04 -0.81 0.22 -3.02 0.22 yes yes yes yes yes +043158.4+254329 J1-665 9.35 0.05 9.29 0.05 9.24 0.05 9.22 0.05 9.04 0.17 > 1.08 no no no no +043203.2+252807 J04320329+2528078 10.30 0.05 10.20 0.05 10.13 0.06 10.09 0.06 > 9.67 > 1.03 no no +043215.4+242859 Haro6-13 < 6.62 < 6.10 5.49 0.05 4.85 0.05 0.88 0.04 -1.43 0.22 -4.02 0.34 yes yes yes +043217.8+242214 CFHT-7 AB 9.98 0.05 9.87 0.05 9.76 0.05 9.72 0.05 9.30 0.28 > 0.86 yes no no no +043218.8+242227 V928 Tau AB 7.86 0.05 7.82 0.05 7.72 0.05 7.64 0.05 7.54 0.06 > 0.84 no no no no +043223.2+240301 J04322329+2403013 10.89 0.05 10.83 0.05 10.79 0.06 10.67 0.06 > 9.82 > 0.91 no no +043230.5+241957 FY Tau 7.18 0.05 6.76 0.05 6.50 0.05 5.99 0.05 3.67 0.04 yes yes yes yes +043231.7+242002 FZ Tau < 6.62 < 6.10 5.27 0.05 4.58 0.05 2.06 0.04 0.31 0.22 yes yes yes +043232.0+225726 IRAS04295+2251 8.63 0.05 7.72 0.05 6.83 0.05 5.32 0.05 1.40 0.04 -1.32 0.22 -3.93 0.34 yes yes yes yes yes +043243.0+255231 UZ Tau Aab < 6.62 < 6.10 5.63 0.05 4.79 0.05 1.54 0.04 -0.69 0.22 -2.15 0.22 yes yes yes b +043249.1+225302 JH112 7.41 0.05 7.12 0.05 6.83 0.05 5.89 0.05 2.53 0.04 0.72 0.22 yes yes yes yes yes +043250.2+242211 CFHT-5 10.46 0.05 10.27 0.05 10.09 0.06 10.07 0.06 9.56 0.29 > 1.16 > -1.44 yes no no no +043301.9+242100 MHO-8 9.32 0.05 9.21 0.05 9.14 0.05 9.09 0.05 8.92 0.15 > 0.88 no no no no +043306.2+240933 GH Tau AB 7.08 0.05 6.77 0.05 6.50 0.05 6.03 0.05 3.17 0.04 0.43 0.22 yes yes 
yes yes yes +043306.6+240954 V807 Tau AB < 6.62 6.21 0.05 5.96 0.05 5.57 0.05 2.96 0.04 0.36 0.22 yes yes yes yes +043307.8+261606 KPNO-14 9.78 0.05 9.67 0.06 9.60 0.05 9.58 0.05 9.04 0.12 > 1.44 > -1.91 yes yes-faint no no +043309.4+224648 CFHT-12 10.86 0.05 10.63 0.05 10.34 0.06 9.95 0.06 8.25 0.07 > 1.16 yes yes-faint yes yes +043310.0+243343 V830 Tau 8.41 0.05 8.41 0.05 8.37 0.05 8.32 0.05 8.14 0.08 > 1.03 no no no no +043314.3+261423 IRAS04301+2608 12.05 0.05 11.72 0.05 11.29 0.06 9.54 0.05 3.28 0.04 1.12 0.22 > -0.95 yes yes yes-faint yes-faint yes +043316.5+225320 IRAS04302+2247 10.29 0.05 9.88 0.05 9.72 0.05 9.71 0.06 3.57 0.04 -1.88 0.22 -4.51 0.34 yes yes yes-faint no no +043319.0+224634 IRAS04303+2240 < 6.62 < 6.10 4.77 0.05 3.73 0.05 1.43 0.04 -0.11 0.22 yes yes yes +043334.0+242117 GI Tau 6.87 0.05 6.31 0.05 5.79 0.05 5.12 0.05 2.15 0.04 yes yes yes yes +043334.5+242105 GK Tau < 6.62 < 6.10 5.79 0.05 5.14 0.05 1.70 0.04 -0.23 0.22 yes yes yes +043336.7+260949 IS Tau AB 7.85 0.05 7.46 0.05 6.94 0.05 6.03 0.05 3.65 0.04 2.08 0.22 > -0.83 yes yes yes yes yes +043339.0+252038 DL Tau 6.95 0.05 6.37 0.05 5.92 0.05 5.13 0.05 2.19 0.04 -0.25 0.22 -2.44 0.22 yes yes yes yes yes +043342.9+252647 J04334291+2526470 12.76 0.06 12.63 0.06 12.52 0.07 12.47 0.07 > 11.05 > 1.43 no no +043352.0+225030 CI Tau 6.99 0.05 6.53 0.05 6.17 0.05 5.33 0.05 2.37 0.04 -0.80 0.22 yes yes yes yes yes +043352.5+225626 2MASS J04335252+2256269 8.79 0.05 8.71 0.05 8.63 0.05 8.60 0.05 8.32 0.09 > 1.41 no no no no +043354.7+261327 IT Tau AB 7.35 0.05 6.98 0.05 6.63 0.05 6.05 0.05 3.53 0.04 0.83 0.22 > -1.28 yes yes yes yes yes +043410.9+225144 JH108 9.30 0.05 9.27 0.05 9.19 0.05 9.17 0.05 8.88 0.12 > 1.15 no no no no +043415.2+225030 CFHT-1 11.23 0.05 11.10 0.05 10.98 0.06 11.02 0.06 > 9.94 > 1.00 no no +043439.2+250101 Wa Tau 1 7.83 0.05 7.79 0.05 7.75 0.05 7.73 0.05 7.67 0.07 > 0.85 no no no no +043455.4+242853 AA Tau 7.29 0.05 6.84 0.05 6.44 0.05 5.65 0.05 2.81 0.04 -0.14 0.22 -2.47 0.22 yes yes yes yes yes +043508.5+231139 CFHT-11 11.19 0.05 11.12 0.05 11.04 0.06 10.99 0.06 10.23 0.20 > 0.37 yes no no no +043520.2+223214 HO Tau 8.90 0.05 8.52 0.05 8.38 0.05 7.73 0.05 4.85 0.04 2.43 0.22 yes yes yes yes yes +043520.8+225424 FF Tau AB 8.45 0.05 8.44 0.05 8.42 0.05 8.36 0.05 8.15 0.09 > 1.26 no no no no +043527.3+241458 DN Tau 7.47 0.05 7.16 0.05 6.78 0.05 6.03 0.05 3.04 0.04 0.44 0.22 > 0.04 yes yes yes yes yes +043535.3+240819 IRAS04325+2402 A 9.93 0.05 9.28 0.05 9.06 0.05 8.54 0.05 1.43 0.04 -2.26 0.22 -5.07 0.34 yes yes yes yes yes +043540.9+241108 CoKu Tau/3 AB 7.43 0.05 6.95 0.05 6.48 0.05 5.64 0.05 3.31 0.04 1.33 0.22 > -2.14 yes yes yes yes yes +043541.8+223411 KPNO-8 11.64 0.05 11.54 0.05 11.43 0.06 11.46 0.06 > 10.71 > 1.12 no no +043545.2+273713 J04354526+2737130 13.18 0.06 13.11 0.06 12.93 0.07 13.07 0.11 > 10.62 > 1.47 no no +043547.3+225021 HQ Tau < 6.62 < 6.10 5.61 0.05 4.47 0.05 1.65 0.04 -0.18 0.22 yes yes yes +043551.0+225240 KPNO-15 9.79 0.05 9.72 0.05 9.66 0.05 9.65 0.05 9.71 0.11 > 0.72 no no no no +043551.4+224911 KPNO-9 13.63 0.06 13.52 0.06 13.70 0.12 13.41 0.17 > 10.93 > 0.86 no no +043552.0+225503 2MASS J04355209+2255039 9.56 0.05 9.52 0.05 9.39 0.05 9.35 0.06 > 0.50 no no +043552.7+225423 HP Tau AB < 6.62 6.20 0.05 5.65 0.05 4.88 0.05 1.49 0.04 -1.93 0.22 -4.48 0.34 yes yes yes yes +043552.8+225058 2MASS J04355286+2250585 9.46 0.05 9.36 0.05 9.28 0.05 9.29 0.05 9.10 0.13 > 0.93 no no no no +043553.4+225408 HP Tau/G3 AB 8.62 0.05 8.60 0.05 8.51 0.05 8.47 0.06 > -0.03 > -2.19 no no 
+043554.1+225413 HP Tau/G2 7.19 0.05 7.17 0.05 7.11 0.05 7.02 0.05 > -0.03 > -2.35 no no +043556.8+225436 Haro 6-28 AB 8.61 0.05 8.18 0.05 7.85 0.05 7.14 0.05 4.39 0.04 > 0.70 > -2.24 yes yes yes yes +043558.9+223835 2MASS J04355892+2238353 8.15 0.05 8.19 0.05 8.10 0.05 8.06 0.05 > 1.12 no no +043610.3+215936 J04361030+2159364 13.02 0.06 12.74 0.06 12.41 0.06 11.74 0.06 9.01 0.18 > 1.07 yes-faint no no yes +043610.3+225956 CFHT-2 11.63 0.05 11.43 0.05 11.34 0.06 11.32 0.06 > 10.61 > 1.48 > -3.51 no no +043619.0+254258 LkCa 14 8.52 0.05 8.54 0.05 8.51 0.05 8.45 0.05 8.24 0.10 > 0.99 > -0.98 no no no no +043638.9+225811 CFHT-3 11.79 0.05 11.69 0.05 11.59 0.06 11.57 0.06 > 8.55 > 1.12 no no +043649.1+241258 HD 283759 8.32 0.05 8.25 0.05 8.30 0.05 8.20 0.05 6.64 0.05 1.10 0.22 > 0.51 yes yes yes no no +043800.8+255857 ITG 2 9.60 0.05 9.47 0.05 9.37 0.05 9.31 0.05 9.17 0.19 > 0.96 > -2.05 no no no no +043814.8+261139 J04381486+2611399 10.80 0.05 10.21 0.05 9.64 0.05 8.92 0.05 4.98 0.04 > 0.80 > -1.11 yes yes yes yes +043815.6+230227 RXJ0438.2+2302 9.69 0.05 9.69 0.05 9.64 0.05 9.60 0.05 > 9.35 > 1.08 no no +043821.3+260913 GM Tau 9.27 0.05 8.77 0.05 8.43 0.05 7.81 0.05 5.33 0.04 > 0.97 > -1.31 yes yes yes yes +043828.5+261049 DO Tau < 6.62 < 6.10 5.26 0.05 4.77 0.05 1.09 0.04 -1.37 0.22 -3.92 0.34 yes yes yes +043835.2+261038 HV Tau AB 7.65 0.05 7.59 0.05 7.49 0.05 7.46 0.05 > 0.72 > -3.65 no no +043835.4+261041 HV Tau C 11.33 0.14 10.74 0.05 10.22 0.05 9.38 0.04 3.52 0.04 -0.09 0.22 yes no yes yes yes e +043858.5+233635 J0438586+2336352 10.51 0.05 9.84 0.05 6.39 0.05 > 0.85 yes +043901.6+233602 J0439016+2336030 9.76 0.05 9.18 0.05 6.28 0.05 > 2.30 yes +043903.9+254426 CFHT-6 10.75 0.05 10.45 0.05 10.02 0.06 9.14 0.05 6.51 0.05 > 0.47 > -0.54 yes yes yes yes c +043906.3+233417 J0439064+2334179 10.73 0.05 10.62 0.06 > 9.32 +043913.8+255320 IRAS04361+2547 AB 8.00 0.05 7.08 0.05 6.46 0.05 4.82 0.05 < 0.45 < -2.30 < -4.73 yes yes +043917.7+222103 LkCa 15 7.61 0.05 7.41 0.05 7.23 0.05 6.64 0.05 3.11 0.04 -0.40 0.22 -2.47 0.22 yes yes yes yes yes +043920.9+254502 GN Tau B 6.99 0.05 6.58 0.05 6.21 0.05 5.42 0.05 2.82 0.04 1.59 0.22 > -2.43 yes yes yes yes yes +043935.1+254144 IRAS04365+2535 7.22 0.05 < 6.10 4.87 0.05 4.16 0.05 < 0.45 -2.17 0.22 < -3.82 c +043947.4+260140 CFHT-4 9.54 0.05 9.07 0.05 8.60 0.05 7.78 0.05 4.95 0.04 > 0.91 > -4.76 yes yes yes yes +043953.9+260309 IRAS 04368+2557 13.39 0.11 11.15 0.08 10.09 0.07 9.73 0.08 2.69 0.04 < -2.30 < -4.40 yes-faint yes-faint yes c d +043955.7+254502 IC2087 IRS < 6.62 < 6.10 < 3.49 < 3.52 < 0.45 -2.17 0.22 > -5.41 c +044001.7+255629 CFHT-17 AB 10.15 0.05 9.96 0.05 9.87 0.05 9.82 0.06 9.10 0.18 > 0.74 > -2.26 yes yes-faint no no +044008.0+260525 IRAS 04370+2559 7.96 0.05 7.38 0.05 6.93 0.05 5.93 0.05 2.43 0.04 0.75 0.22 > -1.78 yes yes yes yes yes +044039.7+251906 J04403979+2519061 AB 9.84 0.05 9.68 0.06 9.62 0.05 9.57 0.05 7.55 0.05 > 1.00 > -2.43 yes yes-faint no no +044049.5+255119 JH223 8.90 0.05 8.60 0.05 8.24 0.05 7.74 0.05 5.13 0.04 2.20 0.22 > 0.93 yes yes yes yes yes +044104.2+255756 Haro 6-32 9.66 0.05 9.56 0.06 9.49 0.05 9.46 0.06 9.59 0.33 > 0.70 > -0.83 no no no no +044104.7+245106 IW Tau AB 8.13 0.05 8.15 0.05 8.08 0.05 8.03 0.05 7.97 0.07 > 1.08 no no no no +044108.2+255607 ITG 33 A 9.68 0.05 9.05 0.05 8.49 0.05 7.73 0.05 4.60 0.04 > 0.67 > 0.73 yes yes yes yes +044110.7+255511 ITG 34 10.78 0.05 10.35 0.05 9.92 0.06 9.22 0.05 6.48 0.05 > 0.74 > -1.25 yes yes yes yes +044112.6+254635 IRAS04381+2540 9.15 0.05 7.76 0.05 6.72 0.05 5.75 0.05 
1.43 0.04 -1.92 0.22 -4.33 0.34 yes yes yes yes yes +044138.8+255626 IRAS04385+2550 8.24 0.05 7.74 0.05 7.13 0.05 6.05 0.05 1.86 0.04 -0.90 0.22 -2.73 0.22 yes yes yes yes yes +044148.2+253430 J04414825+2534304 11.43 0.05 10.93 0.05 10.50 0.06 9.54 0.05 6.33 0.05 > 1.02 > -4.57 yes yes-faint yes yes +044205.4+252256 LkHa332/G2 AB 7.99 0.05 7.87 0.05 7.74 0.05 7.70 0.05 7.18 0.05 > -4.69 yes yes no no b +044207.3+252303 LkHa332/G1 AB 7.65 0.05 7.62 0.05 7.53 0.05 7.51 0.06 > 0.51 > -2.34 no no b +044207.7+252311 V955 Tau Ab 6.99 0.05 6.58 0.05 6.15 0.05 5.40 0.05 2.76 0.04 -0.56 0.22 > -2.07 yes yes yes yes yes b +044221.0+252034 CIDA-7 9.51 0.05 9.11 0.05 8.65 0.05 7.79 0.05 4.20 0.04 1.13 0.22 > -1.18 yes yes yes yes yes +044237.6+251537 DP Tau 7.57 0.05 6.90 0.05 6.34 0.05 5.37 0.05 1.90 0.04 0.54 0.22 > -1.70 yes yes yes yes yes +044303.0+252018 GO Tau 8.90 0.05 8.64 0.05 8.21 0.05 7.42 0.05 4.30 0.04 1.03 0.22 > 0.53 yes yes yes yes yes +044427.1+251216 IRAS04414+2506 9.56 0.05 9.00 0.05 8.36 0.05 7.43 0.05 4.25 0.04 1.76 0.22 > 0.05 yes yes yes yes yes +044642.6+245903 RXJ04467+2459 10.05 0.05 9.97 0.05 9.87 0.06 9.90 0.05 9.53 0.26 > 0.96 no no no no diff --git a/astropy/io/ascii/tests/t/cds_malformed.dat b/astropy/io/ascii/tests/t/cds_malformed.dat new file mode 100644 index 0000000..9b24f56 --- /dev/null +++ b/astropy/io/ascii/tests/t/cds_malformed.dat @@ -0,0 +1,37 @@ + + + + +Title: Spitzer Observations of NGC 1333: A Study of Structure and Evolution + in a Nearby Embedded Cluster +Authors: Gutermuth R.A., Myers P.C., Megeath S.T., Allen L.E., Pipher J.L., + Muzerolle J., Porras A., Winston E., Fazio G. +Table: Spitzer-identified YSOs: Addendum +================================================================================ +Byte-by-byte Description of file: datafile3.txt +-------------------------------------------------------------------------------- + Bytes Format Units Label Explanations +-------------------------------------------------------------------------------- + 1- 3 I3 --- Index Running identification number + 5- 6 I2 h RAh Hour of Right Ascension (J2000) + 8- 9 I2 min RAm Minute of Right Ascension (J2000) + 11- 15 F5.2 s RAs Second of Right Ascension (J2000) + - continuation of description + 17 A1 --- DE- Sign of the Declination (J2000) + 18- 19 I2 deg DEd Degree of Declination (J2000) + 21- 22 I2 arcmin DEm Arcminute of Declination (J2000) + 24- 27 F4.1 arcsec DEs Arcsecond of Declination (J2000) + 29- 68 A40 --- Match Literature match + 70- 75 A6 --- Class Source classification (1) + 77-80 F4.2 mag AK ? The K band extinction (2) + 82-86 F5.2 --- Fit ? Fit of IRAC photometry (3) +-------------------------------------------------------------------------------- +Note (1): Asterisks mark "deeply embedded" sources with questionable IRAC + colors or incomplete IRAC photometry and relatively bright + MIPS 24 micron photometry. +Note (2): Only provided for sources with valid JHK_S_ photometry. +Note (3): Defined as the slope of the linear least squares fit to the + 3.6 - 8.0 micron SEDs in log{lambda} F_{lambda} vs log{lambda} space. + Extinction is not accounted for in these values. High extinction can + bias Fit to higher values. 
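[This fixture keeps the ReadMe-style header and the data in a single file and deliberately bends the CDS conventions: leading blank lines, a continuation line inside the byte-by-byte description, and no delimiter before the lone data row that follows below. A minimal sketch of reading it, assuming astropy.io.ascii and the fixture path from this diff:]

    from astropy.io import ascii

    # No separate ReadMe here: with format='cds' the column description is
    # parsed from the top of the table file itself (path is the fixture
    # location added by this diff).
    tbl = ascii.read('astropy/io/ascii/tests/t/cds_malformed.dat', format='cds')
    print(tbl['Index', 'Class'])  # columns named in the description above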
+ 1 03 28 39.09 +31 06 01.9 I* 1.35 diff --git a/astropy/io/ascii/tests/t/commented_header.dat b/astropy/io/ascii/tests/t/commented_header.dat new file mode 100644 index 0000000..d4a6371 --- /dev/null +++ b/astropy/io/ascii/tests/t/commented_header.dat @@ -0,0 +1,4 @@ +# a b c +# A comment line +1 2 3 +4 5 6 diff --git a/astropy/io/ascii/tests/t/commented_header2.dat b/astropy/io/ascii/tests/t/commented_header2.dat new file mode 100644 index 0000000..a0f8093 --- /dev/null +++ b/astropy/io/ascii/tests/t/commented_header2.dat @@ -0,0 +1,5 @@ +# A comment line +# Another comment line +# a b c +1 2 3 +4 5 6 diff --git a/astropy/io/ascii/tests/t/continuation.dat b/astropy/io/ascii/tests/t/continuation.dat new file mode 100644 index 0000000..564a4f2 --- /dev/null +++ b/astropy/io/ascii/tests/t/continuation.dat @@ -0,0 +1,4 @@ +1 3 5 \ +hello world +4 6 8 next \ +line diff --git a/astropy/io/ascii/tests/t/daophot.dat b/astropy/io/ascii/tests/t/daophot.dat new file mode 100644 index 0000000..34750be --- /dev/null +++ b/astropy/io/ascii/tests/t/daophot.dat @@ -0,0 +1,45 @@ +#K MERGERAD = INDEF scaleunit %-23.7g +#K IRAF = NOAO/IRAFV2.10EXPORT version %-23s +#K USER = name %-23s +#K HOST = tucana computer %-23s +#K DATE = 05-28-93 mm-dd-yy %-23s +#K TIME = 14:46:13 hh:mm:ss %-23s +#K PACKAGE = daophot name %-23s +#K TASK = nstar name %-23s +#K IMAGE = test imagename %-23s +#K GRPFILE = test.psg.1 filename %-23s +#K PSFIMAGE = test.psf.1 imagename %-23s +#K NSTARFILE = test.nst.1 filename %-23s +#K REJFILE = "hello world" filename %-23s +#K SCALE = 1. units/pix %-23.7g +#K DATAMIN = 50. counts %-23.7g +#K DATAMAX = 24500. counts %-23.7g +#K GAIN = 1. number %-23.7g +#K READNOISE = 0. electrons %-23.7g +#K OTIME = 00:07:59.0 timeunit %-23s +#K XAIRMASS = 1.238106 number %-23.7g +#K IFILTER = V filter %-23s +#K RECENTER = yes switch %-23b +#K FITSKY = no switch %-23b +#K PSFMAG = 16.594 magnitude %-23.7g +#K PSFRAD = 5. scaleunit %-23.7g +#K FITRAD = 3. scaleunit %-23.7g +#K MAXITER = 50 number %-23d +#K MAXGROUP = 60 number %-23d +#K FLATERROR = 0.75 percentage %-23.7g +#K PROFERROR = 5. percentage %-23.7g +#K CLIPEXP = 6 number %-23d +#K CLIPRANGE = 2.5 sigma %-23.7g +# +#N ID XCENTER YCENTER MAG MERR MSKY NITER \ +#U ## pixels pixels magnitudes magnitudes counts ## \ +#F %-9d %-10.3f %-10.3f %-12.3f %-14.3f %-15.7g %-6d +# +#N SHARPNESS CHI PIER PERROR \ +#U ## ## ## perrors \ +#F %-23.3f %-12.3f %-6d %-13s +# +14 138.538 256.405 15.461 0.003 34.85955 4 \ + -0.032 0.802 0 No_error +18 18.114 280.170 22.329 0.206 30.12784 4 \ + -2.544 1.104 0 No_error diff --git a/astropy/io/ascii/tests/t/daophot.dat.gz b/astropy/io/ascii/tests/t/daophot.dat.gz new file mode 100644 index 0000000000000000000000000000000000000000..6d74933a1de2f8dd5291b953e5bf9a84c6f9aeda GIT binary patch literal 793 zcmV+!1Lpi6iwFoAbhl6d17u-uaAg~ewUXHx+93649pY`8q- zD%vFbJgoqJ^i_=v-vDlUb8tAb(arUkcPH#8l_=C&`W_GAJ=v9MkxPeCkRJn!BRP)V z(-QJ1OFHP@3qpjd{VK{MSQXiJUnRQ^pR&+GoEm-A?2k0avVI)*KR-K0=qv<7(Z~8= ztZ1-akF#uCmL0=qlR2ANQbDm@7uA`DkOgxD^RkL|XOOd~9jm0QAkCt;LweO{FpI9npF>IS_^nK9{)8S;Jh(u}XcxW%Tf=LB+! 
z1d>g%s&++w;_o#xq;yP&V`D@pRB2YP-Jy;pbNGS4M`vJcc&OI*sJTzdN>{dx$;fkGTZWMlg#%f%qBpjpnZ(dcv{Yk2bZUrq2GIvfjZ;FYcJS16Vj3gEXdc@jsPQO# zAAK~nbvJ2a0+qjR@kf^ZokqoM6`Dk;<*Ftn%20$+v2be+vHrz`v9ChX()vcvyV4 literal 0 HcmV?d00001 diff --git a/astropy/io/ascii/tests/t/daophot2.dat b/astropy/io/ascii/tests/t/daophot2.dat new file mode 100644 index 0000000..5c9d956 --- /dev/null +++ b/astropy/io/ascii/tests/t/daophot2.dat @@ -0,0 +1,31 @@ +#N IMAGE XINIT YINIT ID COORDS LID \ +#U imagename pixels pixels ## filename ## \ +#F %-23s %-10.3f %-10.3f %-6d %-23s %-6.0f +# +#N XCENTER YCENTER XSHIFT YSHIFT XERR YERR CIER CERROR \ +#U pixels pixels pixels pixels pixels pixels ## cerrors \ +#F %-14.3f %-11.3f %-8.3f %-8.3f %-8.3f %-15.3f %-5d %-9s +# +#N MSKY STDEV SSKEW NSKY NSREJ SIER SERROR \ +#U counts counts counts npix npix ## serrors \ +#F %-18.7g %-15.7g %-15.7g %-7d %-9d %-5d %-9s +# +#N ITIME XAIRMASS IFILTER OTIME \ +#U timeunit number name timeunit \ +#F %-18.7g %-15.7g %-23s %-23s +# +#N RAPERT SUM AREA FLUX MAG MERR PIER PERROR \ +#U scale counts pixels counts mag mag ## perrors \ +#F %-12.2f %-14.7g %-11.7g %-14.7g %-7.3f %-6.3f %-5s %-9s +# +n8q624e8q12_cal.fits[1]76.102 2.280 1 test.stars 1 \ + 76.150 2.182 0.048 -0.098 0.016 0.014 108 BadPixels \ + 0.5378259 0.1369367 0.1002712 604 176 0 NoError \ + 1407.892 INDEF F160W INDEF \ + 4.00 0. 0. 0. INDEF INDEF 301 OffImage +n8q624e8q12_cal.fits[1]81.730 3.167 2 test.stars 2 \ + 76.150 2.182 -5.580 -0.985 0.016 0.014 108 BadPixels \ + 0.5378259 0.1369367 0.1002712 604 176 0 NoError \ + 1407.892 INDEF F160W INDEF \ + 4.00 0. 0. 0. INDEF INDEF 301 OffImage + diff --git a/astropy/io/ascii/tests/t/daophot3.dat b/astropy/io/ascii/tests/t/daophot3.dat new file mode 100644 index 0000000..6aa8894 --- /dev/null +++ b/astropy/io/ascii/tests/t/daophot3.dat @@ -0,0 +1,120 @@ +#K IRAF = NOAO/IRAFV2.16 version %-23s +#K USER = joe name %-23s +#K HOST = porteus-ATMA computer %-23s +#K DATE = 2014-06-24 yyyy-mm-dd %-23s +#K TIME = 00:20:18 hh:mm:ss %-23s +#K PACKAGE = apphot name %-23s +#K TASK = phot name %-23s +# +#K SCALE = 1. units %-23.7g +#K FWHMPSF = 6.1 scaleunit %-23.7g +#K EMISSION = yes switch %-23b +#K DATAMIN = 93.232 counts %-23.7g +#K DATAMAX = 4000. counts %-23.7g +#K EXPOSURE = "" keyword %-23s +#K AIRMASS = "" keyword %-23s +#K FILTER = "" keyword %-23s +#K OBSTIME = "" keyword %-23s +# +#K NOISE = poisson model %-23s +#K SIGMA = 0.132 counts %-23.7g +#K GAIN = "" keyword %-23s +#K EPADU = 1152. e-/adu %-23.7g +#K CCDREAD = "" keyword %-23s +#K READNOISE = 0.05 e- %-23.7g +# +#K CALGORITHM = centroid algorithm %-23s +#K CBOXWIDTH = 5. scaleunit %-23.7g +#K CTHRESHOLD = 0. sigma %-23.7g +#K MINSNRATIO = 1. number %-23.7g +#K CMAXITER = 10 number %-23d +#K MAXSHIFT = 1. scaleunit %-23.7g +#K CLEAN = no switch %-23b +#K RCLEAN = 1. scaleunit %-23.7g +#K RCLIP = 2. scaleunit %-23.7g +#K KCLEAN = 3. sigma %-23.7g +# +#K SALGORITHM = centroid algorithm %-23s +#K ANNULUS = 24.4 scaleunit %-23.7g +#K DANNULUS = 15. scaleunit %-23.7g +#K SKYVALUE = 0. counts %-23.7g +#K KHIST = 3. sigma %-23.7g +#K BINSIZE = 0.1 sigma %-23.7g +#K SMOOTH = no switch %-23b +#K SMAXITER = 10 number %-23d +#K SLOCLIP = 0. percent %-23.7g +#K SHICLIP = 0. percent %-23.7g +#K SNREJECT = 50 number %-23d +#K SLOREJECT = 3. sigma %-23.7g +#K SHIREJECT = 3. sigma %-23.7g +#K RGROW = 0. scaleunit %-23.7g +# +#K WEIGHTING = constant model %-23s +#K APERTURES = 23.3,6:25:5 scaleunit %-23s +#K ZMAG = 25. 
zeropoint %-23.7g +# +#N IMAGE XINIT YINIT ID COORDS LID \ +#U imagename pixels pixels ## filename ## \ +#F %-23s %-10.3f %-10.3f %-6d %-23s %-6d +# +#N XCENTER YCENTER XSHIFT YSHIFT XERR YERR CIER CERROR \ +#U pixels pixels pixels pixels pixels pixels ## cerrors \ +#F %-14.3f %-11.3f %-8.3f %-8.3f %-8.3f %-15.3f %-5d %-9s +# +#N MSKY STDEV SSKEW NSKY NSREJ SIER SERROR \ +#U counts counts counts npix npix ## serrors \ +#F %-18.7g %-15.7g %-15.7g %-7d %-9d %-5d %-9s +# +#N ITIME XAIRMASS IFILTER OTIME \ +#U timeunit number name timeunit \ +#F %-18.7g %-15.7g %-23s %-23s +# +#N RAPERT SUM AREA FLUX MAG MERR PIER PERROR \ +#U scale counts pixels counts mag mag ## perrors \ +#F %-12.2f %-14.7g %-11.7g %-14.7g %-7.3f %-6.3f %-5d %-9s +# +Slope-AS40-435_median_S299.929 49.652 366 Slope-AS40-435_median_S366 \ + 300.120 49.969 0.191 0.317 0.011 0.012 0 NoError \ + 94.57384 0.1865725 0.09473237 2064 938 0 NoError \ + 1. INDEF INDEF INDEF \ + 6.00 10709.69 113.2273 1.350839 24.673 1.639 0 NoError *\ + 11.00 35964.65 380.2424 3.670495 23.588 1.171 0 NoError *\ + 16.00 76082.82 804.4385 3.982883 23.500 1.701 0 NoError *\ + 21.00 131202.7 1385.878 134.9305 INDEF INDEF 305 BadPixels*\ + 23.30 162159.5 1706.24 793.8187 INDEF INDEF 305 BadPixels* +Slope-AS40-435_median_S85.452 55.434 367 Slope-AS40-435_median_S367 \ + 85.458 55.484 0.006 0.050 0.008 0.006 0 NoError \ + 94.59016 0.2281704 0.1264289 1623 1378 0 NoError \ + 1. INDEF INDEF INDEF \ + 6.00 10761.49 112.8701 85.08714 20.175 0.032 0 NoError *\ + 11.00 36058.47 380.1428 100.7009 19.992 0.053 0 NoError *\ + 16.00 76216.4 804.6936 100.2974 19.997 0.086 0 NoError *\ + 21.00 130393.5 1386.111 -719.0389 INDEF INDEF 305 BadPixels*\ + 23.30 158316.8 1706.482 -3099.61 INDEF INDEF 305 BadPixels* +Slope-AS40-435_median_S848.186 56.486 368 Slope-AS40-435_median_S368 \ + 848.380 56.544 0.194 0.058 0.013 0.009 0 NoError \ + 94.59234 0.1647499 0.06409879 2098 903 0 NoError \ + 1. INDEF INDEF INDEF \ + 6.00 10735.4 113.059 40.88579 20.971 0.048 0 NoError *\ + 11.00 36009.39 380.2245 43.06569 20.915 0.088 0 NoError *\ + 16.00 76169.49 804.6198 58.62642 20.580 0.102 0 NoError *\ + 21.00 131348.9 1386.085 235.8676 INDEF INDEF 305 BadPixels*\ + 23.30 161839.2 1706.263 439.7652 INDEF INDEF 305 BadPixels* +Slope-AS40-435_median_S464.199 59.384 369 Slope-AS40-435_median_S369 \ + 464.273 59.617 0.074 0.233 0.010 0.011 0 NoError \ + 94.60605 0.1613172 0.04022013 2314 686 0 NoError \ + 1. INDEF INDEF INDEF \ + 6.00 10732.46 113.3501 8.849111 22.633 0.216 0 NoError *\ + 11.00 35991.75 380.3943 4.148174 23.455 0.889 0 NoError *\ + 16.00 76101.38 804.4529 -4.720454 INDEF INDEF 0 NoError *\ + 21.00 131053.2 1385.598 -32.75801 INDEF INDEF 0 NoError *\ + 23.30 161354.6 1705.858 -29.88808 INDEF INDEF 0 NoError * +Slope-AS40-435_median_S688.924 61.839 370 Slope-AS40-435_median_S370 \ + 689.056 61.637 0.132 -0.202 0.009 0.017 0 NoError \ + 94.56474 0.1917982 -0.04442054 2363 646 0 NoError \ + 1. 
INDEF INDEF INDEF \ + 6.00 10761.45 113.5188 26.56977 21.439 0.086 0 NoError *\ + 11.00 36012.39 380.5187 28.73899 21.354 0.152 0 NoError *\ + 16.00 76101.65 804.5662 18.05782 21.858 0.379 0 NoError *\ + 21.00 131029.3 1385.578 2.4874 24.011 3.925 0 NoError *\ + 23.30 161285.7 1705.41 14.05488 22.130 0.803 0 NoError * diff --git a/astropy/io/ascii/tests/t/daophot4.dat b/astropy/io/ascii/tests/t/daophot4.dat new file mode 100644 index 0000000..a20233e --- /dev/null +++ b/astropy/io/ascii/tests/t/daophot4.dat @@ -0,0 +1,113 @@ +#K IRAF = NOAO/IRAFV2.14.1 version %-23s +#K USER = hannes name %-23s +#K HOST = prometheus computer %-23s +#K DATE = 2015-03-11 yyyy-mm-dd %-23s +#K TIME = 15:26:26 hh:mm:ss %-23s +#K PACKAGE = apphot name %-23s +#K TASK = phot name %-23s +# +#K SCALE = 1. units %-23.7g +#K FWHMPSF = 4.119713 scaleunit %-23.7g +#K EMISSION = yes switch %-23b +#K DATAMIN = INDEF counts %-23.7g +#K DATAMAX = 65536. counts %-23.7g +#K EXPOSURE = exposure keyword %-23s +#K AIRMASS = airmass keyword %-23s +#K FILTER = filter keyword %-23s +#K OBSTIME = utc-obs keyword %-23s +# +#K NOISE = poisson model %-23s +#K SIGMA = 41.66582 counts %-23.7g +#K GAIN = "" keyword %-23s +#K EPADU = 1. e-/adu %-23.7g +#K CCDREAD = "" keyword %-23s +#K READNOISE = 7.49 e- %-23.7g +# +#K CALGORITHM = centroid algorithm %-23s +#K CBOXWIDTH = 12. scaleunit %-23.7g +#K CTHRESHOLD = 3. sigma %-23.7g +#K MINSNRATIO = 1. number %-23.7g +#K CMAXITER = 10 number %-23d +#K MAXSHIFT = 5. scaleunit %-23.7g +#K CLEAN = no switch %-23b +#K RCLEAN = 1. scaleunit %-23.7g +#K RCLIP = 2. scaleunit %-23.7g +#K KCLEAN = 3. sigma %-23.7g +# +#K SALGORITHM = centroid algorithm %-23s +#K ANNULUS = 7.17957 scaleunit %-23.7g +#K DANNULUS = 7.82043 scaleunit %-23.7g +#K SKYVALUE = 0. counts %-23.7g +#K KHIST = 3. sigma %-23.7g +#K BINSIZE = 0.1 sigma %-23.7g +#K SMOOTH = no switch %-23b +#K SMAXITER = 10 number %-23d +#K SLOCLIP = 3. percent %-23.7g +#K SHICLIP = 3. percent %-23.7g +#K SNREJECT = 50 number %-23d +#K SLOREJECT = 3. sigma %-23.7g +#K SHIREJECT = 3. sigma %-23.7g +#K RGROW = 0. scaleunit %-23.7g +# +#K WEIGHTING = constant model %-23s +#K APERTURES = 1.0, 2.0, 3.0, 4.0, 5.0 scaleunit %-23s +#K ZMAG = 0. zeropoint %-23.7g +# +#N IMAGE XINIT YINIT ID COORDS LID \ +#U imagename pixels pixels ## filename ## \ +#F %-23s %-10.3f %-10.3f %-6d %-23s %-6d +# +#N XCENTER YCENTER XSHIFT YSHIFT XERR YERR CIER CERROR \ +#U pixels pixels pixels pixels pixels pixels ## cerrors \ +#F %-14.3f %-11.3f %-8.3f %-8.3f %-8.3f %-15.3f %-5d %-9s +# +#N MSKY STDEV SSKEW NSKY NSREJ SIER SERROR \ +#U counts counts counts npix npix ## serrors \ +#F %-18.7g %-15.7g %-15.7g %-7d %-9d %-5d %-9s +# +#N ITIME XAIRMASS IFILTER OTIME \ +#U timeunit number name timeunit \ +#F %-18.7g %-15.7g %-23s %-23s +# +#N RAPERT SUM AREA FLUX MAG MERR PIER PERROR \ +#U scale counts pixels counts mag mag ## perrors \ +#F %-12.2f %-14.7g %-11.7g %-14.7g %-7.3f %-6.3f %-5d %-9s +# +20150224.010.bff.fits[*106.579 106.934 1 20150224.010.bff.coo 1 \ + 106.559 108.018 -0.020 1.084 0.101 0.074 0 NoError \ + 2274.581 35.8673 11.2252 507 34 0 NoError \ + 15. INDEF WL 18.25805367777778 \ + 1.00 9109.85 3.522305 1098.081 -4.661 0.074 0 NoError *\ + 2.00 31801.22 12.6082 3122.852 -5.796 0.049 0 NoError *\ + 3.00 70478.22 28.76358 5053.117 -6.319 0.045 0 NoError *\ + 4.00 121419. 
50.47698 6604.971 -6.609 0.046 0 NoError *\ + 5.00 186154.5 78.6758 7199.989 -6.703 0.053 0 NoError *\ + 6.00 266042.9 113.531 7807.418 -6.791 0.060 0 NoError *\ + 7.00 358247.2 153.8083 8397.668 -6.870 0.067 0 NoError *\ + 8.00 467542.2 201.7787 8580.032 -6.893 0.077 0 NoError *\ + 9.00 587422. 254.4993 8542.593 -6.889 0.090 0 NoError *\ + 10.00 724023.7 314.4823 8708.25 -6.910 0.102 0 NoError *\ + 11.00 874403.1 380.5716 8762.004 -6.916 0.115 0 NoError *\ + 12.00 1036954. 452.126 8556.425 -6.891 0.134 0 NoError *\ + 13.00 1217921. 531.7312 8455.69 -6.878 0.152 0 NoError *\ + 14.00 1408227. 615.5404 8130.7 -6.835 0.177 0 NoError *\ + 15.00 1617583. 707.5204 8270.082 -6.854 0.194 0 NoError * +20150224.010.bff.fits[*28.377 105.125 2 20150224.010.bff.coo 2 \ + 28.334 106.194 -0.043 1.069 0.057 0.057 0 NoError \ + 2255.277 33.60751 14.9162 503 47 0 NoError \ + 15. INDEF WL 18.25805367777778 \ + 1.00 9725.537 3.433434 1982.191 -5.303 0.042 0 NoError *\ + 2.00 34708.58 12.786 5872.618 -6.482 0.027 0 NoError *\ + 3.00 73724.14 28.56388 9304.661 -6.982 0.024 0 NoError *\ + 4.00 125125.7 50.50517 11222.5 -7.185 0.026 0 NoError *\ + 5.00 189990.9 78.80913 12254.43 -7.281 0.030 0 NoError *\ + 6.00 268543.3 113.428 12731.67 -7.322 0.035 0 NoError *\ + 7.00 360879.8 154.1563 13214.47 -7.362 0.040 0 NoError *\ + 8.00 467521.8 201.3365 13452.14 -7.382 0.046 0 NoError *\ + 9.00 588086.8 254.6784 13716.38 -7.403 0.053 0 NoError *\ + 10.00 723155.3 314.422 14046.6 -7.429 0.059 0 NoError *\ + 11.00 872591.1 380.4647 14537.59 -7.466 0.066 0 NoError *\ + 12.00 1035008. 452.6064 14254.72 -7.445 0.076 0 NoError *\ + 13.00 1212656. 531.2477 14544.82 -7.467 0.083 0 NoError *\ + 14.00 1404271. 615.9114 15220.43 -7.516 0.089 0 NoError *\ + 15.00 1610992. 707.2085 16040.83 -7.573 0.094 0 NoError * diff --git a/astropy/io/ascii/tests/t/fill_values.txt b/astropy/io/ascii/tests/t/fill_values.txt new file mode 100644 index 0000000..323e746 --- /dev/null +++ b/astropy/io/ascii/tests/t/fill_values.txt @@ -0,0 +1,3 @@ +a,b,c +1,2,3 +a,a,4 diff --git a/astropy/io/ascii/tests/t/fixed_width_2_line.txt b/astropy/io/ascii/tests/t/fixed_width_2_line.txt new file mode 100644 index 0000000..2d78258 --- /dev/null +++ b/astropy/io/ascii/tests/t/fixed_width_2_line.txt @@ -0,0 +1,4 @@ +Col1 Col2 Col3 Col4 +---- --------- ---- ---- + 1.2 "hello" 1 a + 2.4 's worlds 2 2 diff --git a/astropy/io/ascii/tests/t/html.html b/astropy/io/ascii/tests/t/html.html new file mode 100644 index 0000000..da5ac91 --- /dev/null +++ b/astropy/io/ascii/tests/t/html.html @@ -0,0 +1,29 @@ + + + + + + + + + + + + +
+ <tr><th>Column 1</th><th>Column 2</th><th>Column 3</th></tr>
+ <tr><td>1</td><td>a</td><td>1.05</td></tr>
+ <tr><td>2</td><td>b</td><td>2.75</td></tr>
+ <tr><td>3</td><td>c</td><td>-1.25</td></tr>
+ </table>
+ <table>
+ <tr><th>Column A</th><th>Column B</th><th>Column C</th></tr>
+ <tr><td>4</td><td>d</td><td>10.5</td></tr>
+ <tr><td>5</td><td>e</td><td>27.5</td></tr>
+ <tr><td>6</td><td>f</td><td>-12.5</td></tr>
+ </table>
+ <table>
+ <tr><th>C1</th><th>C2</th><th>C3</th></tr>
+ <tr><td>7</td><td>g</td><td>105.0</td></tr>
+ <tr><td>8</td><td>h</td><td>275.0</td></tr>
+ <tr><td>9</td><td>i</td><td>-125.0</td></tr>
    + + \ No newline at end of file diff --git a/astropy/io/ascii/tests/t/html2.html b/astropy/io/ascii/tests/t/html2.html new file mode 100644 index 0000000..50b1fee --- /dev/null +++ b/astropy/io/ascii/tests/t/html2.html @@ -0,0 +1,28 @@ + + + + + + + + +Row with no data elements + + + + + + + + + + + + + + + Some junk + +
    AB
    12.50000000000000000013
    1a13.5
    + + diff --git a/astropy/io/ascii/tests/t/ipac.dat b/astropy/io/ascii/tests/t/ipac.dat new file mode 100644 index 0000000..f7d51cd --- /dev/null +++ b/astropy/io/ascii/tests/t/ipac.dat @@ -0,0 +1,12 @@ +\intval = 1 +\floatval=2.3e3 +\date = "Wed Sp 20 09:48:36 1995" +\key_continue = 'IPAC keywords ' +\key_continue = 'can continue across lines' +\ This is an example of a valid comment +| ra | dec | sai |-----v2---| sptype | +| real | real | int | real | char | +| unit | unit | unit | unit | ergs | +| null | null | -999 | null | -999 | + null 29.09056 -999 2.06000 -999 +12345678901234567890123456789012345678901234567890123456789012345 diff --git a/astropy/io/ascii/tests/t/ipac.dat.bz2 b/astropy/io/ascii/tests/t/ipac.dat.bz2 new file mode 100644 index 0000000000000000000000000000000000000000..a1dd3412aa5b3fb8578f614b6a647c6cb5e50251 GIT binary patch literal 385 zcmV-{0e=2MT4*^jL0KkKS;h+@S^xmF-+*=yP~(5{C?HIPFYn*vFacSns5MV0$*A=< zNxeiGGynr24^W5^rX=+;G|7Tsj1hz|2@(*DrqPn0qM9@hPpsJWFO{SMAO1 zg|r7##sZ*#;ZBy+$Rz;@%pnRPwlG|x@92|G43?^U_`~BQzJ+ragnw<1%3PybU3sMZ zZng!DB?8zkf~7%cQCB2gfJ$?@XXE88+UxEY!;H;7e3yAgI#bnr{_eD(uG@OFa-DZ5 zjZ*J9ok0d&2E=dthK$2h!UEJusJ?93mY4&}165JF$$u!ej$r)jEN#55f|&#dHj|P} zg(Q3^h!`49o8qv_qkDo3#E@Mx;%X5(vz6q_=T~jWtj8@P=@8Rkf5qI9P81|Dg2vf`Nym6X7T8AW)qm(u zlJKm@R5U-6)kAG4Wj84%dC-=@<;wWXa}h@KwCqQ{0^La;On9Z1IXlk75$sMP=vJXc zUK`y|Oma5{k?~-v?NZG-WB|L3!qA;0O5RQd!t>;xv1s}u%Up(`xCzaS)N})+r>4zh?(S4gT>ycPya!lO%Nm2Lgi?Xt^?^H$xTHI!$Wwyr3u?6gN9P((P#~i0WM$&rKT)X`48{ub)`}_a^0IIuN AaR2}S literal 0 HcmV?d00001 diff --git a/astropy/io/ascii/tests/t/latex2.tex b/astropy/io/ascii/tests/t/latex2.tex new file mode 100644 index 0000000..f0d85ee --- /dev/null +++ b/astropy/io/ascii/tests/t/latex2.tex @@ -0,0 +1,14 @@ +\begin{deluxetable}{llrl} +%\tabletypesize{\scriptsize} +%\rotate +\tablecaption{Log of observations\label{tab:obslog}} +\tablewidth{0pt} +\tablehead{\colhead{Facility} & \colhead{Id} & \colhead{exposure} & \colhead{date}} + +\startdata +Chandra & \dataset[ADS/Sa.CXO#obs/06438]{ObsId 6438} & 23 ks & 2006-12-10\\ +Spitzer & AOR 3656448 & 41.6 s & 2004-06-09\\ +FLWO & filter: $B$ & 600 s & 2009-11-18\\ +\enddata + +\end{deluxetable} diff --git a/astropy/io/ascii/tests/t/latex3.tex b/astropy/io/ascii/tests/t/latex3.tex new file mode 100644 index 0000000..a8c7803 --- /dev/null +++ b/astropy/io/ascii/tests/t/latex3.tex @@ -0,0 +1,7 @@ +\begin{tabular}{lrr}\hline +cola & colb & colc\\ +\hline +a & 1 & 2\\ +b & 3 & 4\\ +\hline +\end{tabular} diff --git a/astropy/io/ascii/tests/t/nls1_stackinfo.dbout b/astropy/io/ascii/tests/t/nls1_stackinfo.dbout new file mode 100644 index 0000000..015139f --- /dev/null +++ b/astropy/io/ascii/tests/t/nls1_stackinfo.dbout @@ -0,0 +1,60 @@ + |objID |osrcid |xsrcid |SpecObjID |ra |dec |obsid |ccdid |z |modelMag_i |modelMagErr_i |modelMag_r |modelMagErr_r |expo |theta |rad_ecf_39 |detlim90 |fBlim90 +|-----------------------|-----------------|---------------|-----------------------|--------------------|--------------------|-----------|-----------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|-------------------- +| 277955213|S000.7044P00.7513|XS04861B6_005 | 10943136| 0.704453| 0.751336| 4861| 6| 0.086550| 15.462060| 0.003840| 16.063650| 0.003888| 5104.621261| 0.105533| 3.022382| 15.117712| 0.311318 +| 889974380|S002.9051P14.7003|XS03957B7_004 | 21189832| 2.905195| 14.700391| 3957| 7| 0.131820| 16.466050| 0.004807| 16.992690| 
0.004917| 1479.207035| 0.118550| 3.016342| 17.364280| 0.880407 +| 661258793|S005.7709M01.1287|XS04079B7_003 | 10999832| 5.770986| -1.128731| 4079| 7| 0.166355| 17.232030| 0.008332| 17.549760| 0.007209| 1540.924685| 0.073783| 1.489627| 11.915912| 0.561011 +| 809266720|S006.9683P00.4376|XS04080B7_003 | 11027112| 6.968335| 0.437687| 4080| 7| 0.205337| 17.600880| 0.007790| 18.047560| 0.007439| 1373.690631| 0.073017| 1.489627| 15.480587| 0.807865 +| 275803698|S014.7729P00.1143|XS02179B6_001 | 11140928| 14.772956| 0.114358| 2179| 6| 0.718880| 17.487000| 0.006978| 17.441360| 0.005979| 2043.570572| 0.091283| 1.453126| 13.288200| 0.676781 +| 610324605|S029.2184M00.2061|XS04081B7_004 | 11365768| 29.218458| -0.206140| 4081| 7| 0.163040| 17.522280| 0.006957| 17.821940| 0.006828| 1513.497218| 0.073333| 1.489627| 12.188137| 0.580337 +| 819359440|S029.9901P00.5529|XS05777B1_005 | 11365080| 29.990162| 0.552903| 5777| 1| 0.311778| 18.508300| 0.013120| 18.822060| 0.011235| 16875.600510| 0.173000| 5.127182| 29.849694| 0.201770 +| 359375943|S037.1728P00.8690|XS04083B7_002 | 11478640| 37.172803| 0.869065| 4083| 7| 0.186225| 17.741220| 0.008360| 18.157300| 0.007994| 1600.672011| 0.074100| 1.489627| 12.060426| 0.546492 +| 680002094|S048.6144M01.1978|XS04084B7_001 | 11619072| 48.614411| -1.197867| 4084| 7| 0.387004| 18.084100| 0.008811| 18.047740| 0.007107| 1688.844386| 0.074850| 1.489627| 14.418508| 0.665490 +| 207476987|S122.0691P21.1492|XS03785B1_003 | 54178104| 122.069156| 21.149206| 3785| 1| 0.142121| 18.795740| 0.014157| 19.272550| 0.014808| 15935.690359| 0.148833| 5.116525| 25.744492| 0.182462 +| 314622107|S124.9642P36.8307|XS04119B3_002 | 25158064| 124.964241| 36.830793| 4119| 3| 0.736540| 19.246110| 0.015329| 19.180730| 0.011400| 6686.525810| 0.191800| 7.738524| 30.212630| 0.663496 +| 499048612|S128.7287P55.5725|XS04940B7_008 | 50209680| 128.728748| 55.572530| 4940| 7| 0.241157| 16.196610| 0.006701| 16.845690| 0.008232| 85385.450431| 0.021583| 0.327020| 25.359343| 0.022349 +| 509872023|S130.2167P13.2152|NULL | 68308384| 130.216762| 13.215295| 2130| 7| 0.170352| 17.437750| 0.009265| 17.989470| 0.010459| 22105.895051| 0.010694| 0.232184| 8.703367| 0.030060 + | 337394906|S134.7069P27.8194|NULL | 54460872| 134.706929| 27.819409| 5821| 3| 0.090713| 15.495630| 0.004090| 15.933850| 0.004758| 20139.691101| 0.153217| 5.127182| 26.899264| 0.175787 +| 204612149|S140.7808P30.9906|XS04122B5_001 | 54657400| 140.780817| 30.990687| 4122| 5| 0.629145| 18.845160| 0.012765| 18.948480| 0.010724| 4173.745162| 0.192050| 7.739623| 37.336536| 0.819048 +| 731490396|S147.5151P17.1590|XS03274B2_001 | 66732256| 147.515194| 17.159084| 3274| 2| 0.195364| 17.472340| 0.006327| 17.783260| 0.006028| 14096.036370| 0.032833| 0.366684| 9.702502| 0.075172 +| 138368206|S147.6362P59.8164|NULL | 12773752| 147.636280| 59.816408| 3036| 2| 0.652411| 19.914220| 0.029210| 20.094790| 0.024926| 4012.606072| 0.167283| 5.127182| 21.727958| 0.697762 +| 561051767|S151.8587P12.8156|XS05606B7_004 | 49112864| 151.858761| 12.815617| 5606| 7| 0.240653| 15.175160| 0.004690| 15.348870| 0.004204| 33943.906753| 0.008806| 0.243602| 8.594830| 0.019169 +| 827223175|S153.3119M00.8760|XS04085B7_001 | 7622024| 153.311933| -0.876011| 4085| 7| 0.275749| 17.638600| 0.006945| 17.638410| 0.005750| 1769.308133| 0.074400| 1.489627| 12.371362| 0.512717 +| 125920375|S160.6255P01.0399|XS04086B7_004 | 7762256| 160.625571| 1.039913| 4086| 7| 0.115493| 16.476400| 0.006180| 16.952690| 0.005979| 1351.602676| 0.074017| 1.489627| 12.388117| 0.674828 +| 
126051412|S160.8870P01.0191|XS04086B2_001 | 7762456| 160.887017| 1.019120| 4086| 2| 0.071893| 15.403520| 0.003958| 15.830900| 0.003798| 1470.312820| 0.188000| 7.763980| 24.843778| 2.297003 +| 199471676|S169.6261P40.4316|XS00868B3_001 | 40555520| 169.626193| 40.431669| 868| 3| 0.154596| 15.520440| 0.003612| 15.843520| 0.003574| 15875.864312| 0.039917| 0.409867| 10.331539| 0.069317 +| 911117410|S174.3501P30.0602|XS04161B7_011 | 62510944| 174.350159| 30.060294| 4161| 7| 0.695136| 19.910250| 0.032209| 20.022840| 0.021641| 14988.033880| 0.062233| 0.614561| 11.836136| 0.055041 +| 302536231|S179.8826P29.2455|XS00874B3_007 | 62651000| 179.882670| 29.245515| 874| 3| 0.724488| 17.965190| 0.007865| 18.090560| 0.007281| 94375.899791| 0.004833| 0.230755| 8.240796| 0.009124 +| 302601830|S179.9533P29.1580|XS00874B2_001 | 62623640| 179.953385| 29.158023| 874| 2| 0.083344| 15.802610| 0.004156| 16.238050| 0.004098| 84775.542942| 0.105917| 3.022382| 21.690631| 0.026736 +| 115261957|S180.7950P57.6803|XS05757B0_022 | 37008112| 180.795062| 57.680354| 5757| 0| 0.759025| 18.066390| 0.008409| 18.060240| 0.006947| 40390.627482| 0.178917| 5.125388| 34.575626| 0.096856 +| 607275593|S183.4289P02.8802|XS04934B3_004 | 14602336| 183.428996| 2.880256| 4934| 3| 0.641174| 19.083390| 0.016683| 19.264170| 0.014156| 17374.807609| 0.067033| 1.489627| 12.291123| 0.078403 +| 425979958|S183.5631P00.9198|XS04087B7_004 | 8100808| 183.563163| 0.919874| 4087| 7| 0.395653| 18.254720| 0.010882| 18.328170| 0.008583| 1743.376400| 0.075183| 1.489627| 12.265030| 0.491269 +| 189855768|S184.4790P58.6599|XS03558B3_002 | 37036288| 184.479077| 58.659912| 3558| 3| 0.023181| 14.626880| 0.002469| 14.904420| 0.002339| 6129.941952| 0.003750| 0.232403| 7.666659| 0.136118 +| 619169285|S187.0751P44.2172|NULL | 38612200| 187.075137| 44.217228| 938| 0| 0.662250| 17.907240| 0.007109| 18.053730| 0.006975| 2298.154599| 0.172200| 5.125388| 20.352558| 0.848250 +| 325588542|S187.5646P03.0485|XS04040B7_001 | 14659784| 187.564673| 3.048508| 4040| 7| 0.137670| 16.402290| 0.004927| 17.103210| 0.005467| 3409.797684| 0.010833| 0.243602| 7.851051| 0.160035 +| 574503609|S187.6176P47.8825|NULL | 40921304| 187.617696| 47.882592| 3071| 3| 0.259120| 18.357610| 0.011731| 18.646700| 0.010925| 6222.550176| 0.197167| 7.763595| 30.051725| 0.668981 +| 101878322|S188.4820P13.0754|XS02107B7_001 | 45509408| 188.482006| 13.075423| 2107| 7| 0.480211| 18.623910| 0.015033| 19.178470| 0.015434| 5550.153407| 0.015417| 0.276499| 8.218379| 0.089948 +| 834099774|S188.5555P47.8975|XS03055B7_001 | 40921752| 188.555591| 47.897583| 3055| 7| 0.372812| 16.768010| 0.004767| 16.822040| 0.004038| 4452.821575| 0.009889| 0.243602| 8.075360| 0.119255 +| 528223925|S191.3095P01.1419|NULL | 8213952| 191.309592| 1.141912| 2974| 2| 0.091196| 16.407150| 0.006573| 16.882680| 0.006505| 5665.430694| 0.160700| 5.127182| 21.105785| 0.481827 +| 430960732|S194.9316P01.0486|XS04088B7_005 | 8269800| 194.931643| 1.048622| 4088| 7| 0.394569| 18.230550| 0.009419| 18.305880| 0.007674| 1532.367693| 0.075000| 1.489627| 11.697405| 0.529051 +| 040450702|S196.9301P46.7193|NULL | 41090688| 196.930172| 46.719346| 3244| 6| 0.600141| 19.711200| 0.021784| 20.631250| 0.030904| 8481.301760| 0.184333| 5.111493| 28.487300| 0.409704 +| 895335014|S197.7853P00.5310|XS04089B7_006 | 8297720| 197.785328| 0.531036| 4089| 7| 0.429236| 17.838440| 0.007412| 17.883200| 0.006128| 1342.669846| 0.075917| 1.489627| 11.975790| 0.622564 +| 362199556|S206.2204P00.0889|NULL | 8438656| 206.220450| 0.088956| 2251| 6| 0.087128| 15.878880| 0.003993| 
16.339870| 0.003999| 7732.826167| 0.146900| 5.125388| 24.196409| 0.276815 +| 390308579|S213.1444M00.5833|XS04090B7_001 | 8550616| 213.144471| -0.583347| 4090| 7| 0.126940| 16.924460| 0.008082| 17.337560| 0.007611| 1850.463370| 0.074433| 1.489627| 12.129062| 0.475026 +| 444464848|S213.7065P36.2111|XS04163B1_002 | 46269424| 213.706536| 36.211187| 4163| 1| 0.180925| 17.916410| 0.009867| 18.346860| 0.009661| 81178.124219| 0.092700| 1.460516| 17.286353| 0.023684 +| 222587913|S216.7550P44.2825|XS06112B2_004 | 36276768| 216.755074| 44.282505| 6112| 2| 0.735436| 19.039310| 0.015654| 19.133000| 0.012307| 7202.822662| 0.137533| 3.019678| 17.903948| 0.270855 +| 929145428|S217.6259M00.1875|XS04091B7_004 | 8607176| 217.625904| -0.187530| 4091| 7| 0.103307| 17.334130| 0.007846| 17.791860| 0.007610| 1362.631234| 0.075700| 1.489627| 11.674970| 0.622522 +| 428847268|S217.6691P36.8177|XS04126B7_001 | 38894856| 217.669106| 36.817754| 4126| 7| 0.566053| 18.744800| 0.010372| 19.168410| 0.011316| 2834.413800| 0.009583| 0.243602| 7.405304| 0.178125 +| 440484921|S219.7460P03.5965|XS03290B1_006 | 16516928| 219.746065| 3.596520| 3290| 1| 0.733848| 18.461360| 0.009410| 18.429130| 0.008255| 48647.675049| 0.137250| 3.014202| 26.440169| 0.059182 +| 468047975|S222.3062P00.4019|XS04092B7_004 | 8691936| 222.306273| 0.401911| 4092| 7| 0.440801| 18.675470| 0.012882| 18.855400| 0.010686| 1574.467045| 0.052750| 0.607436| 10.031873| 0.470555 +| 468113483|S222.3862P00.3767|XS04092B7_001 | 8691984| 222.386270| 0.376752| 4092| 7| 0.080563| 16.388650| 0.004431| 16.884420| 0.004493| 1920.873200| 0.074717| 1.489627| 12.712145| 0.488745 +| 931439168|S222.8459M00.1071|XS04093B7_001 | 8691960| 222.845909| -0.107191| 4093| 7| 0.138627| 17.058580| 0.006735| 17.488910| 0.006241| 1898.411113| 0.075567| 1.489627| 11.967944| 0.467229 +| 262643238|S235.8184P54.0905|XS00822B6_002 | 17361832| 235.818430| 54.090581| 822| 6| 0.245121| 17.540910| 0.006667| 17.778130| 0.006324| 3636.411721| 0.140183| 3.019678| 18.358903| 0.439665 +| 926158050|S240.8326P42.3631|NULL | 37599160| 240.832606| 42.363127| 5609| 6| 0.245845| 18.507290| 0.014520| 18.790040| 0.011571| 11325.862421| 0.133133| 3.019841| 18.982950| 0.171760 +| 608676499|S245.9044P31.1722|XS05607B7_001 | 39992048| 245.904431| 31.172231| 5607| 7| 0.235655| 18.073020| 0.012637| 18.487890| 0.011318| 16902.229821| 0.033283| 0.375642| 10.006948| 0.042823 +| 960066205|S246.4348P15.8271|XS03229B1_001 | 62172624| 246.434806| 15.827186| 3229| 1| 0.798335| 18.653970| 0.012212| 18.576130| 0.008949| 42261.803686| 0.138833| 3.014202| 25.770130| 0.068354 +| 134019205|S256.4454P63.1831|XS04094B7_002 | 9845344| 256.445484| 63.183108| 4094| 7| 0.119156| 17.496630| 0.006940| 17.887740| 0.006407| 1714.376381| 0.075200| 1.489627| 11.843095| 0.497777 +| 134609053|S257.3217P61.8895|XS04864B6_004 | 9902624| 257.321721| 61.889546| 4864| 6| 0.292492| 18.075020| 0.010429| 18.285270| 0.008331| 3266.690360| 0.109617| 3.019678| 16.988875| 0.540981 +| 213815608|S260.0418P26.6255|XS04361B3_014 | 27578480| 260.041831| 26.625566| 4361| 3| 0.159240| 14.936350| 0.003416| 15.449310| 0.003666| 23666.399953| 0.037933| 0.399900| 23.603727| 0.111820 +| 849702763|S264.1609P53.9090|XS04863B6_002 | 10155040| 264.160900| 53.909041| 4863| 6| 0.407487| 18.748560| 0.014215| 19.233860| 0.015527| 4649.657613| 0.107100| 3.022382| 16.279548| 0.369717 +| 801664702|S349.5880P00.4935|NULL | 10774344| 349.588069| 0.493526| 4938| 7| 0.376296| 18.852000| 0.018428| 19.022390| 0.013564| 28181.469589| 0.117017| 3.046228| 21.942945| 0.059519 +| 
275333773|S354.7242P00.8034|XS04095B7_002 | 10859368| 354.724289| 0.803473| 4095| 7| 0.169759| 17.812580| 0.009228| 18.205800| 0.008355| 1513.825375| 0.075283| 1.489627| 12.057631| 0.593043 diff --git a/astropy/io/ascii/tests/t/no_data_cds.dat b/astropy/io/ascii/tests/t/no_data_cds.dat new file mode 100644 index 0000000..dfe11a0 --- /dev/null +++ b/astropy/io/ascii/tests/t/no_data_cds.dat @@ -0,0 +1,37 @@ + + + + +Title: Spitzer Observations of NGC 1333: A Study of Structure and Evolution + in a Nearby Embedded Cluster +Authors: Gutermuth R.A., Myers P.C., Megeath S.T., Allen L.E., Pipher J.L., + Muzerolle J., Porras A., Winston E., Fazio G. +Table: Spitzer-identified YSOs: Addendum +================================================================================ +Byte-by-byte Description of file: datafile3.txt +-------------------------------------------------------------------------------- + Bytes Format Units Label Explanations +-------------------------------------------------------------------------------- + 1- 3 I3 --- Index Running identification number + 5- 6 I2 h RAh Hour of Right Ascension (J2000) + 8- 9 I2 min RAm Minute of Right Ascension (J2000) + 11- 15 F5.2 s RAs Second of Right Ascension (J2000) + - continuation of description + 17 A1 --- DE- Sign of the Declination (J2000) + 18- 19 I2 deg DEd Degree of Declination (J2000) + 21- 22 I2 arcmin DEm Arcminute of Declination (J2000) + 24- 27 F4.1 arcsec DEs Arcsecond of Declination (J2000) + 29- 68 A40 --- Match Literature match + 70- 75 A6 --- Class Source classification (1) + 77-80 F4.2 mag AK ? The K band extinction (2) + 82-86 F5.2 --- Fit ? Fit of IRAC photometry (3) +-------------------------------------------------------------------------------- +Note (1): Asterisks mark "deeply embedded" sources with questionable IRAC + colors or incomplete IRAC photometry and relatively bright + MIPS 24 micron photometry. +Note (2): Only provided for sources with valid JHK_S_ photometry. +Note (3): Defined as the slope of the linear least squares fit to the + 3.6 - 8.0 micron SEDs in log{lambda} F_{lambda} vs log{lambda} space. + Extinction is not accounted for in these values. High extinction can + bias Fit to higher values. +--------------------------------------------------------------------------------
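The no_data_cds.dat fixture above carries a complete byte-by-byte header but no data rows. As a minimal usage sketch (not part of the patch; it assumes the unpacked source tree as the working directory and that the fixture parses as a regular CDS file), such a file is read with the 'cds' format:

from astropy.io import ascii

# Sketch only: the byte-by-byte description block defines the columns;
# with no rows after the header the resulting table is empty.
t = ascii.read('astropy/io/ascii/tests/t/no_data_cds.dat', format='cds')
print(len(t), t.colnames)  # 0 rows; columns Index, RAh, RAm, RAs, ...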
diff --git a/astropy/io/ascii/tests/t/no_data_daophot.dat b/astropy/io/ascii/tests/t/no_data_daophot.dat new file mode 100644 index 0000000..3fb49ef --- /dev/null +++ b/astropy/io/ascii/tests/t/no_data_daophot.dat @@ -0,0 +1,7 @@ +#K MERGERAD = INDEF scaleunit %-23.7g +#N ID XCENTER YCENTER MAG MERR MSKY NITER \ +#U ## pixels pixels magnitudes magnitudes counts ## \ +#F %-9d %-10.3f %-10.3f %-12.3f %-14.3f %-15.7g %-6d +#N SHARPNESS CHI PIER PERROR \ +#U ## ## ## perrors \ +#F %-23.3f %-12.3f %-6d %-13s diff --git a/astropy/io/ascii/tests/t/no_data_ipac.dat b/astropy/io/ascii/tests/t/no_data_ipac.dat new file mode 100644 index 0000000..eecb488 --- /dev/null +++ b/astropy/io/ascii/tests/t/no_data_ipac.dat @@ -0,0 +1,10 @@ +\catalog = sao +\date = "Wed Sp 20 09:48:36 1995" +\mykeyword = 'Another way for defining keyvalue string' +\ This is an example of a valid comment. +\ The 2nd data line is used to verify the exact column parsing +\ (unclear if this is valid for the IPAC format) +| ra | dec | sai |-----v2---| sptype | +| real | real | int | real | char | +| unit | unit | unit | unit | ergs | +| null | null | null | null | -999 | diff --git a/astropy/io/ascii/tests/t/no_data_sextractor.dat b/astropy/io/ascii/tests/t/no_data_sextractor.dat new file mode 100644 index 0000000..a9da39b --- /dev/null +++ b/astropy/io/ascii/tests/t/no_data_sextractor.dat @@ -0,0 +1,5 @@ +# 1 NUMBER Galaxy ID number +# 2 FLUX_ISO +# 3 FLUXERR_ISO +# 4 VALUES Note column 5 is missing +# 6 FLAG diff --git a/astropy/io/ascii/tests/t/no_data_with_header.dat b/astropy/io/ascii/tests/t/no_data_with_header.dat new file mode 100644 index 0000000..3774da6 --- /dev/null +++ b/astropy/io/ascii/tests/t/no_data_with_header.dat @@ -0,0 +1 @@ +a b c diff --git a/astropy/io/ascii/tests/t/no_data_without_header.dat b/astropy/io/ascii/tests/t/no_data_without_header.dat new file mode 100644 index 0000000..0641385 --- /dev/null +++ b/astropy/io/ascii/tests/t/no_data_without_header.dat @@ -0,0 +1,2 @@ +# blank data table + diff --git a/astropy/io/ascii/tests/t/sextractor.dat b/astropy/io/ascii/tests/t/sextractor.dat new file mode 100644 index 0000000..5906219 --- /dev/null +++ b/astropy/io/ascii/tests/t/sextractor.dat @@ -0,0 +1,8 @@ +# 1 NUMBER Galaxy ID number +# 2 FLUX_ISO +# 3 FLUXERR_ISO +# 4 VALU-ES Note column 5 is missing +# 6 FLAG +1 0.02580616000000000 0.03974229000000000 1.6770000000000000 0.2710000000000000 0 +2 5.72769100000000009 0.20643300000000001 2.6250000000000000 2.5219999999999998 0 +3 88.31933999999999685 0.59369850000000002 5.9249999999999998 4.7140000000000004 0 diff --git a/astropy/io/ascii/tests/t/sextractor2.dat b/astropy/io/ascii/tests/t/sextractor2.dat new file mode 100644 index 0000000..679ea39 --- /dev/null +++ b/astropy/io/ascii/tests/t/sextractor2.dat @@ -0,0 +1,14 @@ +# 1 NUMBER Running object number +# 2 XWIN_IMAGE Windowed position estimate along x [pixel] +# 3 YWIN_IMAGE Windowed position estimate along y [pixel] +# 4 MAG_AUTO Kron-like elliptical aperture magnitude [mag] +# 5 MAGERR_AUTO RMS error for AUTO magnitude [mag] +# 6 FLAGS Extraction flags +# 7 X2_IMAGE [pixel**2] +# 8 X_MAMA Barycenter position along MAMA x axis [m**(-6)] +# 9 MU_MAX Peak surface brightness above background [mag * arcsec**(-2)] +1 100.523 11.911 -5.3246 0.0416 19 1000.0 0.00304 -3.498 +2 100.660 4.872 -6.4538 0.0214 27 1500.0 0.00908 1.401 +3 131.046 10.382 -4.6836 0.0524 17 500.0 0.01004 2.512 +4 338.959 4.966 -7.1747 0.0173 25 1200.0 0.00792 2.901 +5 166.280 3.956 -4.0865 0.0621 25 800.0 0.00699 -6.489 diff --git a/astropy/io/ascii/tests/t/sextractor3.dat b/astropy/io/ascii/tests/t/sextractor3.dat new file mode 100644 index 0000000..51adb21 --- /dev/null +++ b/astropy/io/ascii/tests/t/sextractor3.dat @@ -0,0 +1,10 @@ +# 1 X_IMAGE Object position along x [pixel] +# 2 Y_IMAGE [pixel] +# 3 ALPHA_J2000 Right ascension of barycenter (J2000) [deg] +# 4 DELTA_J2000 Declination of barycenter (J2000) [deg] +# 5 MAG_AUTO Kron-like elliptical aperture magnitude [mag] +# 6 MAGERR_AUTO RMS error for AUTO magnitude [mag] +# 7 MAG_APER Fixed aperture magnitude vector [mag] +# 14 MAGERR_APER RMS error vector for fixed aperture mag.
[mag] + 1367.000 184.404 265.1445228 +68.7507679 22.9929 0.2218 24.1804 23.4541 22.9567 22.5162 22.1912 21.5363 21.0361 0.3262 0.2675 0.2203 0.1856 0.1683 0.1621 0.1673 + 1380.235 189.444 265.1384412 +68.7516124 20.9258 0.0569 22.2374 21.5987 21.2943 21.1244 20.9838 20.6672 20.0695 0.0645 0.0497 0.0495 0.0520 0.0533 0.0602 0.0515 diff --git a/astropy/io/ascii/tests/t/short.rdb b/astropy/io/ascii/tests/t/short.rdb new file mode 100644 index 0000000..29f300d --- /dev/null +++ b/astropy/io/ascii/tests/t/short.rdb @@ -0,0 +1,14 @@ + +# blank lines + +agasc_id n_noids n_obs +N N N +115345072 1 1 + # comment +335416352 3 8 +266612160 1 1 +645803280 1 1 +117309912 1 1 +114950920 1 1 +335025040 2 24 + diff --git a/astropy/io/ascii/tests/t/short.rdb.bz2 b/astropy/io/ascii/tests/t/short.rdb.bz2 new file mode 100644 index 0000000000000000000000000000000000000000..9e6c49586d7fb2431a85a80bed6ab19f5892743b GIT binary patch literal 146 zcmV;D0B!$5T4*^jL0KkKSrwK*L;wH_Uw{BGNC1D}00961zORe`AOLbAnl_E98USQ? zpwz)Iq}raRr~#tkt$PA6_#JrKP7Ij2m*Qp{k7tMFowB8DWLQPC`-zj$Jh~>Pk<~-B z6*8j1wNfb|u|eJzkON30i^eP$?VqdIofT#x$upq<9s@^?Qa{DqkxmpO1!a&C06>5{ AQvd(} literal 0 HcmV?d00001 diff --git a/astropy/io/ascii/tests/t/short.rdb.gz b/astropy/io/ascii/tests/t/short.rdb.gz new file mode 100644 index 0000000000000000000000000000000000000000..92172ff323db945d85debb97ccba8c8639d9f130 GIT binary patch literal 148 zcmV;F0BipriwFpfa2rqp19NC^a&#_oWMTjz<#u{)I?e^@`oT| zO9!QYBJ41S;qivS$*pIuA9t2|bj1A5eOt;7b*ZiFeRNwpzF-8IO})`9z`*Dxx3%j! z>K9ebb5Ijdh$tZh77ldJg11O2@ge3}DJ7Oa`DB#japQ%=$P);NNBjZcsQfpa0000f CEJO$Z literal 0 HcmV?d00001 diff --git a/astropy/io/ascii/tests/t/short.rdb.xz b/astropy/io/ascii/tests/t/short.rdb.xz new file mode 100644 index 0000000000000000000000000000000000000000..93faba9853444db60d0f4a0597923b097d288600 GIT binary patch literal 192 zcmV;x06+izH+ooF000E$*0e?f03iVu0001VFXf})0Gj}ST>u3Mi~*H-Xddfk*1Xu` zDZ^jD$bU}x#R!Kwe09YS?7$~~_HW@5b@S3#*?ObbNZVfs&S)e;6@l-Z+dmFyFtyYm z`>|FS19ohc8LPxBj_Fff(9|L~?l;&Bc%ifCIiB?qW?+mgcnccwmDRQG`-LaO&%Gw3 uj38ab-m9Li0002h^+J;=`k6cc0h|Gx0RRAgAH`y^#Ao{g000001X)^JQdRN* literal 0 HcmV?d00001 diff --git a/astropy/io/ascii/tests/t/short.tab b/astropy/io/ascii/tests/t/short.tab new file mode 100644 index 0000000..66e3e7f --- /dev/null +++ b/astropy/io/ascii/tests/t/short.tab @@ -0,0 +1,8 @@ +agasc_id n_noids n_obs +115345072 1 1 +335416352 3 8 +266612160 1 1 +645803280 1 1 +117309912 1 1 +114950920 1 1 +335025040 2 24 diff --git a/astropy/io/ascii/tests/t/simple.txt b/astropy/io/ascii/tests/t/simple.txt new file mode 100644 index 0000000..d0a4e27 --- /dev/null +++ b/astropy/io/ascii/tests/t/simple.txt @@ -0,0 +1,4 @@ + 'test 1a' test2 test3 test4 + # fun1 fun2 fun3 fun4 fun5 + top1 top2 top3 top4 +hat1 hat2 hat3 hat4 diff --git a/astropy/io/ascii/tests/t/simple2.txt b/astropy/io/ascii/tests/t/simple2.txt new file mode 100644 index 0000000..0fbb9f6 --- /dev/null +++ b/astropy/io/ascii/tests/t/simple2.txt @@ -0,0 +1,4 @@ +obsid | redshift | X | Y | object | rad +3102 | 0.32 | 4167 | 4085 | Q1250+568-A | 9 +3102 | 0.32 | 4706 | 3916 | Q1250+568-B | 14 +877 | 0.22 | 4378 | 3892 | 'Source 82' | 12.5 diff --git a/astropy/io/ascii/tests/t/simple3.txt b/astropy/io/ascii/tests/t/simple3.txt new file mode 100644 index 0000000..ab6e657 --- /dev/null +++ b/astropy/io/ascii/tests/t/simple3.txt @@ -0,0 +1,3 @@ +obsid|redshift|X|Y|object|rad +877|0.22|4378|3892|'Sou,rce82'|12.5 +3102|0.32|4167|4085|Q1250+568-A|9 diff --git a/astropy/io/ascii/tests/t/simple4.txt b/astropy/io/ascii/tests/t/simple4.txt new file 
mode 100644 index 0000000..62e922a --- /dev/null +++ b/astropy/io/ascii/tests/t/simple4.txt @@ -0,0 +1,3 @@ +3102 | 0.32 | 4167 | 4085 | Q1250+568-A | 9 +3102 | 0.32 | 4706 | 3916 | Q1250+568-B | 14 +877 | 0.22 | 4378 | 3892 | 'Source 82' | 12.5 diff --git a/astropy/io/ascii/tests/t/simple5.txt b/astropy/io/ascii/tests/t/simple5.txt new file mode 100644 index 0000000..7718404 --- /dev/null +++ b/astropy/io/ascii/tests/t/simple5.txt @@ -0,0 +1,4 @@ +# Purposely make an ill-formed data file (in last row) +3102 | 0.32 | 4167 | 4085 | Q1250+568-A | 9 +3102 | 0.32 | 4706 | 3916 | Q1250+568-B | 14 +877 | 4378 | 3892 | 'Source 82' | 12.5 diff --git a/astropy/io/ascii/tests/t/simple_csv.csv b/astropy/io/ascii/tests/t/simple_csv.csv new file mode 100644 index 0000000..efb9823 --- /dev/null +++ b/astropy/io/ascii/tests/t/simple_csv.csv @@ -0,0 +1,3 @@ +a,b,c +1,2,3 +4,5,6 \ No newline at end of file diff --git a/astropy/io/ascii/tests/t/simple_csv_missing.csv b/astropy/io/ascii/tests/t/simple_csv_missing.csv new file mode 100644 index 0000000..9c87d2d --- /dev/null +++ b/astropy/io/ascii/tests/t/simple_csv_missing.csv @@ -0,0 +1,3 @@ +a,b,c +1 +4,5,6 diff --git a/astropy/io/ascii/tests/t/space_delim_blank_lines.txt b/astropy/io/ascii/tests/t/space_delim_blank_lines.txt new file mode 100644 index 0000000..b096c4f --- /dev/null +++ b/astropy/io/ascii/tests/t/space_delim_blank_lines.txt @@ -0,0 +1,8 @@ +obsid offset x y name oaa + +3102 0.32 4167 4085 Q1250+568-A 9 +3102 0.32 4706 3916 Q1250+568-B 14 +877 0.22 4378 3892 "Source 82" 12.5 + + + diff --git a/astropy/io/ascii/tests/t/space_delim_no_header.dat b/astropy/io/ascii/tests/t/space_delim_no_header.dat new file mode 100644 index 0000000..f8cfc69 --- /dev/null +++ b/astropy/io/ascii/tests/t/space_delim_no_header.dat @@ -0,0 +1,2 @@ +1 3.4 hello +2 6.4 world diff --git a/astropy/io/ascii/tests/t/space_delim_no_names.dat b/astropy/io/ascii/tests/t/space_delim_no_names.dat new file mode 100644 index 0000000..2c3f803 --- /dev/null +++ b/astropy/io/ascii/tests/t/space_delim_no_names.dat @@ -0,0 +1,2 @@ +1 2 +3 4 diff --git a/astropy/io/ascii/tests/t/test4.dat b/astropy/io/ascii/tests/t/test4.dat new file mode 100644 index 0000000..329d5f4 --- /dev/null +++ b/astropy/io/ascii/tests/t/test4.dat @@ -0,0 +1,12 @@ +# whitespace separated +zabs1.nh p1.gamma p1.ampl statname statval + 0.0872113431031 1.26764500000 0.000699751823872 input 0.0 +0.0863775314648 1.26769713012 0.000698799851356 chi2constvar 494.396534577 +0.0839710433091 1.25997502704 0.000696444029148 chi2modvar 497.56468441 +0.0867933991271 1.27045571779 0.000699526507899 cash -579508.340504 + # comment here +0.0913252611282 1.28738450369 0.000703999531569 chi2gehrels 416.904139981 +0.0943815607455 1.29839188657 0.000708725775733 chi2datavar 572.734008 +0.0943792771442 1.29837677223 0.00070871697621 chi2xspecvar 572.734013473 +0.0867953584196 1.27046735536 0.000699532088738 cstat 512.433488994 +0.0846479114132 1.26584338176 0.000697063608605 chi2constvar 440.651434041
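test4.dat above is whitespace-delimited data with '#' comment lines interleaved between the rows. A small sketch (not part of the patch) of reading such content; the default basic reader skips comment lines wherever they occur:

from astropy.io import ascii

# Sketch only: both comment lines are dropped, leaving two data rows.
lines = ["# whitespace separated",
         "zabs1.nh p1.gamma statname",
         "0.0872113431031 1.26764500000 input",
         " # comment here",
         "0.0913252611282 1.28738450369 chi2gehrels"]
t = ascii.read(lines, format='basic')
print(len(t))  # 2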
diff --git a/astropy/io/ascii/tests/t/test5.dat b/astropy/io/ascii/tests/t/test5.dat new file mode 100644 index 0000000..316c4ff --- /dev/null +++ b/astropy/io/ascii/tests/t/test5.dat @@ -0,0 +1,22 @@ +# whitespace separated with lines to skip +------------------------------------------ +zabs1.nh p1.gamma p1.ampl statname statval +------------------------------------------ +0.095196313612 1.29238107724 0.000709438701165 chi2xspecvar 455.385700456 +0.0898827896112 1.27317260145 0.000703680688865 cstat 450.402806957 +0.0845373292976 1.26032264432 0.000697817633266 chi2constvar 427.888401816 +0.0813955290921 1.25278166998 0.000694773889339 chi2modvar 422.655226097 +0.0837813193374 1.26108631851 0.000697168659777 cash -582096.060739 +0.0877788113875 1.27498889089 0.000700963122261 chi2gehrels 336.255262001 +0.0886095763534 1.27831934755 0.000702152760295 chi2datavar 427.87097831 +0.0886062881606 1.27831561342 0.000702152575029 chi2xspecvar 427.870972282 +0.0837839157029 1.26109967845 0.000697177275745 cstat 423.869897301 +0.0848856095291 1.26216881055 0.000697245258092 chi2constvar 495.692552206 +0.0834040516574 1.25034791909 0.000694504650678 chi2modvar 448.488349352 +0.0863275923367 1.25920642303 0.000697302969088 cash -581109.867406 +0.0910593842926 1.27434931431 0.000701687557965 chi2gehrels 362.107884887 +0.0925984360666 1.27857224315 0.000703586368322 chi2datavar 467.653055046 +0.0926057133247 1.27858701992 0.000703594356786 chi2xspecvar 467.653060082 +0.0863257498551 1.259192667 0.000697300429366 cstat 451.536967896 +0.0880503692681 1.2588289844 0.000698437310968 chi2constvar 439.513117058 +0.0852962921333 1.25214407357 0.000696223065852 chi2modvar 443.456904712 diff --git a/astropy/io/ascii/tests/t/vizier/ReadMe b/astropy/io/ascii/tests/t/vizier/ReadMe new file mode 100644 index 0000000..b85d8d5 --- /dev/null +++ b/astropy/io/ascii/tests/t/vizier/ReadMe @@ -0,0 +1,89 @@ +J/A+A/511/A56 Abundances of five open clusters (Pancino+, 2010) +================================================================================ +Chemical abundance analysis of the open clusters Cr 110, NGC 2099, NGC 2420, NGC 7789, +and M 67 (NGC 2682). + Pancino E., Carrera R., Rossetti, E., Gallart C. + + =2010A&A...511A..56P +================================================================================ +ADC_Keywords: Clusters, open ; Stars, giant ; Equivalent widths ; Spectroscopy +Keywords: stars: abundances - Galaxy: disk - + open clusters and associations: general + +Abstract: + The present number of Galactic open clusters that have high resolution + abundance determinations, not only of [Fe/H], but also of other key + elements, is largely insufficient to enable a clear modeling of the + Galactic disk chemical evolution. To increase the number of Galactic + open clusters with high quality measurements, we obtained high + resolution (R~30000), high quality (S/N~50-100 per pixel), echelle + spectra with the fiber spectrograph FOCES, at Calar Alto, Spain, for + three red clump stars in each of five Open Clusters. We used the + classical equivalent width analysis method to obtain accurate + abundances of sixteen elements: Al, Ba, Ca, Co, Cr, Fe, La, Mg, Na, + Nd, Ni, Sc, Si, Ti, V, and Y. We also derived the oxygen abundance + using spectral synthesis of the 6300{AA} forbidden line. + +Description: + Atomic data and equivalent widths for 15 red clump giants in 5 open + clusters: Cr 110, NGC 2099, NGC 2420, M 67, NGC 7789. + +File Summary: +-------------------------------------------------------------------------------- + FileName Lrecl Records Explanations +-------------------------------------------------------------------------------- +ReadMe 80 . This file +table1.dat 103 15 Observing logs and programme stars information +table5.dat 56 5265 Atomic data and equivalent widths +-------------------------------------------------------------------------------- + +See also: + J/A+A/455/271 : Abundances of red giants in NGC 6441 (Gratton+, 2006) + J/A+A/464/953 : Abundances of red giants in NGC 6441 (Gratton+, 2007) + J/A+A/505/117 : Abund.
of red giants in 15 globular clusters (Carretta+, 2009) + +Byte-by-byte Description of file: table1.dat +-------------------------------------------------------------------------------- + Bytes Format Units Label Explanations +-------------------------------------------------------------------------------- + 1- 7 A7 --- Cluster Cluster name + 9- 12 I4 --- Star Star number within the cluster + 14- 15 I2 h RAh Right ascension (J2000) + 17- 18 I2 min RAm Right ascension (J2000) + 20- 23 F4.1 s RAs Right ascension (J2000) + 25 A1 --- DE- Declination sign (J2000) + 26- 27 I2 deg DEd Declination (J2000) + 29- 30 I2 arcmin DEm Declination (J2000) + 32- 35 F4.1 arcsec DEs Declination (J2000) + 37- 41 F5.2 mag Bmag B magnitude + 43- 47 F5.2 mag Vmag V magnitude + 49- 53 F5.2 mag Icmag ?=- Cousins I magnitude + 55- 59 F5.2 mag Rmag ?=- R magnitude + 61- 65 F5.2 mag Ksmag Ks magnitude + 67 I1 --- NExp Number of exposures + 69- 73 I5 s TExp Total exposure time + 75- 77 I3 --- S/N Signal-to-noise ratio + 79-103 A25 --- SName Simbad name +-------------------------------------------------------------------------------- + +Byte-by-byte Description of file: table5.dat +-------------------------------------------------------------------------------- + Bytes Format Units Label Explanations +-------------------------------------------------------------------------------- + 1- 7 A7 --- Cluster Cluster name + 9- 12 I4 --- Star Star number within the cluster + 14- 20 F7.2 0.1nm Wave Wavelength in Angstroms + 22- 23 A2 --- El Element name + 24 I1 --- ion Ionization stage (1 for neutral element) + 26- 30 F5.2 eV chiEx Excitation potential + 32- 37 F6.2 --- loggf Logarithm of the oscillator strength + 39- 43 F5.1 0.1pm EW ?=-9.9 Equivalent width (in mA) + 46- 49 F4.1 0.1pm e_EW ?=-9.9 rms uncertainty on EW + 51- 56 F6.3 --- Q ?=-9.999 DAOSPEC quality parameter Q + (large values are bad) +-------------------------------------------------------------------------------- + +Acknowledgements: + Elena Pancino, elena.pancino(at)oabo.inaf.it +================================================================================ +(End) Elena Pancino [INAF-OABo, Italy], Patricia Vannier [CDS] 23-Nov-2009 diff --git a/astropy/io/ascii/tests/t/vizier/table1.dat b/astropy/io/ascii/tests/t/vizier/table1.dat new file mode 100644 index 0000000..42d97d0 --- /dev/null +++ b/astropy/io/ascii/tests/t/vizier/table1.dat @@ -0,0 +1,15 @@ +Cr110 2108 06 38 52.5 +02 01 58.4 14.79 13.35 -- --- 9.76 6 16200 70 Cl* Collinder 110 DI 2108 +Cr110 2129 06 38 41.1 +02 01 05.5 15.00 13.66 12.17 12.94 10.29 7 18900 70 Cl* Collinder 110 DI 2129 +Cr110 3144 06 38 30.3 +02 03 03.0 14.80 13.49 12.04 12.72 10.19 6 16195 65 Cl* Collinder 110 DI 3144 +NGC2099 67 05 52 16.6 +32 34 45.6 12.38 11.12 9.87 --- 8.17 3 3600 95 NGC 2099 67 +NGC2099 148 05 52 08.1 +32 30 33.1 12.36 11.09 - --- 8.05 3 3600 105 NGC 2099 148 +NGC2099 508 05 52 33.2 +32 27 43.5 12.24 10.98 -- --- 7.92 3 3900 85 NGC 2099 508 +NGC2420 41 07 38 06.2 +21 36 54.7 13.75 12.67 11.61 12.13 10.13 5 9000 70 NGC 2420 41 +NGC2420 76 07 38 15.5 +21 38 01.8 13.65 12.66 11.65 12.14 10.31 5 9000 75 NGC 2420 76 +NGC2420 174 07 38 26.9 +21 38 24.8 13.41 12.40 ---- --- 9.98 5 9000 60 NGC 2420 174 +NGC2682 141 08 51 22.8 +11 48 01.7 11.59 10.48 9.40 9.92 7.92 3 2700 85 Cl* NGC 2682 MMU 141 +NGC2682 223 08 51 43.9 +11 56 42.3 11.68 10.58 9.50 10.02 8.00 3 2700 85 Cl* NGC 2682 MMU 223 +NGC2682 286 08 52 18.6 +11 44 26.3 11.53 10.47 9.43 9.93 7.92 3 2700 105 Cl* NGC 2682 MMU 286 +NGC7789 5237 23 56 50.6 +56 49 20.9
13.92 12.81 11.52 --- 9.89 5 9000 70 Cl* NGC 7789 G 5237 +NGC7789 7840 23 57 19.3 +56 40 51.5 14.03 12.82 11.49 --- 9.83 6 9000 75 Cl* NGC 7789 G 7840 +NGC7789 8556 23 57 27.6 +56 45 39.2 14.18 12.97 11.65 --- 10.03 3 5400 45 Cl* NGC 7789 G 8556 diff --git a/astropy/io/ascii/tests/t/vizier/table5.dat b/astropy/io/ascii/tests/t/vizier/table5.dat new file mode 100644 index 0000000..61bc51a --- /dev/null +++ b/astropy/io/ascii/tests/t/vizier/table5.dat @@ -0,0 +1,49 @@ +Cr110 2108 6696.79 Al1 4.02 -1.42 29.5 2.2 0.289 +Cr110 2108 6698.67 Al1 3.14 -1.65 58.0 2.0 0.325 +Cr110 2108 7361.57 Al1 4.02 -0.90 44.1 4.0 0.510 +Cr110 2108 7362.30 Al1 4.02 -0.75 62.7 3.9 0.577 +Cr110 2108 7835.31 Al1 4.02 -0.65 73.7 6.6 0.539 +Cr110 2108 7836.13 Al1 4.02 -0.49 87.6 4.1 0.390 +Cr110 2108 8772.86 Al1 4.02 -0.32 87.6 5.1 0.957 +Cr110 2108 8773.90 Al1 4.02 -0.16 118.6 14.6 0.736 +Cr110 2108 5853.67 Ba2 0.60 -1.00 121.9 5.5 1.435 +Cr110 2108 6141.71 Ba2 0.70 -0.08 191.0 8.7 1.117 +Cr110 2108 6496.90 Ba2 0.60 -0.38 175.8 6.8 1.473 +Cr110 2108 5261.70 Ca1 2.52 -0.59 149.1 5.3 0.808 +Cr110 2108 5512.98 Ca1 2.93 -0.71 106.7 6.2 1.416 +Cr110 2108 5857.45 Ca1 2.93 0.26 163.8 19.8 2.209 +Cr110 2108 6156.02 Ca1 2.52 -2.50 42.0 4.0 0.617 +Cr110 2108 6166.44 Ca1 2.52 -1.16 110.7 3.3 1.046 +Cr110 2108 6169.04 Ca1 2.52 -0.80 127.3 5.5 1.604 +Cr110 2108 6169.56 Ca1 2.53 -0.53 148.2 6.0 1.419 +Cr110 2108 6471.66 Ca1 2.53 -0.65 130.4 5.0 1.431 +Cr110 2108 6499.65 Ca1 2.52 -0.72 129.0 5.4 1.183 +Cr110 2108 5230.20 Co1 1.74 -1.84 60.4 6.7 1.210 +Cr110 2108 5530.77 Co1 1.71 -2.06 73.2 4.3 1.005 +Cr110 2108 5590.72 Co1 2.04 -1.87 69.9 3.2 0.706 +Cr110 2108 5935.38 Co1 1.88 -2.68 33.0 4.4 0.665 +Cr110 2108 6429.91 Co1 2.14 -2.41 28.2 1.3 0.340 +Cr110 2108 6490.34 Co1 2.04 -2.52 33.6 3.5 0.323 +Cr110 2108 6632.43 Co1 2.28 -2.00 50.9 2.1 0.391 +Cr110 2108 7154.67 Co1 2.04 -2.42 45.9 1.9 0.280 +Cr110 2108 7388.69 Co1 2.72 -1.65 36.6 1.8 0.343 +Cr110 2108 7417.37 Co1 2.04 -2.07 71.4 1.9 0.369 +Cr110 2108 7838.13 Co1 3.97 -0.30 32.7 2.7 0.495 +Cr110 2108 5243.36 Cr1 3.40 -0.57 47.9 4.0 0.828 +Cr110 2108 5329.14 Cr1 2.91 -0.06 110.4 4.9 1.113 +Cr110 2108 5442.37 Cr1 3.42 -1.06 33.3 2.5 0.499 +Cr110 2108 5712.75 Cr1 3.01 -1.30 49.4 5.3 1.038 +Cr110 2108 5788.39 Cr1 3.01 -1.83 26.1 1.3 0.260 +Cr110 2108 5844.59 Cr1 3.01 -1.76 26.2 3.9 0.863 +Cr110 2108 6330.09 Cr1 0.94 -2.92 94.4 6.6 1.638 +Cr110 2108 6537.93 Cr1 1.00 -4.07 33.0 2.4 0.479 +Cr110 2108 6630.01 Cr1 1.03 -3.56 60.7 1.5 0.232 +Cr110 2108 6661.08 Cr1 4.19 -0.19 33.5 6.4 0.627 +Cr110 2108 7355.94 Cr1 2.89 -0.28 126.7 4.1 0.671 +Cr110 2108 5055.99 Fe1 4.31 -2.01 41.2 3.3 0.371 +Cr110 2108 5178.80 Fe1 4.39 -1.84 45.4 7.1 0.851 +Cr110 2108 5285.13 Fe1 4.43 -1.64 50.1 5.2 0.607 +Cr110 2108 5294.55 Fe1 3.64 -2.86 -9.9 -9.9 -9.999 +Cr110 2108 5295.31 Fe1 4.42 -1.69 38.3 9.5 1.958 +Cr110 2108 5373.71 Fe1 4.47 -0.86 91.5 5.3 1.416 +Cr110 2108 5386.33 Fe1 4.15 -1.77 55.9 6.6 0.949 diff --git a/astropy/io/ascii/tests/t/vots_spec.dat b/astropy/io/ascii/tests/t/vots_spec.dat new file mode 100644 index 0000000..bc90130 --- /dev/null +++ b/astropy/io/ascii/tests/t/vots_spec.dat @@ -0,0 +1,99 @@ +#################################################################################### +## +## VOTable-Simple Specification +## +## This is the specification of the VOTable-Simple (VOTS) format, given as an +## example data table with comments and references. This data table format is +## intended to provide a way of specifying metadata and data for simple tabular +## data sets.
This specification is intended as a subset of the VOTable data +## model and to allow easy generation of a VOTable-compliant data structure. This +## provides a uniform starting point for generating table documentation and +## performing database table creation and ingest. +## +## A python application is available which uses the STILTS java package to +## convert from a VOTS format to any of the (many) output formats supported by +## STILTS. This application can also generate a documentation file (in +## reStructured Text format) or a Django model definition from a VOTS table. +## +## Key VOTable and STILTS references: +## Full spec: http://www.ivoa.net/Documents/latest/VOT.html +## Datatypes: http://www.ivoa.net/Documents/REC/VOTable/VOTable-20040811.html#ToC11 +## FIELD def: http://www.ivoa.net/Documents/REC/VOTable/VOTable-20040811.html#ToC25 +## STILTS : http://www.star.bris.ac.uk/~mbt/stilts/ +## +## The VOTable-Simple format consists of header information followed by the tabular +## data elements. The VOTS header lines are all preceded by a single '#' character. +## Comments are preceded by '##' at the beginning of a line. +## +## The VOTS header defines the metadata associated with the table. In the +## VOTable-Simple format words in all CAPS (followed by ::) refer to the +## corresponding metadata elements in the VOTable specification. For instance +## the DESCRIPTION:: keyword precedes the lines that are used in the VOTable +## DESCRIPTION element. The COOSYS::, PARAM::, and FIELD:: keywords are +## each followed by a whitespace-delimited table that defines the corresponding +## VOTable elements and attributes. +## +## The actual table data must follow the header and consist of space or tab delimited +## data fields. The chosen delimiter must be used consistently throughout the table. +## +##---------------------------------------------------------------------------------- +## Table description, corresponding to the VOTable TABLE::DESCRIPTION element. +##---------------------------------------------------------------------------------- +# DESCRIPTION:: +# This is a sample table that shows a proposed format for generation of tables +# for the C-COSMOS collaboration. This format is compatible with simple 'awk' or +# S-mongo style processing but also allows full self-documentation and conversion +# to more robust data formats (FITS, VOTable, postgres database ingest, etc). +# +##---------------------------------------------------------------------------------- +## Coordinate system specification COOSYS. This is a "future" feature, as the +## current conversion code does not use this field. +##---------------------------------------------------------------------------------- +# COOSYS:: +# ID equinox epoch system +# J2000 J2000. J2000. eq_FK5 +# +##---------------------------------------------------------------------------------- +## Set the TABLE::PARAM values, which are values that apply for the entire table. +##---------------------------------------------------------------------------------- +# PARAM:: +# name datatype value description +# version string 1.1 'Table version' +# date string 2007/12/01 'Table release date' +# +##---------------------------------------------------------------------------------- +## Define the column names via the FIELD element. The attributes 'name', +## 'datatype', 'unit', and 'description' are required. Optional attributes are: +## 'width', 'precision', 'ucd', 'utype', 'ref', and 'type'.
+## See http://www.ivoa.net/Documents/REC/VOTable/VOTable-20040811.html#ToC25 for +## the VOTable definitions. +## Allowed values of datatype are: +## boolean, unsignedByte, short, int, long, string, float, double +## Units: (from http://www.ivoa.net/Documents/REC/VOTable/VOTable-20040811.html#sec:unit) +## The quantities in a column of the table may be expressed in some physical +## unit, which is specified by the unit attribute of the FIELD. The syntax of +## the unit string is defined in reference [3]; it is basically written as a +## string without blanks or spaces, where the symbols . or * indicate a +## multiplication, / stands for the division, and no special symbol is +## required for a power. Examples are unit="m2" for m2, unit="cm-2.s-1.keV-1" +## for cm-2s-1keV-1, or unit="erg/s" for erg s-1. The references [3] provide +## also the list of the valid symbols, which is essentially restricted to the +## Systeme International (SI) conventions, plus a few astronomical extensions +## concerning units used for time, angular, distance and energy measurements. +##---------------------------------------------------------------------------------- +# FIELD:: +# name datatype unit ucd description +# id int '' 'meta.id' 'C-COSMOS short identifier number' +# name string '' '' 'C-COSMOS long identifier name' +# ra double deg 'meta.cryptic' 'Right Ascension' +# dec double deg '' Declination +# flux float erg/cm2/s '' Flux +# +##---------------------------------------------------------------------------------- +## Now the actual field data in the order specified by the FIELD:: list. +## The data fields can be separated by tabs or spaces. If using spaces, +## any fields that contain a space must be enclosed in single quotes. +## +12 'CXOCS J193423+022312' 150.01212 2.52322 1.21e-13 +13 'CXOCS J193322+024444' 150.02323 2.54444 1.21e-14 +14 'CXOCS J195555+025555' 150.04444 2.55555 1.21e-15
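The VOTS data rows above wrap any field containing a space in single quotes. A short sketch (not part of the patch; the column names are taken by hand from the FIELD:: block, since astropy does not parse the VOTS header itself) of reading those rows:

from astropy.io import ascii

# Sketch only: space-delimited fields with single-quote quoting.
rows = ["12 'CXOCS J193423+022312' 150.01212 2.52322 1.21e-13",
        "13 'CXOCS J193322+024444' 150.02323 2.54444 1.21e-14",
        "14 'CXOCS J195555+025555' 150.04444 2.55555 1.21e-15"]
t = ascii.read(rows, format='no_header', quotechar="'",
               names=('id', 'name', 'ra', 'dec', 'flux'))
print(t['name'][0])  # CXOCS J193423+022312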
import ascii +from ...ascii.core import ParameterError, FastOptionsError +from ...ascii.cparser import CParserError +from ..fastbasic import FastBasic, FastCsv, FastTab, FastCommentedHeader, \ + FastRdb, FastNoHeader +from .common import assert_equal, assert_almost_equal, assert_true +from ....extern import six +from ....extern.six.moves import range + +TRAVIS = os.environ.get('TRAVIS', False) + + +def assert_table_equal(t1, t2, check_meta=False): + assert_equal(len(t1), len(t2)) + assert_equal(t1.colnames, t2.colnames) + if check_meta: + assert_equal(t1.meta, t2.meta) + for name in t1.colnames: + if len(t1) != 0: + assert_equal(t1[name].dtype.kind, t2[name].dtype.kind) + if not isinstance(t1[name], MaskedColumn): + for i, el in enumerate(t1[name]): + try: + if not isinstance(el, six.string_types) and np.isnan(el): + assert_true(not isinstance(t2[name][i], six.string_types) and np.isnan(t2[name][i])) + elif isinstance(el, six.string_types): + assert_equal(el, t2[name][i]) + else: + assert_almost_equal(el, t2[name][i]) + except (TypeError, NotImplementedError): + pass # ignore for now + + +# Use this counter to create a unique filename for each file created in a test +# if this function is called more than once in a single test +_filename_counter = 0 + + +def _read(tmpdir, table, Reader=None, format=None, parallel=False, check_meta=False, **kwargs): + # make sure we have a newline so table can't be misinterpreted as a filename + global _filename_counter + + table += '\n' + reader = Reader(**kwargs) + t1 = reader.read(table) + t2 = reader.read(StringIO(table)) + t3 = reader.read(table.splitlines()) + t4 = ascii.read(table, format=format, guess=False, **kwargs) + t5 = ascii.read(table, format=format, guess=False, fast_reader=False, **kwargs) + assert_table_equal(t1, t2, check_meta=check_meta) + assert_table_equal(t2, t3, check_meta=check_meta) + assert_table_equal(t3, t4, check_meta=check_meta) + assert_table_equal(t4, t5, check_meta=check_meta) + + if parallel: + if TRAVIS: + pytest.xfail("Multiprocessing can sometimes fail on Travis CI") + elif os.name == 'nt': + pytest.xfail("Multiprocessing is currently unsupported on Windows") + t6 = ascii.read(table, format=format, guess=False, fast_reader={ + 'parallel': True}, **kwargs) + assert_table_equal(t1, t6, check_meta=check_meta) + + filename = str(tmpdir.join('table{0}.txt'.format(_filename_counter))) + _filename_counter += 1 + + with open(filename, 'wb') as f: + f.write(table.encode('ascii')) + f.flush() + + t7 = ascii.read(filename, format=format, guess=False, **kwargs) + if parallel: + t8 = ascii.read(filename, format=format, guess=False, fast_reader={ + 'parallel': True}, **kwargs) + + assert_table_equal(t1, t7, check_meta=check_meta) + if parallel: + assert_table_equal(t1, t8, check_meta=check_meta) + return t1 + + +@pytest.fixture(scope='function') +def read_basic(tmpdir, request): + return functools.partial(_read, tmpdir, Reader=FastBasic, format='basic') + + +@pytest.fixture(scope='function') +def read_csv(tmpdir, request): + return functools.partial(_read, tmpdir, Reader=FastCsv, format='csv') + + +@pytest.fixture(scope='function') +def read_tab(tmpdir, request): + return functools.partial(_read, tmpdir, Reader=FastTab, format='tab') + + +@pytest.fixture(scope='function') +def read_commented_header(tmpdir, request): + return functools.partial(_read, tmpdir, Reader=FastCommentedHeader, + format='commented_header') + + +@pytest.fixture(scope='function') +def read_rdb(tmpdir, request): + return functools.partial(_read, tmpdir, 
Reader=FastRdb, format='rdb') + + +@pytest.fixture(scope='function') +def read_no_header(tmpdir, request): + return functools.partial(_read, tmpdir, Reader=FastNoHeader, + format='no_header') + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_simple_data(parallel, read_basic): + """ + Make sure the fast reader works with basic input data. + """ + table = read_basic("A B C\n1 2 3\n4 5 6", parallel=parallel) + expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C')) + assert_table_equal(table, expected) + + +def test_read_types(): + """ + Make sure that the read() function takes filenames, + strings, and lists of strings in addition to file-like objects. + """ + t1 = ascii.read("a b c\n1 2 3\n4 5 6", format='fast_basic', guess=False) + # TODO: also read from file + t2 = ascii.read(StringIO("a b c\n1 2 3\n4 5 6"), format='fast_basic', guess=False) + t3 = ascii.read(["a b c", "1 2 3", "4 5 6"], format='fast_basic', guess=False) + assert_table_equal(t1, t2) + assert_table_equal(t2, t3) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_supplied_names(parallel, read_basic): + """ + If passed as a parameter, names should replace any + column names found in the header. + """ + table = read_basic("A B C\n1 2 3\n4 5 6", names=('X', 'Y', 'Z'), parallel=parallel) + expected = Table([[1, 4], [2, 5], [3, 6]], names=('X', 'Y', 'Z')) + assert_table_equal(table, expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_no_header(parallel, read_basic, read_no_header): + """ + The header should not be read when header_start=None. Unless names is + passed, the column names should be auto-generated. + """ + # Cannot set header_start=None for basic format + with pytest.raises(ValueError): + read_basic("A B C\n1 2 3\n4 5 6", header_start=None, data_start=0, parallel=parallel) + + t2 = read_no_header("A B C\n1 2 3\n4 5 6", parallel=parallel) + expected = Table([['A', '1', '4'], ['B', '2', '5'], ['C', '3', '6']], names=('col1', 'col2', 'col3')) + assert_table_equal(t2, expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_no_header_supplied_names(parallel, read_basic, read_no_header): + """ + If header_start=None and names is passed as a parameter, header + data should not be read and names should be used instead. + """ + table = read_no_header("A B C\n1 2 3\n4 5 6", + names=('X', 'Y', 'Z'), parallel=parallel) + expected = Table([['A', '1', '4'], ['B', '2', '5'], ['C', '3', '6']], names=('X', 'Y', 'Z')) + assert_table_equal(table, expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_comment(parallel, read_basic): + """ + Make sure that line comments are ignored by the C reader. + """ + table = read_basic("# comment\nA B C\n # another comment\n1 2 3\n4 5 6", parallel=parallel) + expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C')) + assert_table_equal(table, expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_empty_lines(parallel, read_basic): + """ + Make sure that empty lines are ignored by the C reader. + """ + table = read_basic("\n\nA B C\n1 2 3\n\n\n4 5 6\n\n\n\n", parallel=parallel) + expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C')) + assert_table_equal(table, expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_lstrip_whitespace(parallel, read_basic): + """ + Test to make sure the reader ignores whitespace at the beginning of fields. 
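+
+    As a sketch of the expectation (values taken from the test body below):
+    a header cell written as ' \t3' should come back as the column name '3',
+    and a data cell ' c' as the bare value 'c'.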
+ """ + text = """ + 1, 2, \t3 + A,\t\t B, C + a, b, c +""" + ' \n' + + table = read_basic(text, delimiter=',', parallel=parallel) + expected = Table([['A', 'a'], ['B', 'b'], ['C', 'c']], names=('1', '2', '3')) + assert_table_equal(table, expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_rstrip_whitespace(parallel, read_basic): + """ + Test to make sure the reader ignores whitespace at the end of fields. + """ + text = ' 1 ,2 \t,3 \nA\t,B ,C\t \t \n \ta ,b , c \n' + table = read_basic(text, delimiter=',', parallel=parallel) + expected = Table([['A', 'a'], ['B', 'b'], ['C', 'c']], names=('1', '2', '3')) + assert_table_equal(table, expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_conversion(parallel, read_basic): + """ + The reader should try to convert each column to ints. If this fails, the + reader should try to convert to floats. Failing this, it should fall back + to strings. + """ + text = """ +A B C D E +1 a 3 4 5 +2. 1 9 10 -5.3e4 +4 2 -12 .4 six +""" + table = read_basic(text, parallel=parallel) + assert_equal(table['A'].dtype.kind, 'f') + assert table['B'].dtype.kind in ('S', 'U') + assert_equal(table['C'].dtype.kind, 'i') + assert_equal(table['D'].dtype.kind, 'f') + assert table['E'].dtype.kind in ('S', 'U') + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_delimiter(parallel, read_basic): + """ + Make sure that different delimiters work as expected. + """ + text = """ +COL1 COL2 COL3 +1 A -1 +2 B -2 +""" + expected = Table([[1, 2], ['A', 'B'], [-1, -2]], names=('COL1', 'COL2', 'COL3')) + + for sep in ' ,\t#;': + table = read_basic(text.replace(' ', sep), delimiter=sep, parallel=parallel) + assert_table_equal(table, expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_include_names(parallel, read_basic): + """ + If include_names is not None, the parser should read only those columns in include_names. + """ + table = read_basic("A B C D\n1 2 3 4\n5 6 7 8", include_names=['A', 'D'], parallel=parallel) + expected = Table([[1, 5], [4, 8]], names=('A', 'D')) + assert_table_equal(table, expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_exclude_names(parallel, read_basic): + """ + If exclude_names is not None, the parser should exclude the columns in exclude_names. + """ + table = read_basic("A B C D\n1 2 3 4\n5 6 7 8", exclude_names=['A', 'D'], parallel=parallel) + expected = Table([[2, 6], [3, 7]], names=('B', 'C')) + assert_table_equal(table, expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_include_exclude_names(parallel, read_basic): + """ + Make sure that include_names is applied before exclude_names if both are specified. + """ + text = """ +A B C D E F G H +1 2 3 4 5 6 7 8 +9 10 11 12 13 14 15 16 +""" + table = read_basic(text, include_names=['A', 'B', 'D', 'F', 'H'], + exclude_names=['B', 'F'], parallel=parallel) + expected = Table([[1, 9], [4, 12], [8, 16]], names=('A', 'D', 'H')) + assert_table_equal(table, expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_quoted_fields(parallel, read_basic): + """ + The character quotechar (default '"') should denote the start of a field which can + contain the field delimiter and newlines. 
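+
+    Sketch of the semantics exercised below: the two physical lines
+    'a b " c' and ' d"' form one logical row whose quoted last field spans
+    the newline, and the same data must also parse with quotechar="'".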
+ """ + if parallel: + pytest.xfail("Multiprocessing can fail with quoted fields") + text = """ +"A B" C D +1.5 2.1 -37.1 +a b " c + d" +""" + table = read_basic(text, parallel=parallel) + expected = Table([['1.5', 'a'], ['2.1', 'b'], ['-37.1', 'cd']], names=('A B', 'C', 'D')) + assert_table_equal(table, expected) + table = read_basic(text.replace('"', "'"), quotechar="'", parallel=parallel) + assert_table_equal(table, expected) + + +@pytest.mark.parametrize("key,val", [ + ('delimiter', ',,'), # multi-char delimiter + ('comment', '##'), # multi-char comment + ('data_start', None), # data_start=None + ('data_start', -1), # data_start negative + ('quotechar', '##'), # multi-char quote signifier + ('header_start', -1), # negative header_start + ('converters', dict((i + 1, ascii.convert_numpy(np.uint)) for i in range(3))), # passing converters + ('Inputter', ascii.ContinuationLinesInputter), # passing Inputter + ('header_Splitter', ascii.DefaultSplitter), # passing Splitter + ('data_Splitter', ascii.DefaultSplitter)]) +def test_invalid_parameters(key, val): + """ + Make sure the C reader raises an error if passed parameters it can't handle. + """ + with pytest.raises(ParameterError): + FastBasic(**{key: val}).read('1 2 3\n4 5 6') + with pytest.raises(ParameterError): + ascii.read('1 2 3\n4 5 6', + format='fast_basic', guess=False, **{key: val}) + + +def test_invalid_parameters_other(): + with pytest.raises(TypeError): + FastBasic(foo=7).read('1 2 3\n4 5 6') # unexpected argument + with pytest.raises(FastOptionsError): # don't fall back on the slow reader + ascii.read('1 2 3\n4 5 6', format='basic', fast_reader={'foo': 7}) + with pytest.raises(ParameterError): + # Outputter cannot be specified in constructor + FastBasic(Outputter=ascii.TableOutputter).read('1 2 3\n4 5 6') + + +def test_too_many_cols1(): + """ + If a row contains too many columns, the C reader should raise an error. + """ + text = """ +A B C +1 2 3 +4 5 6 +7 8 9 10 +11 12 13 +""" + with pytest.raises(CParserError) as e: + table = FastBasic().read(text) + assert 'CParserError: an error occurred while parsing table data: too many ' \ + 'columns found in line 3 of data' in str(e) + + +def test_too_many_cols2(): + text = """\ +aaa,bbb +1,2, +3,4, +""" + with pytest.raises(CParserError) as e: + table = FastCsv().read(text) + assert 'CParserError: an error occurred while parsing table data: too many ' \ + 'columns found in line 1 of data' in str(e) + + +def test_too_many_cols3(): + text = """\ +aaa,bbb +1,2,, +3,4, +""" + with pytest.raises(CParserError) as e: + table = FastCsv().read(text) + assert 'CParserError: an error occurred while parsing table data: too many ' \ + 'columns found in line 1 of data' in str(e) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_not_enough_cols(parallel, read_csv): + """ + If a row does not have enough columns, the FastCsv reader should add empty + fields while the FastBasic reader should raise an error. + """ + text = """ +A,B,C +1,2,3 +4,5 +6,7,8 +""" + table = read_csv(text, parallel=parallel) + assert table['B'][1] is not ma.masked + assert table['C'][1] is ma.masked + + with pytest.raises(CParserError) as e: + table = FastBasic(delimiter=',').read(text) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_data_end(parallel, read_basic, read_rdb): + """ + The parameter data_end should specify where data reading ends. 
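+
+    A sketch of the semantics checked below: data_end counts lines from the
+    top of the table (header included), so data_end=3 keeps just the first
+    two data rows, while negative values count from the end (data_end=-2
+    drops the last two rows).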
+ """ + text = """ +A B C +1 2 3 +4 5 6 +7 8 9 +10 11 12 +""" + table = read_basic(text, data_end=3, parallel=parallel) + expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C')) + assert_table_equal(table, expected) + + # data_end supports negative indexing + table = read_basic(text, data_end=-2, parallel=parallel) + assert_table_equal(table, expected) + + text = """ +A\tB\tC +N\tN\tS +1\t2\ta +3\t4\tb +5\t6\tc +""" + # make sure data_end works with RDB + table = read_rdb(text, data_end=-1, parallel=parallel) + expected = Table([[1, 3], [2, 4], ['a', 'b']], names=('A', 'B', 'C')) + assert_table_equal(table, expected) + + # positive index + table = read_rdb(text, data_end=3, parallel=parallel) + expected = Table([[1], [2], ['a']], names=('A', 'B', 'C')) + assert_table_equal(table, expected) + + # empty table if data_end is too small + table = read_rdb(text, data_end=1, parallel=parallel) + expected = Table([[], [], []], names=('A', 'B', 'C')) + assert_table_equal(table, expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_inf_nan(parallel, read_basic): + """ + Test that inf and nan-like values are correctly parsed on all platforms. + + Regression test for https://github.com/astropy/astropy/pull/3525 + """ + + text = dedent("""\ + A + nan + +nan + -nan + inf + infinity + +inf + +infinity + -inf + -infinity + """) + + expected = Table({'A': [np.nan, np.nan, np.nan, + np.inf, np.inf, np.inf, np.inf, + -np.inf, -np.inf]}) + + table = read_basic(text, parallel=parallel) + assert table['A'].dtype.kind == 'f' + assert_table_equal(table, expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_fill_values(parallel, read_basic): + """ + Make sure that the parameter fill_values works as intended. If fill_values + is not specified, the default behavior should be to convert '' to 0. 
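+
+    Sketch of the call signatures used below: fill_values=('-999', '0')
+    masks every '-999' entry and stores 0 in the underlying data,
+    fill_values=[] disables masking entirely, and ('', '0', 'A') restricts
+    the rule to column A.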
+ """ + text = """ +A, B, C +, 2, nan +a, -999, -3.4 +nan, 5, -9999 +8, nan, 7.6e12 +""" + table = read_basic(text, delimiter=',', parallel=parallel) + # The empty value in row A should become a masked '0' + assert isinstance(table['A'], MaskedColumn) + assert table['A'][0] is ma.masked + # '0' rather than 0 because there is a string in the column + assert_equal(table['A'].data.data[0], '0') + assert table['A'][1] is not ma.masked + + table = read_basic(text, delimiter=',', fill_values=('-999', '0'), parallel=parallel) + assert isinstance(table['B'], MaskedColumn) + assert table['A'][0] is not ma.masked # empty value unaffected + assert table['C'][2] is not ma.masked # -9999 is not an exact match + assert table['B'][1] is ma.masked + # Numeric because the rest of the column contains numeric data + assert_equal(table['B'].data.data[1], 0.0) + assert table['B'][0] is not ma.masked + + table = read_basic(text, delimiter=',', fill_values=[], parallel=parallel) + # None of the columns should be masked + for name in 'ABC': + assert not isinstance(table[name], MaskedColumn) + + table = read_basic(text, delimiter=',', fill_values=[('', '0', 'A'), + ('nan', '999', 'A', 'C')], parallel=parallel) + assert np.isnan(table['B'][3]) # nan filling skips column B + assert table['B'][3] is not ma.masked # should skip masking as well as replacing nan + assert table['A'][0] is ma.masked + assert table['A'][2] is ma.masked + assert_equal(table['A'].data.data[0], '0') + assert_equal(table['A'].data.data[2], '999') + assert table['C'][0] is ma.masked + assert_almost_equal(table['C'].data.data[0], 999.0) + assert_almost_equal(table['C'][1], -3.4) # column is still of type float + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_fill_include_exclude_names(parallel, read_csv): + """ + fill_include_names and fill_exclude_names should filter missing/empty value handling + in the same way that include_names and exclude_names filter output columns. + """ + text = """ +A, B, C +, 1, 2 +3, , 4 +5, 5, +""" + table = read_csv(text, fill_include_names=['A', 'B'], parallel=parallel) + assert table['A'][0] is ma.masked + assert table['B'][1] is ma.masked + assert table['C'][2] is not ma.masked # C not in fill_include_names + + table = read_csv(text, fill_exclude_names=['A', 'B'], parallel=parallel) + assert table['C'][2] is ma.masked + assert table['A'][0] is not ma.masked + assert table['B'][1] is not ma.masked # A and B excluded from fill handling + + table = read_csv(text, fill_include_names=['A', 'B'], fill_exclude_names=['B'], parallel=parallel) + assert table['A'][0] is ma.masked + assert table['B'][1] is not ma.masked # fill_exclude_names applies after fill_include_names + assert table['C'][2] is not ma.masked + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_many_rows(parallel, read_basic): + """ + Make sure memory reallocation works okay when the number of rows + is large (so that each column string is longer than INITIAL_COL_SIZE). + """ + text = 'A B C\n' + for i in range(500): # create 500 rows + text += ' '.join([str(i) for i in range(3)]) + text += '\n' + + table = read_basic(text, parallel=parallel) + expected = Table([[0] * 500, [1] * 500, [2] * 500], names=('A', 'B', 'C')) + assert_table_equal(table, expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_many_columns(parallel, read_basic): + """ + Make sure memory reallocation works okay when the number of columns + is large (so that each header string is longer than INITIAL_HEADER_SIZE). 
+    """
+    # create a string with 500 columns and two data rows
+    text = ' '.join([str(i) for i in range(500)])
+    text += ('\n' + text + '\n' + text)
+    table = read_basic(text, parallel=parallel)
+    expected = Table([[i, i] for i in range(500)], names=[str(i) for i in range(500)])
+    assert_table_equal(table, expected)
+
+
+def test_fast_reader():
+    """
+    Make sure that ascii.read() works as expected by default and with
+    fast_reader specified.
+    """
+    text = 'a b c\n1 2 3\n4 5 6'
+    with pytest.raises(ParameterError):  # C reader can't handle regex comment
+        ascii.read(text, format='fast_basic', guess=False, comment='##')
+
+    # Enable multiprocessing and the fast converter
+    try:
+        ascii.read(text, format='basic', guess=False,
+                   fast_reader={'parallel': True, 'use_fast_converter': True})
+    except NotImplementedError:
+        # Might get this on Windows, try without parallel...
+        if os.name == 'nt':
+            ascii.read(text, format='basic', guess=False,
+                       fast_reader={'parallel': False,
+                                    'use_fast_converter': True})
+        else:
+            raise
+
+    # Should raise an error if fast_reader has an invalid key
+    with pytest.raises(FastOptionsError):
+        ascii.read(text, format='fast_basic', guess=False, fast_reader={'foo': True})
+
+    # Use the slow reader instead
+    ascii.read(text, format='basic', guess=False, comment='##', fast_reader=False)
+    # Will try the slow reader afterwards by default
+    ascii.read(text, format='basic', guess=False, comment='##')
+
+
+@pytest.mark.parametrize("parallel", [True, False])
+def test_read_tab(parallel, read_tab):
+    """
+    The fast reader for tab-separated values should not strip whitespace, unlike
+    the basic reader.
+    """
+    if parallel:
+        pytest.xfail("Multiprocessing can fail with quoted fields")
+    text = '1\t2\t3\n a\t b \t\n c\t" d\n e"\t '
+    table = read_tab(text, parallel=parallel)
+    assert_equal(table['1'][0], ' a')  # preserve line whitespace
+    assert_equal(table['2'][0], ' b ')  # preserve field whitespace
+    assert table['3'][0] is ma.masked  # empty value should be masked
+    assert_equal(table['2'][1], ' d e')  # preserve whitespace in quoted fields
+    assert_equal(table['3'][1], ' ')  # preserve end-of-line whitespace
+
+
+@pytest.mark.parametrize("parallel", [True, False])
+def test_default_data_start(parallel, read_basic):
+    """
+    If data_start is not explicitly passed to read(), data processing should
+    begin right after the header.
+    """
+    text = 'ignore this line\na b c\n1 2 3\n4 5 6'
+    table = read_basic(text, header_start=1, parallel=parallel)
+    expected = Table([[1, 4], [2, 5], [3, 6]], names=('a', 'b', 'c'))
+    assert_table_equal(table, expected)
+
+
+@pytest.mark.parametrize("parallel", [True, False])
+def test_commented_header(parallel, read_commented_header):
+    """
+    The FastCommentedHeader reader should mimic the behavior of the
+    CommentedHeader reader by overriding the default header behavior of FastBasic.
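+
+    In this format the column names live in a comment line just above the
+    data, e.g.::
+
+        # A B C
+        1 2 3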
+ """ + text = """ + # A B C + 1 2 3 + 4 5 6 +""" + t1 = read_commented_header(text, parallel=parallel) + expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C')) + assert_table_equal(t1, expected) + + text = '# first commented line\n # second commented line\n\n' + text + t2 = read_commented_header(text, header_start=2, data_start=0, parallel=parallel) + assert_table_equal(t2, expected) + t3 = read_commented_header(text, header_start=-1, data_start=0, parallel=parallel) # negative indexing allowed + assert_table_equal(t3, expected) + + text += '7 8 9' + t4 = read_commented_header(text, header_start=2, data_start=2, parallel=parallel) + expected = Table([[7], [8], [9]], names=('A', 'B', 'C')) + assert_table_equal(t4, expected) + + with pytest.raises(ParameterError): + read_commented_header(text, header_start=-1, data_start=-1, parallel=parallel) # data_start cannot be negative + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_rdb(parallel, read_rdb): + """ + Make sure the FastRdb reader works as expected. + """ + text = """ + +A\tB\tC +1n\tS\t4N +1\t 9\t4.3 +""" + table = read_rdb(text, parallel=parallel) + expected = Table([[1], [' 9'], [4.3]], names=('A', 'B', 'C')) + assert_table_equal(table, expected) + assert_equal(table['A'].dtype.kind, 'i') + assert table['B'].dtype.kind in ('S', 'U') + assert_equal(table['C'].dtype.kind, 'f') + + with pytest.raises(ValueError) as e: + text = 'A\tB\tC\nN\tS\tN\n4\tb\ta' # C column contains non-numeric data + read_rdb(text, parallel=parallel) + assert 'Column C failed to convert' in str(e) + + with pytest.raises(ValueError) as e: + text = 'A\tB\tC\nN\tN\n1\t2\t3' # not enough types specified + read_rdb(text, parallel=parallel) + assert 'mismatch between number of column names and column types' in str(e) + + with pytest.raises(ValueError) as e: + text = 'A\tB\tC\nN\tN\t5\n1\t2\t3' # invalid type for column C + read_rdb(text, parallel=parallel) + assert 'type definitions do not all match [num](N|S)' in str(e) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_data_start(parallel, read_basic): + """ + Make sure that data parsing begins at data_start (ignoring empty and + commented lines but not taking quoted values into account). 
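+
+    For instance, data_start=4 below lands in the middle of a multi-line
+    quoted field, and the reader is expected to fail with a parse error
+    rather than resynchronize.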
+ """ + if parallel: + pytest.xfail("Multiprocessing can fail with quoted fields") + text = """ +A B C +1 2 3 +4 5 6 + +7 8 "9 + \t1" +# comment +10 11 12 +""" + table = read_basic(text, data_start=2, parallel=parallel) + expected = Table([[4, 7, 10], [5, 8, 11], [6, 91, 12]], names=('A', 'B', 'C')) + assert_table_equal(table, expected) + + table = read_basic(text, data_start=3, parallel=parallel) + # ignore empty line + expected = Table([[7, 10], [8, 11], [91, 12]], names=('A', 'B', 'C')) + assert_table_equal(table, expected) + + with pytest.raises(CParserError) as e: + # tries to begin in the middle of quoted field + read_basic(text, data_start=4, parallel=parallel) + assert 'not enough columns found in line 1 of data' in str(e) + + table = read_basic(text, data_start=5, parallel=parallel) + # ignore commented line + expected = Table([[10], [11], [12]], names=('A', 'B', 'C')) + assert_table_equal(table, expected) + + text = """ +A B C +1 2 3 +4 5 6 + +7 8 9 +# comment +10 11 12 +""" + # make sure reading works as expected in parallel + table = read_basic(text, data_start=2, parallel=parallel) + expected = Table([[4, 7, 10], [5, 8, 11], [6, 9, 12]], names=('A', 'B', 'C')) + assert_table_equal(table, expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_quoted_empty_values(parallel, read_basic): + """ + Quoted empty values spanning multiple lines should be treated correctly. + """ + if parallel: + pytest.xfail("Multiprocessing can fail with quoted fields") + text = 'a b c\n1 2 " \n "' + table = read_basic(text, parallel=parallel) + assert table['c'][0] is ma.masked # empty value masked by default + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_csv_comment_default(parallel, read_csv): + """ + Unless the comment parameter is specified, the CSV reader should + not treat any lines as comments. + """ + text = 'a,b,c\n#1,2,3\n4,5,6' + table = read_csv(text, parallel=parallel) + expected = Table([['#1', '4'], [2, 5], [3, 6]], names=('a', 'b', 'c')) + assert_table_equal(table, expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_whitespace_before_comment(parallel, read_tab): + """ + Readers that don't strip whitespace from data (Tab, RDB) + should still treat lines with leading whitespace and then + the comment char as comment lines. + """ + text = 'a\tb\tc\n # comment line\n1\t2\t3' + table = read_tab(text, parallel=parallel) + expected = Table([[1], [2], [3]], names=('a', 'b', 'c')) + assert_table_equal(table, expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_strip_line_trailing_whitespace(parallel, read_basic): + """ + Readers that strip whitespace from lines should ignore + trailing whitespace after the last data value of each + row. + """ + text = 'a b c\n1 2 \n3 4 5' + with pytest.raises(CParserError) as e: + ascii.read(StringIO(text), format='fast_basic', guess=False) + assert 'not enough columns found in line 1' in str(e) + + text = 'a b c\n 1 2 3 \t \n 4 5 6 ' + table = read_basic(text, parallel=parallel) + expected = Table([[1, 4], [2, 5], [3, 6]], names=('a', 'b', 'c')) + assert_table_equal(table, expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_no_data(parallel, read_basic): + """ + As long as column names are supplied, the C reader + should return an empty table in the absence of data. 
+ """ + table = read_basic('a b c', parallel=parallel) + expected = Table([[], [], []], names=('a', 'b', 'c')) + assert_table_equal(table, expected) + + table = read_basic('a b c\n1 2 3', data_start=2, parallel=parallel) + assert_table_equal(table, expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_line_endings(parallel, read_basic, read_commented_header, read_rdb): + """ + Make sure the fast reader accepts CR and CR+LF + as newlines. + """ + text = 'a b c\n1 2 3\n4 5 6\n7 8 9\n' + expected = Table([[1, 4, 7], [2, 5, 8], [3, 6, 9]], names=('a', 'b', 'c')) + + for newline in ('\r\n', '\r'): + table = read_basic(text.replace('\n', newline), parallel=parallel) + assert_table_equal(table, expected) + + # Make sure the splitlines() method of FileString + # works with CR/CR+LF line endings + text = '#' + text + for newline in ('\r\n', '\r'): + table = read_commented_header(text.replace('\n', newline), parallel=parallel) + assert_table_equal(table, expected) + + expected = Table([[1, 4, 7], [2, 5, 8], [3, 6, 9]], names=('a', 'b', 'c'), masked=True) + expected['a'][0] = np.ma.masked + expected['c'][0] = np.ma.masked + text = 'a\tb\tc\nN\tN\tN\n\t2\t\n4\t5\t6\n7\t8\t9\n' + for newline in ('\r\n', '\r'): + table = read_rdb(text.replace('\n', newline), parallel=parallel) + assert_table_equal(table, expected) + assert np.all(table == expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_store_comments(parallel, read_basic): + """ + Make sure that the output Table produced by the fast + reader stores any comment lines in its meta attribute. + """ + text = """ +# header comment +a b c +# comment 2 +# comment 3 +1 2 3 +4 5 6 +""" + table = read_basic(text, parallel=parallel, check_meta=True) + assert_equal(table.meta['comments'], + ['header comment', 'comment 2', 'comment 3']) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_empty_quotes(parallel, read_basic): + """ + Make sure the C reader doesn't segfault when the + input data contains empty quotes. [#3407] + """ + table = read_basic('a b\n1 ""\n2 ""', parallel=parallel) + expected = Table([[1, 2], [0, 0]], names=('a', 'b')) + assert_table_equal(table, expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_fast_tab_with_names(parallel, read_tab): + """ + Make sure the C reader doesn't segfault when the header for the + first column is missing [#3545] + """ + content = """# +\tdecDeg\tRate_pn_offAxis\tRate_mos2_offAxis\tObsID\tSourceID\tRADeg\tversion\tCounts_pn\tRate_pn\trun\tRate_mos1\tRate_mos2\tInserted_pn\tInserted_mos2\tbeta\tRate_mos1_offAxis\trcArcsec\tname\tInserted\tCounts_mos1\tInserted_mos1\tCounts_mos2\ty\tx\tCounts\toffAxis\tRot +-3.007559\t0.0000\t0.0010\t0013140201\t0\t213.462574\t0\t2\t0.0002\t0\t0.0001\t0.0001\t0\t1\t0.66\t0.0217\t3.0\tfakeXMMXCS J1413.8-0300\t3\t1\t2\t1\t398.000\t127.000\t5\t13.9\t72.3\t""" + head = ['A{0}'.format(i) for i in range(28)] + table = read_tab(content, data_start=1, + parallel=parallel, names=head) + + +@pytest.mark.skipif(not os.getenv('TEST_READ_HUGE_FILE'), + reason='Environment variable TEST_READ_HUGE_FILE must be ' + 'defined to run this test') +def test_read_big_table(tmpdir): + """Test reading of a huge file. + + This test generates a huge CSV file (~2.3Gb) before reading it (see + https://github.com/astropy/astropy/pull/5319). The test is run only if the + environment variable ``TEST_READ_HUGE_FILE`` is defined. Note that running + the test requires quite a lot of memory (~18Gb when reading the file) !! 
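+
+    (The row and column counts below are simply what it takes to reach
+    ~2.3 Gb; the intent, presumably, is to push the file well past the
+    2 GiB boundary where 32-bit size arithmetic would break.)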
+ + """ + NB_ROWS = 250000 + NB_COLS = 500 + filename = str(tmpdir.join("big_table.csv")) + + print("Creating a {} rows table ({} columns).".format(NB_ROWS, NB_COLS)) + data = np.random.random(NB_ROWS) + t = Table(data=[data]*NB_COLS, names=[str(i) for i in range(NB_COLS)]) + data = None + + print("Saving the table to {}".format(filename)) + t.write(filename, format='ascii.csv', overwrite=True) + t = None + + print("Counting the number of lines in the csv, it should be {}" + " + 1 (header).".format(NB_ROWS)) + assert sum(1 for line in open(filename)) == NB_ROWS + 1 + + print("Reading the file with astropy.") + t = Table.read(filename, format='ascii.csv', fast_reader=True) + assert len(t) == NB_ROWS + + +# fast_reader configurations: False| 'use_fast_converter'=False|True +@pytest.mark.parametrize('reader', [0, 1, 2]) +# catch Windows environment since we cannot use _read() with custom fast_reader +@pytest.mark.parametrize("parallel", [False, True]) +def test_data_out_of_range(parallel, reader): + """ + Numbers with exponents beyond float64 range (|~4.94e-324 to 1.7977e+308|) + shall be returned as 0 and +-inf respectively by the C parser, just like + the Python parser. + Test fast converter only to nominal accuracy. + """ + if os.name == 'nt': + pytest.xfail(reason="Multiprocessing is currently unsupported on Windows") + # Python reader and strtod() are expected to return precise results + rtol = 1.e-30 + if reader > 1: + rtol = 1.e-15 + # passing fast_reader dict with parametrize does not work! + if reader > 0: + fast_reader = {'parallel': parallel, 'use_fast_converter': reader > 1} + else: + fast_reader = False + if parallel: + if reader < 1: + pytest.skip("Multiprocessing only available in fast reader") + elif TRAVIS: + pytest.xfail("Multiprocessing can sometimes fail on Travis CI") + + fields = ['10.1E+199', '3.14e+313', '2048e+306', '0.6E-325', '-2.e345'] + values = np.array([1.01e200, np.inf, np.inf, 0.0, -np.inf]) + t = ascii.read(StringIO(' '.join(fields)), format='no_header', guess=False, + fast_reader=fast_reader) + read_values = np.array([col[0] for col in t.itercols()]) + assert_almost_equal(read_values, values, rtol=rtol, atol=1.e-324) + + # test some additional corner cases + fields = ['.0101E202', '0.000000314E+314', '1777E+305', '-1799E+305', '0.2e-323', + '2500e-327', ' 0.0000000000000000000001024E+330'] + values = np.array([1.01e200, 3.14e307, 1.777e308, -np.inf, 0.0, 4.94e-324, 1.024e308]) + t = ascii.read(StringIO(' '.join(fields)), format='no_header', guess=False, + fast_reader=fast_reader) + read_values = np.array([col[0] for col in t.itercols()]) + assert_almost_equal(read_values, values, rtol=rtol, atol=1.e-324) + + # test corner cases again with non-standard exponent_style (auto-detection) + if reader < 2: + pytest.skip("Fortran exponent style only available in fast converter") + fast_reader.update({'exponent_style': 'A'}) + fields = ['.0101D202', '0.000000314d+314', '1777+305', '-1799E+305', '0.2e-323', + '2500-327', ' 0.0000000000000000000001024Q+330'] + t = ascii.read(StringIO(' '.join(fields)), format='no_header', guess=False, + fast_reader=fast_reader) + read_values = np.array([col[0] for col in t.itercols()]) + assert_almost_equal(read_values, values, rtol=rtol, atol=1.e-324) + + +# catch Windows environment since we cannot use _read() with custom fast_reader +@pytest.mark.parametrize("parallel", [True, False]) +def test_int_out_of_range(parallel): + """ + Integer numbers outside int range shall be returned as string columns + consistent with the standard 
(Python) parser (no 'upcasting' to float). + """ + if os.name == 'nt': + pytest.xfail(reason="Multiprocessing is currently unsupported on Windows") + + imin = np.iinfo(np.int).min+1 + imax = np.iinfo(np.int).max-1 + huge = '{:d}'.format(imax+2) + + text = 'P M S\n {:d} {:d} {:s}'.format(imax, imin, huge) + expected = Table([[imax], [imin], [huge]], names=('P', 'M', 'S')) + table = ascii.read(text, format='basic', guess=False, + fast_reader={'parallel': parallel}) + assert_table_equal(table, expected) + + # check with leading zeroes to make sure strtol does not read them as octal + text = 'P M S\n000{:d} -0{:d} 00{:s}'.format(imax, -imin, huge) + expected = Table([[imax], [imin], ['00'+huge]], names=('P', 'M', 'S')) + table = ascii.read(text, format='basic', guess=False, + fast_reader={'parallel': parallel}) + assert_table_equal(table, expected) + + # mixed columns should be returned as float, but if the out-of-range integer + # shows up first, it will produce a string column - with both readers + pytest.xfail("Integer fallback depends on order of rows") + text = 'A B\n 12.3 {0:d}9\n {0:d}9 45.6e7'.format(imax) + expected = Table([[12.3, 10.*imax], [10.*imax, 4.56e8]], + names=('A', 'B')) + + table = ascii.read(text, format='basic', guess=False, + fast_reader={'parallel': parallel}) + assert_table_equal(table, expected) + table = ascii.read(text, format='basic', guess=False, fast_reader=False) + assert_table_equal(table, expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_fortran_reader(parallel): + """ + Make sure that ascii.read() can read Fortran-style exponential notation + using the fast_reader. + """ + if os.name == 'nt': + pytest.xfail(reason="Multiprocessing is currently unsupported on Windows") + + text = 'A B C\n100.01{:s}+99 2.0 3\n 4.2{:s}-1 5.0{:s}-1 0.6{:s}4' + expected = Table([[1.0001e101, 0.42], [2, 0.5], [3.0, 6000]], + names=('A', 'B', 'C')) + + expstyles = {'e': 4*('E'), 'D': ('D', 'd', 'd', 'D'), 'Q': 2*('q', 'Q'), + 'fortran': ('D', 'E', 'Q', 'd')} + + # C strtod (not-fast converter) can't handle Fortran exp + with pytest.raises(FastOptionsError) as e: + ascii.read(text.format(*(4*('D'))), format='basic', guess=False, + fast_reader={'use_fast_converter': False, + 'parallel': parallel, 'exponent_style': 'D'}) + assert 'fast_reader: exponent_style requires use_fast_converter' in str(e) + + # enable multiprocessing and the fast converter + # iterate over all style-exponent combinations + for s, c in expstyles.items(): + table = ascii.read(text.format(*c), format='basic', guess=False, + fast_reader={'parallel': parallel, + 'exponent_style': s}) + assert_table_equal(table, expected) + + # mixes and triple-exponents without any character using autodetect option + text = 'A B C\n1.0001+101 2.0E0 3\n.42d0 0.5 6.+003' + table = ascii.read(text, format='basic', guess=False, + fast_reader={'parallel': parallel, 'exponent_style': 'fortran'}) + assert_table_equal(table, expected) + + # additional corner-case checks + text = 'A B C\n1.0001+101 2.0+000 3\n0.42+000 0.5 6000.-000' + table = ascii.read(text, format='basic', guess=False, + fast_reader={'parallel': parallel, 'exponent_style': 'fortran'}) + assert_table_equal(table, expected) + + +@pytest.mark.parametrize("parallel", [True, False]) +def test_fortran_invalid_exp(parallel): + """ + Test Fortran-style exponential notation in the fast_reader with invalid + exponent-like patterns (no triple-digits) to make sure they are returned + as strings instead, as with the standard C parser. 
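+
+    An example of the distinction, taken from the field list below:
+    '4.2-122' is a valid Fortran-style value and parses to 4.2e-122, while
+    '4.56e-2.3' has a fractional exponent and must come back as the literal
+    string.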
+ """ + if os.name == 'nt': + pytest.xfail(reason="Multiprocessing is currently unsupported on Windows") + if parallel and TRAVIS: + pytest.xfail("Multiprocessing can sometimes fail on Travis CI") + + fields = ['1.0001+1', '.42d1', '2.3+10', '0.5', '3+1001', '3000.', + '2', '4.56e-2.3', '8000', '4.2-122'] + values = ['1.0001+1', 4.2, '2.3+10', 0.5, '3+1001', 3.e3, + 2, '4.56e-2.3', 8000, 4.2e-122] + + t = ascii.read(StringIO(' '.join(fields)), format='no_header', guess=False, + fast_reader={'parallel': parallel, 'exponent_style': 'A'}) + read_values = [col[0] for col in t.itercols()] + assert read_values == values diff --git a/astropy/io/ascii/tests/test_cds_header_from_readme.py b/astropy/io/ascii/tests/test_cds_header_from_readme.py new file mode 100644 index 0000000..43c58f6 --- /dev/null +++ b/astropy/io/ascii/tests/test_cds_header_from_readme.py @@ -0,0 +1,155 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +from ... import ascii +from .common import (assert_equal, assert_almost_equal, has_isnan, + setup_function, teardown_function) + + +def read_table1(readme, data): + reader = ascii.Cds(readme) + return reader.read(data) + + +def read_table2(readme, data): + reader = ascii.get_reader(Reader=ascii.Cds, readme=readme) + reader.outputter = ascii.TableOutputter() + return reader.read(data) + + +def read_table3(readme, data): + return ascii.read(data, readme=readme) + + +def test_description(): + readme = 't/cds/description/ReadMe' + data = 't/cds/description/table.dat' + for read_table in (read_table1, read_table2, read_table3): + table = read_table(readme, data) + assert_equal(len(table), 2) + assert_equal(table['Cluster'].description, 'Cluster name') + assert_equal(table['Star'].description, '') + assert_equal(table['Wave'].description, 'wave? 
Wavelength in Angstroms') + assert_equal(table['El'].description, 'a') + assert_equal(table['ion'].description, '- Ionization stage (1 for neutral element)') + assert_equal(table['EW'].description, 'Equivalent width (in mA)') + assert_equal(table['Q'].description, 'DAOSPEC quality parameter Q(large values are bad)') + + +def test_multi_header(): + readme = 't/cds/multi/ReadMe' + data = 't/cds/multi/lhs2065.dat' + for read_table in (read_table1, read_table2, read_table3): + table = read_table(readme, data) + assert_equal(len(table), 18) + assert_almost_equal(table['Lambda'][-1], 6479.32) + assert_equal(table['Fnu'][-1], '0.285937') + data = 't/cds/multi/lp944-20.dat' + for read_table in (read_table1, read_table2, read_table3): + table = read_table(readme, data) + assert_equal(len(table), 18) + assert_almost_equal(table['Lambda'][0], 6476.09) + assert_equal(table['Fnu'][-1], '0.489005') + + +def test_glob_header(): + readme = 't/cds/glob/ReadMe' + data = 't/cds/glob/lmxbrefs.dat' + for read_table in (read_table1, read_table2, read_table3): + table = read_table(readme, data) + assert_equal(len(table), 291) + assert_equal(table['Name'][-1], 'J1914+0953') + assert_equal(table['BibCode'][-2], '2005A&A...432..235R') + + +def test_header_from_readme(): + r = ascii.Cds("t/vizier/ReadMe") + table = r.read("t/vizier/table1.dat") + assert len(r.data.data_lines) == 15 + assert len(table) == 15 + assert len(table.keys()) == 18 + Bmag = [14.79, + 15.00, + 14.80, + 12.38, + 12.36, + 12.24, + 13.75, + 13.65, + 13.41, + 11.59, + 11.68, + 11.53, + 13.92, + 14.03, + 14.18] + for i, val in enumerate(table.field('Bmag')): + assert val == Bmag[i] + + table = r.read("t/vizier/table5.dat") + assert len(r.data.data_lines) == 49 + assert len(table) == 49 + assert len(table.keys()) == 10 + Q = [0.289, + 0.325, + 0.510, + 0.577, + 0.539, + 0.390, + 0.957, + 0.736, + 1.435, + 1.117, + 1.473, + 0.808, + 1.416, + 2.209, + 0.617, + 1.046, + 1.604, + 1.419, + 1.431, + 1.183, + 1.210, + 1.005, + 0.706, + 0.665, + 0.340, + 0.323, + 0.391, + 0.280, + 0.343, + 0.369, + 0.495, + 0.828, + 1.113, + 0.499, + 1.038, + 0.260, + 0.863, + 1.638, + 0.479, + 0.232, + 0.627, + 0.671, + 0.371, + 0.851, + 0.607, + -9.999, + 1.958, + 1.416, + 0.949] + if has_isnan: + from .common import isnan + for i, val in enumerate(table.field('Q')): + if isnan(val): + # text value for a missing value in that table + assert Q[i] == -9.999 + else: + assert val == Q[i] + + +if __name__ == "__main__": # run from main directory; not from test/ + test_header_from_readme() + test_multi_header() + test_glob_header() + test_description() diff --git a/astropy/io/ascii/tests/test_compressed.py b/astropy/io/ascii/tests/test_compressed.py new file mode 100644 index 0000000..6d24213 --- /dev/null +++ b/astropy/io/ascii/tests/test_compressed.py @@ -0,0 +1,55 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +import os +import sys + +import pytest +import numpy as np + +from .. 
import read + +ROOT = os.path.abspath(os.path.dirname(__file__)) + + +try: + import bz2 # pylint: disable=W0611 +except ImportError: + HAS_BZ2 = False +else: + HAS_BZ2 = True + +try: + if sys.version_info >= (3, 3, 0): + import lzma + else: + from backports import lzma # pylint: disable=W0611 +except ImportError: + HAS_XZ = False +else: + HAS_XZ = True + + +@pytest.mark.parametrize('filename', ['t/daophot.dat.gz', 't/latex1.tex.gz', + 't/short.rdb.gz']) +def test_gzip(filename): + t_comp = read(os.path.join(ROOT, filename)) + t_uncomp = read(os.path.join(ROOT, filename.replace('.gz', ''))) + assert t_comp.dtype.names == t_uncomp.dtype.names + assert np.all(t_comp.as_array() == t_uncomp.as_array()) + + +@pytest.mark.xfail('not HAS_BZ2') +@pytest.mark.parametrize('filename', ['t/short.rdb.bz2', 't/ipac.dat.bz2']) +def test_bzip2(filename): + t_comp = read(os.path.join(ROOT, filename)) + t_uncomp = read(os.path.join(ROOT, filename.replace('.bz2', ''))) + assert t_comp.dtype.names == t_uncomp.dtype.names + assert np.all(t_comp.as_array() == t_uncomp.as_array()) + + +@pytest.mark.xfail('not HAS_XZ') +@pytest.mark.parametrize('filename', ['t/short.rdb.xz', 't/ipac.dat.xz']) +def test_xz(filename): + t_comp = read(os.path.join(ROOT, filename)) + t_uncomp = read(os.path.join(ROOT, filename.replace('.xz', ''))) + assert t_comp.dtype.names == t_uncomp.dtype.names + assert np.all(t_comp.as_array() == t_uncomp.as_array()) diff --git a/astropy/io/ascii/tests/test_connect.py b/astropy/io/ascii/tests/test_connect.py new file mode 100644 index 0000000..bbc56de --- /dev/null +++ b/astropy/io/ascii/tests/test_connect.py @@ -0,0 +1,140 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +import os + +import pytest + +from ....table import Table, Column + +ROOT = os.path.abspath(os.path.dirname(__file__)) + +files = ['t/cds.dat', 't/ipac.dat', 't/daophot.dat', 't/latex1.tex', + 't/simple_csv.csv'] + +# Check to see if the BeautifulSoup dependency is present. 
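+# The HTML reader needs BeautifulSoup, so 't/html.html' is appended to the
+# list of generic-read files below only when bs4 imports cleanly.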
+ +try: + from bs4 import BeautifulSoup # pylint: disable=W0611 + HAS_BEAUTIFUL_SOUP = True +except ImportError: + HAS_BEAUTIFUL_SOUP = False + +if HAS_BEAUTIFUL_SOUP: + files.append('t/html.html') + + +@pytest.mark.parametrize('filename', files) +def test_read_generic(filename): + Table.read(os.path.join(ROOT, filename), format='ascii') + + +def test_write_generic(tmpdir): + t = Table() + t.add_column(Column(name='a', data=[1, 2, 3])) + t.add_column(Column(name='b', data=['a', 'b', 'c'])) + t.write(str(tmpdir.join("test")), format='ascii') + + +def test_read_ipac(): + Table.read(os.path.join(ROOT, 't/ipac.dat'), format='ipac') + + +def test_read_cds(): + Table.read(os.path.join(ROOT, 't/cds.dat'), format='cds') + + +def test_read_dapphot(): + Table.read(os.path.join(ROOT, 't/daophot.dat'), format='daophot') + + +def test_read_latex(): + Table.read(os.path.join(ROOT, 't/latex1.tex'), format='latex') + + +def test_read_latex_noformat(): + Table.read(os.path.join(ROOT, 't/latex1.tex')) + + +def test_write_latex(tmpdir): + t = Table() + t.add_column(Column(name='a', data=[1, 2, 3])) + t.add_column(Column(name='b', data=['a', 'b', 'c'])) + path = str(tmpdir.join("data.tex")) + t.write(path, format='latex') + + +def test_write_latex_noformat(tmpdir): + t = Table() + t.add_column(Column(name='a', data=[1, 2, 3])) + t.add_column(Column(name='b', data=['a', 'b', 'c'])) + path = str(tmpdir.join("data.tex")) + t.write(path) + + +@pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP') +def test_read_html(): + Table.read(os.path.join(ROOT, 't/html.html'), format='html') + + +@pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP') +def test_read_html_noformat(): + Table.read(os.path.join(ROOT, 't/html.html')) + + +def test_write_html(tmpdir): + t = Table() + t.add_column(Column(name='a', data=[1, 2, 3])) + t.add_column(Column(name='b', data=['a', 'b', 'c'])) + path = str(tmpdir.join("data.html")) + t.write(path, format='html') + + +def test_write_html_noformat(tmpdir): + t = Table() + t.add_column(Column(name='a', data=[1, 2, 3])) + t.add_column(Column(name='b', data=['a', 'b', 'c'])) + path = str(tmpdir.join("data.html")) + t.write(path) + + +def test_read_rdb(): + Table.read(os.path.join(ROOT, 't/short.rdb'), format='rdb') + + +def test_read_rdb_noformat(): + Table.read(os.path.join(ROOT, 't/short.rdb')) + + +def test_write_rdb(tmpdir): + t = Table() + t.add_column(Column(name='a', data=[1, 2, 3])) + t.add_column(Column(name='b', data=['a', 'b', 'c'])) + path = str(tmpdir.join("data.rdb")) + t.write(path, format='rdb') + + +def test_write_rdb_noformat(tmpdir): + t = Table() + t.add_column(Column(name='a', data=[1, 2, 3])) + t.add_column(Column(name='b', data=['a', 'b', 'c'])) + path = str(tmpdir.join("data.rdb")) + t.write(path) + + +def test_read_csv(): + '''If properly registered, filename should be sufficient to specify format + + #3189 + ''' + Table.read(os.path.join(ROOT, 't/simple_csv.csv')) + + +def test_write_csv(tmpdir): + '''If properly registered, filename should be sufficient to specify format + + #3189 + ''' + t = Table() + t.add_column(Column(name='a', data=[1, 2, 3])) + t.add_column(Column(name='b', data=['a', 'b', 'c'])) + path = str(tmpdir.join("data.csv")) + t.write(path) diff --git a/astropy/io/ascii/tests/test_ecsv.py b/astropy/io/ascii/tests/test_ecsv.py new file mode 100644 index 0000000..5869e8b --- /dev/null +++ b/astropy/io/ascii/tests/test_ecsv.py @@ -0,0 +1,418 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +""" +This module tests some of the methods related to the 
``ECSV``
+reader/writer.
+
+Requires `pyyaml <http://pyyaml.org/>`_ to be installed.
+"""
+import os
+import copy
+import sys
+
+import pytest
+import numpy as np
+
+from ....table import Table, Column, QTable, NdarrayMixin
+from ....table.table_helpers import simple_table
+from ....coordinates import SkyCoord, Latitude, Longitude, Angle, EarthLocation
+from ....time import Time, TimeDelta
+from ....tests.helper import quantity_allclose
+from ....units.quantity import QuantityInfo
+
+from ....extern.six.moves import StringIO
+from ..ecsv import DELIMITERS
+from ... import ascii
+from .... import units as u
+
+try:
+    import yaml  # pylint: disable=W0611
+    HAS_YAML = True
+except ImportError:
+    HAS_YAML = False
+
+DTYPES = ['bool', 'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32',
+          'uint64', 'float16', 'float32', 'float64', 'float128',
+          'str']
+if os.name == 'nt' or sys.maxsize <= 2**32:
+    DTYPES.remove('float128')
+
+T_DTYPES = Table()
+
+for dtype in DTYPES:
+    if dtype == 'bool':
+        data = np.array([False, True, False])
+    elif dtype == 'str':
+        data = np.array(['ab 0', 'ab, 1', 'ab2'])
+    else:
+        data = np.arange(3, dtype=dtype)
+    c = Column(data, unit='m / s', description='descr_' + dtype,
+               meta={'meta ' + dtype: 1})
+    T_DTYPES[dtype] = c
+
+T_DTYPES.meta['comments'] = ['comment1', 'comment2']
+
+# Corresponds to simple_table()
+SIMPLE_LINES = ['# %ECSV 0.9',
+                '# ---',
+                '# datatype:',
+                '# - {name: a, datatype: int64}',
+                '# - {name: b, datatype: float64}',
+                '# - {name: c, datatype: string}',
+                '# schema: astropy-2.0',
+                'a b c',
+                '1 1.0 c',
+                '2 2.0 d',
+                '3 3.0 e']
+
+
+@pytest.mark.skipif('not HAS_YAML')
+def test_write_simple():
+    """
+    Write a simple table with common types. This shows the compact version
+    of serialization with one line per column.
+    """
+    t = simple_table()
+
+    out = StringIO()
+    t.write(out, format='ascii.ecsv')
+    assert out.getvalue().splitlines() == SIMPLE_LINES
+
+
+@pytest.mark.skipif('not HAS_YAML')
+def test_write_full():
+    """
+    Write a full-featured table with common types and explicitly check the output.
+    """
+    t = T_DTYPES['bool', 'int64', 'float64', 'str']
+    lines = ['# %ECSV 0.9',
+             '# ---',
+             '# datatype:',
+             '# - name: bool',
+             '#   unit: m / s',
+             '#   datatype: bool',
+             '#   description: descr_bool',
+             '#   meta: {meta bool: 1}',
+             '# - name: int64',
+             '#   unit: m / s',
+             '#   datatype: int64',
+             '#   description: descr_int64',
+             '#   meta: {meta int64: 1}',
+             '# - name: float64',
+             '#   unit: m / s',
+             '#   datatype: float64',
+             '#   description: descr_float64',
+             '#   meta: {meta float64: 1}',
+             '# - name: str',
+             '#   unit: m / s',
+             '#   datatype: string',
+             '#   description: descr_str',
+             '#   meta: {meta str: 1}',
+             '# meta: !!omap',
+             '# - comments: [comment1, comment2]',
+             '# schema: astropy-2.0',
+             'bool int64 float64 str',
+             'False 0 0.0 "ab 0"',
+             'True 1 1.0 "ab, 1"',
+             'False 2 2.0 ab2']
+
+    out = StringIO()
+    t.write(out, format='ascii.ecsv')
+    assert out.getvalue().splitlines() == lines
+
+
+@pytest.mark.skipif('not HAS_YAML')
+def test_write_read_roundtrip():
+    """
+    Write a full-featured table with all types and see that it round-trips on
+    readback. Use both space and comma delimiters.
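+
+    The read calls below are deliberately redundant: they exercise the
+    Table.read and ascii.read entry points both with the format forced to
+    'ecsv' and with format guessing left on.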
+ """ + t = T_DTYPES + for delimiter in DELIMITERS: + out = StringIO() + t.write(out, format='ascii.ecsv', delimiter=delimiter) + + t2s = [Table.read(out.getvalue(), format='ascii.ecsv'), + Table.read(out.getvalue(), format='ascii'), + ascii.read(out.getvalue()), + ascii.read(out.getvalue(), format='ecsv', guess=False), + ascii.read(out.getvalue(), format='ecsv')] + for t2 in t2s: + assert t.meta == t2.meta + for name in t.colnames: + assert t[name].attrs_equal(t2[name]) + assert np.all(t[name] == t2[name]) + + +@pytest.mark.skipif('not HAS_YAML') +def test_bad_delimiter(): + """ + Passing a delimiter other than space or comma gives an exception + """ + out = StringIO() + with pytest.raises(ValueError) as err: + T_DTYPES.write(out, format='ascii.ecsv', delimiter='|') + assert 'only space and comma are allowed' in str(err.value) + + +@pytest.mark.skipif('not HAS_YAML') +def test_bad_header_start(): + """ + Bad header without initial # %ECSV x.x + """ + lines = copy.copy(SIMPLE_LINES) + lines[0] = '# %ECV 0.9' + with pytest.raises(ascii.InconsistentTableError): + Table.read('\n'.join(lines), format='ascii.ecsv', guess=False) + + +@pytest.mark.skipif('not HAS_YAML') +def test_bad_delimiter_input(): + """ + Illegal delimiter in input + """ + lines = copy.copy(SIMPLE_LINES) + lines.insert(2, '# delimiter: |') + with pytest.raises(ValueError) as err: + Table.read('\n'.join(lines), format='ascii.ecsv', guess=False) + assert 'only space and comma are allowed' in str(err.value) + + +@pytest.mark.skipif('not HAS_YAML') +def test_multidim_input(): + """ + Multi-dimensional column in input + """ + t = Table([np.arange(4).reshape(2, 2)], names=['a']) + out = StringIO() + with pytest.raises(ValueError) as err: + t.write(out, format='ascii.ecsv') + assert 'ECSV format does not support multidimensional column' in str(err.value) + + +@pytest.mark.skipif('not HAS_YAML') +def test_round_trip_empty_table(): + """Test fix in #5010 for issue #5009 (ECSV fails for empty type with bool type)""" + t = Table(dtype=[bool, 'i', 'f'], names=['a', 'b', 'c']) + out = StringIO() + t.write(out, format='ascii.ecsv') + t2 = Table.read(out.getvalue(), format='ascii.ecsv') + assert t.dtype == t2.dtype + assert len(t2) == 0 + + +@pytest.mark.skipif('not HAS_YAML') +def test_csv_ecsv_colnames_mismatch(): + """ + Test that mismatch in column names from normal CSV header vs. + ECSV YAML header raises the expected exception. + """ + lines = copy.copy(SIMPLE_LINES) + header_index = lines.index('a b c') + lines[header_index] = 'a b d' + with pytest.raises(ValueError) as err: + ascii.read(lines, format='ecsv') + assert "column names from ECSV header ['a', 'b', 'c']" in str(err) + + +@pytest.mark.skipif('not HAS_YAML') +def test_regression_5604(): + """ + See https://github.com/astropy/astropy/issues/5604 for more. 
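+
+    Sketch of the expectation: Unit and Quantity objects stored in .meta
+    should serialize into the ECSV YAML header as !astropy.units.* tags
+    instead of raising on write.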
+ """ + t = Table() + t.meta = {"foo": 5*u.km, "foo2": u.s} + t["bar"] = [7]*u.km + + out = StringIO() + t.write(out, format="ascii.ecsv") + + assert '!astropy.units.Unit' in out.getvalue() + assert '!astropy.units.Quantity' in out.getvalue() + + +def assert_objects_equal(obj1, obj2, attrs, compare_class=True): + if compare_class: + assert obj1.__class__ is obj2.__class__ + + info_attrs = ['info.name', 'info.format', 'info.unit', 'info.description'] + for attr in attrs + info_attrs: + a1 = obj1 + a2 = obj2 + for subattr in attr.split('.'): + try: + a1 = getattr(a1, subattr) + a2 = getattr(a2, subattr) + except AttributeError: + a1 = a1[subattr] + a2 = a2[subattr] + + if isinstance(a1, np.ndarray) and a1.dtype.kind == 'f': + assert quantity_allclose(a1, a2, rtol=1e-10) + else: + assert np.all(a1 == a2) + + +el = EarthLocation(x=[1, 2] * u.km, y=[3, 4] * u.km, z=[5, 6] * u.km) +sc = SkyCoord([1, 2], [3, 4], unit='deg,deg', frame='fk4', + obstime='J1990.5') +scc = sc.copy() +scc.representation = 'cartesian' +tm = Time([51000.5, 51001.5], format='mjd', scale='tai', precision=5, location=el[0]) +tm2 = Time(tm, format='iso') +tm3 = Time(tm, location=el) +tm3.info.serialize_method['ecsv'] = 'jd1_jd2' + + +mixin_cols = { + 'tm': tm, + 'tm2': tm2, + 'tm3': tm3, + 'dt': TimeDelta([1, 2] * u.day), + 'sc': sc, + 'scc': scc, + 'scd': SkyCoord([1, 2], [3, 4], [5, 6], unit='deg,deg,m', frame='fk4', + obstime=['J1990.5'] * 2), + 'q': [1, 2] * u.m, + 'lat': Latitude([1, 2] * u.deg), + 'lon': Longitude([1, 2] * u.deg, wrap_angle=180.*u.deg), + 'ang': Angle([1, 2] * u.deg), + 'el': el, + # 'nd': NdarrayMixin(el) # not supported yet +} + +time_attrs = ['value', 'shape', 'format', 'scale', 'precision', + 'in_subfmt', 'out_subfmt', 'location'] +compare_attrs = { + 'c1': ['data'], + 'c2': ['data'], + 'tm': time_attrs, + 'tm2': time_attrs, + 'tm3': time_attrs, + 'dt': ['shape', 'value', 'format', 'scale'], + 'sc': ['ra', 'dec', 'representation', 'frame.name'], + 'scc': ['x', 'y', 'z', 'representation', 'frame.name'], + 'scd': ['ra', 'dec', 'distance', 'representation', 'frame.name'], + 'q': ['value', 'unit'], + 'lon': ['value', 'unit', 'wrap_angle'], + 'lat': ['value', 'unit'], + 'ang': ['value', 'unit'], + 'el': ['x', 'y', 'z', 'ellipsoid'], + 'nd': ['x', 'y', 'z'], +} + + +@pytest.mark.skipif('not HAS_YAML') +def test_ecsv_mixins_ascii_read_class(): + """Ensure that ascii.read(ecsv_file) returns the correct class + (QTable if any Quantity subclasses, Table otherwise). + """ + # Make a table with every mixin type except Quantities + t = QTable({name: col for name, col in mixin_cols.items() + if not isinstance(col.info, QuantityInfo)}) + out = StringIO() + t.write(out, format="ascii.ecsv") + t2 = ascii.read(out.getvalue(), format='ecsv') + assert type(t2) is Table + + # Add a single quantity column + t['lon'] = mixin_cols['lon'] + + out = StringIO() + t.write(out, format="ascii.ecsv") + t2 = ascii.read(out.getvalue(), format='ecsv') + assert type(t2) is QTable + + +@pytest.mark.skipif('not HAS_YAML') +def test_ecsv_mixins_qtable_to_table(): + """Test writing as QTable and reading as Table. Ensure correct classes + come out. 
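+
+    Reading back as a plain Table downgrades each Quantity column to a
+    Column plus a unit attribute, which is why compare_class is relaxed in
+    the loop below.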
+ """ + names = sorted(mixin_cols) + + t = QTable([mixin_cols[name] for name in names], names=names) + out = StringIO() + t.write(out, format="ascii.ecsv") + t2 = Table.read(out.getvalue(), format='ascii.ecsv') + + assert t.colnames == t2.colnames + + for name, col in t.columns.items(): + col2 = t2[name] + attrs = compare_attrs[name] + compare_class = True + + if isinstance(col.info, QuantityInfo): + # Downgrade Quantity to Column + unit + assert type(col2) is Column + attrs = ['unit'] # Other attrs are lost + compare_class = False + + assert_objects_equal(col, col2, attrs, compare_class) + + +@pytest.mark.skipif('not HAS_YAML') +@pytest.mark.parametrize('table_cls', (Table, QTable)) +def test_ecsv_mixins_as_one(table_cls): + """Test write/read all cols at once and validate intermediate column names""" + names = sorted(mixin_cols) + + serialized_names = ['ang', + 'dt', + 'el.x', 'el.y', 'el.z', + 'lat', + 'lon', + 'q', + 'sc.ra', 'sc.dec', + 'scc.x', 'scc.y', 'scc.z', + 'scd.ra', 'scd.dec', 'scd.distance', + 'scd.obstime', + 'tm', # serialize_method is formatted_value + 'tm2', # serialize_method is formatted_value + 'tm3.jd1', 'tm3.jd2', # serialize is jd1_jd2 + 'tm3.location.x', 'tm3.location.y', 'tm3.location.z'] + + t = table_cls([mixin_cols[name] for name in names], names=names) + + out = StringIO() + t.write(out, format="ascii.ecsv") + t2 = table_cls.read(out.getvalue(), format='ascii.ecsv') + + assert t.colnames == t2.colnames + + # Read as a ascii.basic table (skip all the ECSV junk) + t3 = table_cls.read(out.getvalue(), format='ascii.basic') + assert t3.colnames == serialized_names + + +@pytest.mark.skipif('not HAS_YAML') +@pytest.mark.parametrize('name_col', list(mixin_cols.items())) +@pytest.mark.parametrize('table_cls', (Table, QTable)) +def test_ecsv_mixins_per_column(table_cls, name_col): + """Test write/read one col at a time and do detailed validation""" + name, col = name_col + + c = [1.0, 2.0] + t = table_cls([c, col, c], names=['c1', name, 'c2']) + t[name].info.description = 'description' + + if not t.has_mixin_columns: + pytest.skip('column is not a mixin (e.g. Quantity subclass in Table)') + + if isinstance(t[name], NdarrayMixin): + pytest.xfail('NdarrayMixin not supported') + + out = StringIO() + t.write(out, format="ascii.ecsv") + t2 = table_cls.read(out.getvalue(), format='ascii.ecsv') + + assert t.colnames == t2.colnames + + for colname in t.colnames: + assert_objects_equal(t[colname], t2[colname], compare_attrs[colname]) + + # Special case to make sure Column type doesn't leak into Time class data + if name.startswith('tm'): + assert t2[name]._time.jd1.__class__ is np.ndarray + assert t2[name]._time.jd2.__class__ is np.ndarray diff --git a/astropy/io/ascii/tests/test_fixedwidth.py b/astropy/io/ascii/tests/test_fixedwidth.py new file mode 100644 index 0000000..c51324d --- /dev/null +++ b/astropy/io/ascii/tests/test_fixedwidth.py @@ -0,0 +1,481 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +# TEST_UNICODE_LITERALS + +import pytest + +from ....extern.six.moves import cStringIO as StringIO +from ... 
import ascii +from ..core import InconsistentTableError +from .common import (assert_equal, assert_almost_equal, + setup_function, teardown_function) + + +def assert_equal_splitlines(arg1, arg2): + assert_equal(arg1.splitlines(), arg2.splitlines()) + + +def test_read_normal(): + """Nice, typical fixed format table""" + table = """ +# comment (with blank line above) +| Col1 | Col2 | +| 1.2 | "hello" | +| 2.4 |'s worlds| +""" + reader = ascii.get_reader(Reader=ascii.FixedWidth) + dat = reader.read(table) + assert_equal(dat.colnames, ['Col1', 'Col2']) + assert_almost_equal(dat[1][0], 2.4) + assert_equal(dat[0][1], '"hello"') + assert_equal(dat[1][1], "'s worlds") + + +def test_read_normal_names(): + """Nice, typical fixed format table with col names provided""" + table = """ +# comment (with blank line above) +| Col1 | Col2 | +| 1.2 | "hello" | +| 2.4 |'s worlds| +""" + reader = ascii.get_reader(Reader=ascii.FixedWidth, + names=('name1', 'name2')) + dat = reader.read(table) + assert_equal(dat.colnames, ['name1', 'name2']) + assert_almost_equal(dat[1][0], 2.4) + + +def test_read_normal_names_include(): + """Nice, typical fixed format table with col names provided""" + table = """ +# comment (with blank line above) +| Col1 | Col2 | Col3 | +| 1.2 | "hello" | 3 | +| 2.4 |'s worlds| 7 | +""" + reader = ascii.get_reader(Reader=ascii.FixedWidth, + names=('name1', 'name2', 'name3'), + include_names=('name1', 'name3')) + dat = reader.read(table) + assert_equal(dat.colnames, ['name1', 'name3']) + assert_almost_equal(dat[1][0], 2.4) + assert_equal(dat[0][1], 3) + + +def test_read_normal_exclude(): + """Nice, typical fixed format table with col name excluded""" + table = """ +# comment (with blank line above) +| Col1 | Col2 | +| 1.2 | "hello" | +| 2.4 |'s worlds| +""" + reader = ascii.get_reader(Reader=ascii.FixedWidth, + exclude_names=('Col1',)) + dat = reader.read(table) + assert_equal(dat.colnames, ['Col2']) + assert_equal(dat[1][0], "'s worlds") + + +def test_read_weird(): + """Weird input table with data values chopped by col extent """ + table = """ + Col1 | Col2 | + 1.2 "hello" + 2.4 sdf's worlds +""" + reader = ascii.get_reader(Reader=ascii.FixedWidth) + dat = reader.read(table) + assert_equal(dat.colnames, ['Col1', 'Col2']) + assert_almost_equal(dat[1][0], 2.4) + assert_equal(dat[0][1], '"hel') + assert_equal(dat[1][1], "df's wo") + + +def test_read_double(): + """Table with double delimiters""" + table = """ +|| Name || Phone || TCP|| +| John | 555-1234 |192.168.1.10X| +| Mary | 555-2134 |192.168.1.12X| +| Bob | 555-4527 | 192.168.1.9X| +""" + dat = ascii.read(table, Reader=ascii.FixedWidth, guess=False) + assert_equal(tuple(dat.dtype.names), ('Name', 'Phone', 'TCP')) + assert_equal(dat[1][0], "Mary") + assert_equal(dat[0][1], "555-1234") + assert_equal(dat[2][2], "192.168.1.9") + + +def test_read_space_delimiter(): + """Table with space delimiter""" + table = """ + Name --Phone- ----TCP----- + John 555-1234 192.168.1.10 + Mary 555-2134 192.168.1.12 + Bob 555-4527 192.168.1.9 +""" + dat = ascii.read(table, Reader=ascii.FixedWidth, guess=False, + delimiter=' ') + assert_equal(tuple(dat.dtype.names), ('Name', '--Phone-', '----TCP-----')) + assert_equal(dat[1][0], "Mary") + assert_equal(dat[0][1], "555-1234") + assert_equal(dat[2][2], "192.168.1.9") + + +def test_read_no_header_autocolumn(): + """Table with no header row and auto-column naming""" + table = """ +| John | 555-1234 |192.168.1.10| +| Mary | 555-2134 |192.168.1.12| +| Bob | 555-4527 | 192.168.1.9| +""" + dat = ascii.read(table, 
Reader=ascii.FixedWidth, guess=False, + header_start=None, data_start=0) + assert_equal(tuple(dat.dtype.names), ('col1', 'col2', 'col3')) + assert_equal(dat[1][0], "Mary") + assert_equal(dat[0][1], "555-1234") + assert_equal(dat[2][2], "192.168.1.9") + + +def test_read_no_header_names(): + """Table with no header row and with col names provided. Second + and third rows also have hanging spaces after final |.""" + table = """ +| John | 555-1234 |192.168.1.10| +| Mary | 555-2134 |192.168.1.12| +| Bob | 555-4527 | 192.168.1.9| +""" + dat = ascii.read(table, Reader=ascii.FixedWidth, guess=False, + header_start=None, data_start=0, + names=('Name', 'Phone', 'TCP')) + assert_equal(tuple(dat.dtype.names), ('Name', 'Phone', 'TCP')) + assert_equal(dat[1][0], "Mary") + assert_equal(dat[0][1], "555-1234") + assert_equal(dat[2][2], "192.168.1.9") + + +def test_read_no_header_autocolumn_NoHeader(): + """Table with no header row and auto-column naming""" + table = """ +| John | 555-1234 |192.168.1.10| +| Mary | 555-2134 |192.168.1.12| +| Bob | 555-4527 | 192.168.1.9| +""" + dat = ascii.read(table, Reader=ascii.FixedWidthNoHeader) + assert_equal(tuple(dat.dtype.names), ('col1', 'col2', 'col3')) + assert_equal(dat[1][0], "Mary") + assert_equal(dat[0][1], "555-1234") + assert_equal(dat[2][2], "192.168.1.9") + + +def test_read_no_header_names_NoHeader(): + """Table with no header row and with col names provided. Second + and third rows also have hanging spaces after final |.""" + table = """ +| John | 555-1234 |192.168.1.10| +| Mary | 555-2134 |192.168.1.12| +| Bob | 555-4527 | 192.168.1.9| +""" + dat = ascii.read(table, Reader=ascii.FixedWidthNoHeader, + names=('Name', 'Phone', 'TCP')) + assert_equal(tuple(dat.dtype.names), ('Name', 'Phone', 'TCP')) + assert_equal(dat[1][0], "Mary") + assert_equal(dat[0][1], "555-1234") + assert_equal(dat[2][2], "192.168.1.9") + + +def test_read_col_starts(): + """Table with no delimiter with column start and end values specified.""" + table = """ +# 5 9 17 18 28 +# | | || | + John 555- 1234 192.168.1.10 + Mary 555- 2134 192.168.1.12 + Bob 555- 4527 192.168.1.9 +""" + dat = ascii.read(table, Reader=ascii.FixedWidthNoHeader, + names=('Name', 'Phone', 'TCP'), + col_starts=(0, 9, 18), + col_ends=(5, 17, 28), + ) + assert_equal(tuple(dat.dtype.names), ('Name', 'Phone', 'TCP')) + assert_equal(dat[0][1], "555- 1234") + assert_equal(dat[1][0], "Mary") + assert_equal(dat[1][2], "192.168.1.") + assert_equal(dat[2][2], "192.168.1") # col_end=28 cuts this column off + + +def test_read_detect_col_starts_or_ends(): + """Table with no delimiter with only column start or end values specified""" + table = """ +#1 9 19 <== Column start indexes +#| | | <== Column start positions +#<------><--------><-------------> <== Inferred column positions + John 555- 1234 192.168.1.10 + Mary 555- 2134 192.168.1.123 + Bob 555- 4527 192.168.1.9 + Bill 555-9875 192.255.255.255 +""" + for kwargs in ({'col_starts': (1, 9, 19)}, + {'col_ends': (8, 18, 33)}): + dat = ascii.read(table, + Reader=ascii.FixedWidthNoHeader, + names=('Name', 'Phone', 'TCP'), + **kwargs) + assert_equal(tuple(dat.dtype.names), ('Name', 'Phone', 'TCP')) + assert_equal(dat[0][1], "555- 1234") + assert_equal(dat[1][0], "Mary") + assert_equal(dat[1][2], "192.168.1.123") + assert_equal(dat[3][2], "192.255.255.255") + + +table = """\ +| Col1 | Col2 | Col3 | Col4 | +| 1.2 | "hello" | 1 | a | +| 2.4 | 's worlds | 2 | 2 | +""" +dat = ascii.read(table, Reader=ascii.FixedWidth) + + +def test_write_normal(): + """Write a table as a normal fixed 
width table."""
+    out = StringIO()
+    ascii.write(dat, out, Writer=ascii.FixedWidth)
+    assert_equal_splitlines(out.getvalue(), """\
+| Col1 |      Col2 | Col3 | Col4 |
+|  1.2 |   "hello" |    1 |    a |
+|  2.4 | 's worlds |    2 |    2 |
+""")
+
+
+def test_write_fill_values():
+    """Write a table as a fixed width table with output fill values."""
+    out = StringIO()
+    ascii.write(dat, out, Writer=ascii.FixedWidth,
+                fill_values=('a', 'N/A'))
+    assert_equal_splitlines(out.getvalue(), """\
+| Col1 |      Col2 | Col3 | Col4 |
+|  1.2 |   "hello" |    1 |  N/A |
+|  2.4 | 's worlds |    2 |    2 |
+""")
+
+
+def test_write_no_pad():
+    """Write a table as a fixed width table with no padding."""
+    out = StringIO()
+    ascii.write(dat, out, Writer=ascii.FixedWidth,
+                delimiter_pad=None)
+    assert_equal_splitlines(out.getvalue(), """\
+|Col1|     Col2|Col3|Col4|
+| 1.2|  "hello"|   1|   a|
+| 2.4|'s worlds|   2|   2|
+""")
+
+
+def test_write_no_bookend():
+    """Write a table as a fixed width table with no bookend."""
+    out = StringIO()
+    ascii.write(dat, out, Writer=ascii.FixedWidth, bookend=False)
+    assert_equal_splitlines(out.getvalue(), """\
+Col1 |      Col2 | Col3 | Col4
+ 1.2 |   "hello" |    1 |    a
+ 2.4 | 's worlds |    2 |    2
+""")
+
+
+def test_write_no_delimiter():
+    """Write a table as a fixed width table with no delimiter."""
+    out = StringIO()
+    ascii.write(dat, out, Writer=ascii.FixedWidth, bookend=False,
+                delimiter=None)
+    assert_equal_splitlines(out.getvalue(), """\
+Col1      Col2 Col3 Col4
+ 1.2   "hello"    1    a
+ 2.4 's worlds    2    2
+""")
+
+
+def test_write_noheader_normal():
+    """Write a table as a normal fixed width table without a header."""
+    out = StringIO()
+    ascii.write(dat, out, Writer=ascii.FixedWidthNoHeader)
+    assert_equal_splitlines(out.getvalue(), """\
+| 1.2 |   "hello" | 1 | a |
+| 2.4 | 's worlds | 2 | 2 |
+""")
+
+
+def test_write_noheader_no_pad():
+    """Write a table as a fixed width table with no padding and no header."""
+    out = StringIO()
+    ascii.write(dat, out, Writer=ascii.FixedWidthNoHeader,
+                delimiter_pad=None)
+    assert_equal_splitlines(out.getvalue(), """\
+|1.2|  "hello"|1|a|
+|2.4|'s worlds|2|2|
+""")
+
+
+def test_write_noheader_no_bookend():
+    """Write a table as a fixed width table with no bookend and no header."""
+    out = StringIO()
+    ascii.write(dat, out, Writer=ascii.FixedWidthNoHeader,
+                bookend=False)
+    assert_equal_splitlines(out.getvalue(), """\
+1.2 |   "hello" | 1 | a
+2.4 | 's worlds | 2 | 2
+""")
+
+
+def test_write_noheader_no_delimiter():
+    """Write a table as a fixed width table with no delimiter and no header."""
+    out = StringIO()
+    ascii.write(dat, out, Writer=ascii.FixedWidthNoHeader, bookend=False,
+                delimiter=None)
+    assert_equal_splitlines(out.getvalue(), """\
+1.2   "hello" 1 a
+2.4 's worlds 2 2
+""")
+
+
+def test_write_formats():
+    """Write a table as a fixed width table with custom column formats."""
+    out = StringIO()
+    ascii.write(dat, out, Writer=ascii.FixedWidth,
+                formats={'Col1': '%-8.3f', 'Col2': '%-15s'})
+    assert_equal_splitlines(out.getvalue(), """\
+|     Col1 |            Col2 | Col3 | Col4 |
+| 1.200    | "hello"         |    1 |    a |
+| 2.400    | 's worlds       |    2 |    2 |
+""")
+
+
+def test_read_twoline_normal():
+    """Typical fixed format table with two header lines (with some cruft
+    thrown in to test column positioning)."""
+    table = """
+  Col1    Col2
+  ----  ---------
+   1.2xx"hello"
+  2.4   's worlds
+"""
+    dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine)
+    assert_equal(dat.dtype.names, ('Col1', 'Col2'))
+    assert_almost_equal(dat[1][0], 2.4)
+    assert_equal(dat[0][1], '"hello"')
+    assert_equal(dat[1][1], "'s worlds")
+
+
+def test_read_twoline_ReST():
+    """Read restructured text table"""
+    table = """
+======= ===========
+   Col1        Col2
+======= ===========
+    1.2     "hello"
+    2.4   's worlds
+======= ===========
+"""
+    dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine,
+                     header_start=1, position_line=2, data_end=-1)
+    assert_equal(dat.dtype.names, ('Col1', 'Col2'))
+    assert_almost_equal(dat[1][0], 2.4)
+    assert_equal(dat[0][1], '"hello"')
+    assert_equal(dat[1][1], "'s worlds")
+
+
+def test_read_twoline_human():
+    """Read text table designed for humans and test having position line
+    before the header line"""
+    table = """
++------+----------+
+| Col1 | Col2     |
++------|----------+
+|  1.2 | "hello"  |
+|  2.4 | 's worlds|
++------+----------+
+"""
+    dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine,
+                     delimiter='+',
+                     header_start=1, position_line=0,
+                     data_start=3, data_end=-1)
+    assert_equal(dat.dtype.names, ('Col1', 'Col2'))
+    assert_almost_equal(dat[1][0], 2.4)
+    assert_equal(dat[0][1], '"hello"')
+    assert_equal(dat[1][1], "'s worlds")
+
+
+def test_read_twoline_fail():
+    """Test failure if too many different characters are on the position line.
+
+    The position line shall consist of only one character in addition to
+    the delimiter.
+    """
+    table = """
+| Col1 | Col2     |
+|------|==========|
+|  1.2 | "hello"  |
+|  2.4 | 's worlds|
+"""
+    with pytest.raises(InconsistentTableError) as excinfo:
+        dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine,
+                         delimiter='|', guess=False)
+    assert 'Position line should only contain delimiters and one other character' in str(excinfo.value)
+
+
+def test_read_twoline_wrong_marker():
+    '''Test failure when the position line uses characters prone to ambiguity.
+
+    Characters in the position line must be part of an allowed set because
+    normal letters or numbers will lead to ambiguous tables.
+    '''
+    table = """
+| Col1 | Col2     |
+|aaaaaa|aaaaaaaaaa|
+|  1.2 | "hello"  |
+|  2.4 | 's worlds|
+"""
+    with pytest.raises(InconsistentTableError) as excinfo:
+        dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine,
+                         delimiter='|', guess=False)
+    assert 'Characters in position line must be part' in str(excinfo.value)
+
+
+def test_write_twoline_normal():
+    """Write a table as a normal fixed width table with two header lines."""
+    out = StringIO()
+    ascii.write(dat, out, Writer=ascii.FixedWidthTwoLine)
+    assert_equal_splitlines(out.getvalue(), """\
+Col1      Col2 Col3 Col4
+---- --------- ---- ----
+ 1.2   "hello"    1    a
+ 2.4 's worlds    2    2
+""")
+
+
+def test_write_twoline_no_pad():
+    """Write a table as a fixed width table with padded delimiters and a
+    custom position character."""
+    out = StringIO()
+    ascii.write(dat, out, Writer=ascii.FixedWidthTwoLine,
+                delimiter_pad=' ', position_char='=')
+    assert_equal_splitlines(out.getvalue(), """\
+Col1        Col2   Col3   Col4
+====   =========   ====   ====
+ 1.2     "hello"      1      a
+ 2.4   's worlds      2      2
+""")
+
+
+def test_write_twoline_no_bookend():
+    """Write a table as a fixed width table with bookends and a pipe
+    delimiter."""
+    out = StringIO()
+    ascii.write(dat, out, Writer=ascii.FixedWidthTwoLine,
+                bookend=True, delimiter='|')
+    assert_equal_splitlines(out.getvalue(), """\
+|Col1|     Col2|Col3|Col4|
+|----|---------|----|----|
+| 1.2|  "hello"|   1|   a|
+| 2.4|'s worlds|   2|   2|
+""")
diff --git a/astropy/io/ascii/tests/test_html.py b/astropy/io/ascii/tests/test_html.py
new file mode 100644
index 0000000..9b42946
--- /dev/null
+++ b/astropy/io/ascii/tests/test_html.py
@@ -0,0 +1,730 @@
+# -*- coding: utf-8 -*-
+
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+
+"""
+This module tests some of the methods related to the ``HTML``
+reader/writer and aims to document its functionality.
+
+Requires `BeautifulSoup <http://www.crummy.com/software/BeautifulSoup/>`_
+to be installed.
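+
+Some tests below additionally depend on the `bleach
+<https://pypi.org/project/bleach/>`_ package (checked via ``HAS_BLEACH``)
+for the raw-HTML sanitization cases.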
+""" + +from .. import html +from .. import core +from ....table import Table + +import pytest +import numpy as np + +from .common import setup_function, teardown_function +from ... import ascii +from ....extern.six.moves import range, cStringIO as StringIO +from ....utils.xml.writer import HAS_BLEACH + +# Check to see if the BeautifulSoup dependency is present. +try: + + from bs4 import BeautifulSoup, FeatureNotFound + HAS_BEAUTIFUL_SOUP = True +except ImportError: + HAS_BEAUTIFUL_SOUP = False + + +@pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP') +def test_soupstring(): + """ + Test to make sure the class SoupString behaves properly. + """ + + soup = BeautifulSoup('

    foo

    ') + soup_str = html.SoupString(soup) + assert isinstance(soup_str, str) + assert isinstance(soup_str, html.SoupString) + assert soup_str == '

    foo

    ' + assert soup_str.soup is soup + + +def test_listwriter(): + """ + Test to make sure the class ListWriter behaves properly. + """ + + lst = [] + writer = html.ListWriter(lst) + + for i in range(5): + writer.write(i) + for ch in 'abcde': + writer.write(ch) + + assert lst == [0, 1, 2, 3, 4, 'a', 'b', 'c', 'd', 'e'] + + +@pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP') +def test_identify_table(): + """ + Test to make sure that identify_table() returns whether the + given BeautifulSoup tag is the correct table to process. + """ + + # Should return False on non- tags and None + soup = BeautifulSoup('') + assert html.identify_table(soup, {}, 0) is False + assert html.identify_table(None, {}, 0) is False + + soup = BeautifulSoup('
    ' + '
    A
    B
    ').table + assert html.identify_table(soup, {}, 2) is False + assert html.identify_table(soup, {}, 1) is True # Default index of 1 + + # Same tests, but with explicit parameter + assert html.identify_table(soup, {'table_id': 2}, 1) is False + assert html.identify_table(soup, {'table_id': 1}, 1) is True + + # Test identification by string ID + assert html.identify_table(soup, {'table_id': 'bar'}, 1) is False + assert html.identify_table(soup, {'table_id': 'foo'}, 1) is True + + +@pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP') +def test_missing_data(): + """ + Test reading a table with missing data + """ + # First with default where blank => '0' + table_in = ['', + '', + '', + '', + '
    A
    1
    '] + dat = Table.read(table_in, format='ascii.html') + assert dat.masked is True + assert np.all(dat['A'].mask == [True, False]) + assert dat['A'].dtype.kind == 'i' + + # Now with a specific value '...' => missing + table_in = ['', + '', + '', + '', + '
    A
    ...
    1
    '] + dat = Table.read(table_in, format='ascii.html', fill_values=[('...', '0')]) + assert dat.masked is True + assert np.all(dat['A'].mask == [True, False]) + assert dat['A'].dtype.kind == 'i' + + +@pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP') +def test_rename_cols(): + """ + Test reading a table and renaming cols + """ + table_in = ['', + '', + '', + '
    A B
    12
    '] + + # Swap column names + dat = Table.read(table_in, format='ascii.html', names=['B', 'A']) + assert dat.colnames == ['B', 'A'] + assert len(dat) == 1 + + # Swap column names and only include A (the renamed version) + dat = Table.read(table_in, format='ascii.html', names=['B', 'A'], include_names=['A']) + assert dat.colnames == ['A'] + assert len(dat) == 1 + assert np.all(dat['A'] == 2) + + +@pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP') +def test_no_names(): + """ + Test reading a table witn no column header + """ + table_in = ['', + '', + '', + '
    1
    2
    '] + dat = Table.read(table_in, format='ascii.html') + assert dat.colnames == ['col1'] + assert len(dat) == 2 + + dat = Table.read(table_in, format='ascii.html', names=['a']) + assert dat.colnames == ['a'] + assert len(dat) == 2 + + +@pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP') +def test_identify_table_fail(): + """ + Raise an exception with an informative error message if table_id + is not found. + """ + table_in = ['', + '
    A
    B
    '] + + with pytest.raises(core.InconsistentTableError) as err: + Table.read(table_in, format='ascii.html', htmldict={'table_id': 'bad_id'}, + guess=False) + assert str(err).endswith("ERROR: HTML table id 'bad_id' not found") + + with pytest.raises(core.InconsistentTableError) as err: + Table.read(table_in, format='ascii.html', htmldict={'table_id': 3}, + guess=False) + assert str(err).endswith("ERROR: HTML table number 3 not found") + + +@pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP') +def test_backend_parsers(): + """ + Make sure the user can specify which back-end parser to use + and that an error is raised if the parser is invalid. + """ + for parser in ('lxml', 'xml', 'html.parser', 'html5lib'): + try: + table = Table.read('t/html2.html', format='ascii.html', + htmldict={'parser': parser}, guess=False) + except FeatureNotFound: + if parser == 'html.parser': + raise + # otherwise ignore if the dependency isn't present + + # reading should fail if the parser is invalid + with pytest.raises(FeatureNotFound): + Table.read('t/html2.html', format='ascii.html', + htmldict={'parser': 'foo'}, guess=False) + + +@pytest.mark.skipif('HAS_BEAUTIFUL_SOUP') +def test_htmlinputter_no_bs4(): + """ + This should return an OptionalTableImportError if BeautifulSoup + is not installed. + """ + + inputter = html.HTMLInputter() + with pytest.raises(core.OptionalTableImportError): + inputter.process_lines([]) + + +@pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP') +def test_htmlinputter(): + """ + Test to ensure that HTMLInputter correctly converts input + into a list of SoupStrings representing table elements. + """ + + f = 't/html.html' + with open(f) as fd: + table = fd.read() + + inputter = html.HTMLInputter() + inputter.html = {} + + # In absence of table_id, defaults to the first table + expected = ['Column 1Column 2Column 3', + '1a1.05', + '2b2.75', + '3c-1.25'] + assert [str(x) for x in inputter.get_lines(table)] == expected + + # Should raise an InconsistentTableError if the table is not found + inputter.html = {'table_id': 4} + with pytest.raises(core.InconsistentTableError): + inputter.get_lines(table) + + # Identification by string ID + inputter.html['table_id'] = 'second' + expected = ['Column AColumn BColumn C', + '4d10.5', + '5e27.5', + '6f-12.5'] + assert [str(x) for x in inputter.get_lines(table)] == expected + + # Identification by integer index + inputter.html['table_id'] = 3 + expected = ['C1C2C3', + '7g105.0', + '8h275.0', + '9i-125.0'] + assert [str(x) for x in inputter.get_lines(table)] == expected + + +@pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP') +def test_htmlsplitter(): + """ + Test to make sure that HTMLSplitter correctly inputs lines + of type SoupString to return a generator that gives all + header and data elements. + """ + + splitter = html.HTMLSplitter() + + lines = [html.SoupString(BeautifulSoup('
    Col 1Col 2
    ').tr), + html.SoupString(BeautifulSoup('
    Data 1Data 2
    ').tr)] + expected_data = [['Col 1', 'Col 2'], ['Data 1', 'Data 2']] + assert list(splitter(lines)) == expected_data + + # Make sure the presence of a non-SoupString triggers a TypeError + lines.append('Data 3Data 4') + with pytest.raises(TypeError): + list(splitter(lines)) + + # Make sure that passing an empty list triggers an error + with pytest.raises(core.InconsistentTableError): + list(splitter([])) + + +@pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP') +def test_htmlheader_start(): + """ + Test to ensure that the start_line method of HTMLHeader + returns the first line of header data. Uses t/html.html + for sample input. + """ + + f = 't/html.html' + with open(f) as fd: + table = fd.read() + + inputter = html.HTMLInputter() + inputter.html = {} + header = html.HTMLHeader() + + lines = inputter.get_lines(table) + assert str(lines[header.start_line(lines)]) == \ + 'Column 1Column 2Column 3' + inputter.html['table_id'] = 'second' + lines = inputter.get_lines(table) + assert str(lines[header.start_line(lines)]) == \ + 'Column AColumn BColumn C' + inputter.html['table_id'] = 3 + lines = inputter.get_lines(table) + assert str(lines[header.start_line(lines)]) == \ + 'C1C2C3' + + # start_line should return None if no valid header is found + lines = [html.SoupString(BeautifulSoup('
    Data
    ').tr), + html.SoupString(BeautifulSoup('

    Text

    ').p)] + assert header.start_line(lines) is None + + # Should raise an error if a non-SoupString is present + lines.append('Header') + with pytest.raises(TypeError): + header.start_line(lines) + + +@pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP') +def test_htmldata(): + """ + Test to ensure that the start_line and end_lines methods + of HTMLData returns the first line of table data. Uses + t/html.html for sample input. + """ + + f = 't/html.html' + with open(f) as fd: + table = fd.read() + + inputter = html.HTMLInputter() + inputter.html = {} + data = html.HTMLData() + + lines = inputter.get_lines(table) + assert str(lines[data.start_line(lines)]) == \ + '1a1.05' + # end_line returns the index of the last data element + 1 + assert str(lines[data.end_line(lines) - 1]) == \ + '3c-1.25' + + inputter.html['table_id'] = 'second' + lines = inputter.get_lines(table) + assert str(lines[data.start_line(lines)]) == \ + '4d10.5' + assert str(lines[data.end_line(lines) - 1]) == \ + '6f-12.5' + + inputter.html['table_id'] = 3 + lines = inputter.get_lines(table) + assert str(lines[data.start_line(lines)]) == \ + '7g105.0' + assert str(lines[data.end_line(lines) - 1]) == \ + '9i-125.0' + + # start_line should raise an error if no table data exists + lines = [html.SoupString(BeautifulSoup('
    ').div), + html.SoupString(BeautifulSoup('

    Text

    ').p)] + with pytest.raises(core.InconsistentTableError): + data.start_line(lines) + + # end_line should return None if no table data exists + assert data.end_line(lines) is None + + # Should raise an error if a non-SoupString is present + lines.append('Data') + with pytest.raises(TypeError): + data.start_line(lines) + with pytest.raises(TypeError): + data.end_line(lines) + + +def test_multicolumn_write(): + """ + Test to make sure that the HTML writer writes multidimensional + columns (those with iterable elements) using the colspan + attribute of . + """ + + col1 = [1, 2, 3] + col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)] + col3 = [('a', 'a', 'a'), ('b', 'b', 'b'), ('c', 'c', 'c')] + table = Table([col1, col2, col3], names=('C1', 'C2', 'C3')) + expected = """\ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    C1C2C3
    11.01.0aaa
    22.02.0bbb
    33.03.0ccc
    + + + """ + out = html.HTML().write(table)[0].strip() + assert out == expected.strip() + + +@pytest.mark.skipif('not HAS_BLEACH') +def test_multicolumn_write_escape(): + """ + Test to make sure that the HTML writer writes multidimensional + columns (those with iterable elements) using the colspan + attribute of . + """ + + col1 = [1, 2, 3] + col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)] + col3 = [('', '', 'a'), ('', 'b', 'b'), ('c', 'c', 'c')] + table = Table([col1, col2, col3], names=('C1', 'C2', 'C3')) + expected = """\ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    C1C2C3
    11.01.0a
    22.02.0bb
    33.03.0ccc
    + + + """ + out = html.HTML(htmldict={'raw_html_cols': 'C3'}).write(table)[0].strip() + assert out == expected.strip() + + +def test_write_no_multicols(): + """ + Test to make sure that the HTML writer will not use + multi-dimensional columns if the multicol parameter + is False. + """ + + col1 = [1, 2, 3] + col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)] + col3 = [('a', 'a', 'a'), ('b', 'b', 'b'), ('c', 'c', 'c')] + table = Table([col1, col2, col3], names=('C1', 'C2', 'C3')) + expected = """\ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    C1C2C3
    11.0 .. 1.0a .. a
    22.0 .. 2.0b .. b
    33.0 .. 3.0c .. c
    + + + """ + assert html.HTML({'multicol': False}).write(table)[0].strip() == \ + expected.strip() + + +@pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP') +def test_multicolumn_read(): + """ + Test to make sure that the HTML reader inputs multidimensional + columns (those with iterable elements) using the colspan + attribute of . + + Ensure that any string element within a multidimensional column + casts all elements to string prior to type conversion operations. + """ + + table = Table.read('t/html2.html', format='ascii.html') + str_type = np.dtype((np.str, 21)) + expected = Table(np.array([(['1', '2.5000000000000000001'], 3), + (['1a', '1'], 3.5)], + dtype=[('A', str_type, (2,)), ('B', 'x'], ['y']], names=['a', 'b']) + + # One column contains raw HTML (string input) + out = StringIO() + t.write(out, format='ascii.html', htmldict={'raw_html_cols': 'a'}) + expected = """\ + + x + <em>y</em> + """ + assert expected in out.getvalue() + + # One column contains raw HTML (list input) + out = StringIO() + t.write(out, format='ascii.html', htmldict={'raw_html_cols': ['a']}) + assert expected in out.getvalue() + + # Two columns contains raw HTML (list input) + out = StringIO() + t.write(out, format='ascii.html', htmldict={'raw_html_cols': ['a', 'b']}) + expected = """\ + + x + y + """ + assert expected in out.getvalue() + + +@pytest.mark.skipif('not HAS_BLEACH') +def test_raw_html_write_clean(): + """ + Test that columns can contain raw HTML which is not escaped. + """ + import bleach + + t = Table([[''], ['

    y

    '], ['y']], names=['a', 'b', 'c']) + + # Confirm that +""" % dict(sorting_script1=_SORTING_SCRIPT_PART_1, + sorting_script2=_SORTING_SCRIPT_PART_2) + +HTML_JS_SCRIPT = _SORTING_SCRIPT_PART_1 + _SORTING_SCRIPT_PART_2 + """ +$(document).ready(function() {{ + $('#{tid}').dataTable({{ + order: [], + pageLength: {display_length}, + lengthMenu: {display_length_menu}, + pagingType: "full_numbers", + columnDefs: [{{targets: {sort_columns}, type: "optionalnum"}}] + }}); +}} ); +""" + + +# Default CSS for the JSViewer writer +DEFAULT_CSS = """\ +body {font-family: sans-serif;} +table.dataTable {width: auto !important; margin: 0 !important;} +.dataTables_filter, .dataTables_paginate {float: left !important; margin-left:1em} +""" + + +# Default CSS used when rendering a table in the IPython notebook +DEFAULT_CSS_NB = """\ +table.dataTable {clear: both; width: auto !important; margin: 0 !important;} +.dataTables_info, .dataTables_length, .dataTables_filter, .dataTables_paginate{ +display: inline-block; margin-right: 1em; } +.paginate_button { margin-right: 5px; } +""" + + +class JSViewer(object): + """Provides an interactive HTML export of a Table. + + This class provides an interface to the `DataTables + `_ library, which allow to visualize interactively + an HTML table. It is used by the `~astropy.table.Table.show_in_browser` + method. + + Parameters + ---------- + use_local_files : bool, optional + Use local files or a CDN for JavaScript libraries. Default False. + display_length : int, optional + Number or rows to show. Default to 50. + + """ + + def __init__(self, use_local_files=False, display_length=50): + self._use_local_files = use_local_files + self.display_length_menu = [[10, 25, 50, 100, 500, 1000, -1], + [10, 25, 50, 100, 500, 1000, "All"]] + self.display_length = display_length + for L in self.display_length_menu: + if display_length not in L: + L.insert(0, display_length) + + @property + def jquery_urls(self): + if self._use_local_files: + return ['file://' + join(EXTERN_JS_DIR, 'jquery-3.1.1.min.js'), + 'file://' + join(EXTERN_JS_DIR, 'jquery.dataTables.min.js')] + else: + return [conf.jquery_url, conf.datatables_url] + + @property + def css_urls(self): + if self._use_local_files: + return ['file://' + join(EXTERN_CSS_DIR, + 'jquery.dataTables.css')] + else: + return conf.css_urls + + def _jstable_file(self): + if self._use_local_files: + return 'file://' + join(EXTERN_JS_DIR, 'jquery.dataTables.min') + else: + return conf.datatables_url[:-3] + + def ipynb(self, table_id, css=None, sort_columns='[]'): + html = ''.format(css if css is not None + else DEFAULT_CSS_NB) + html += IPYNB_JS_SCRIPT.format( + display_length=self.display_length, + display_length_menu=self.display_length_menu, + datatables_url=self._jstable_file(), + tid=table_id, sort_columns=sort_columns) + return html + + def html_js(self, table_id='table0', sort_columns='[]'): + return HTML_JS_SCRIPT.format( + display_length=self.display_length, + display_length_menu=self.display_length_menu, + tid=table_id, sort_columns=sort_columns).strip() + + +def write_table_jsviewer(table, filename, table_id=None, max_lines=5000, + table_class="display compact", jskwargs=None, + css=DEFAULT_CSS): + if table_id is None: + table_id = 'table{id}'.format(id=id(table)) + + jskwargs = jskwargs or {} + jsv = JSViewer(**jskwargs) + + sortable_columns = [i for i, col in enumerate(table.columns.values()) + if col.dtype.kind in 'iufc'] + htmldict = { + 'table_id': table_id, + 'table_class': table_class, + 'css': css, + 'cssfiles': jsv.css_urls, 
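+        # The entries that follow wire the jQuery/DataTables URLs and the
+        # generated initialization snippet into the HTML writer output.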
+ 'jsfiles': jsv.jquery_urls, + 'js': jsv.html_js(table_id=table_id, sort_columns=sortable_columns) + } + + if max_lines < len(table): + table = table[:max_lines] + table.write(filename, format='html', htmldict=htmldict) + + +io_registry.register_writer('jsviewer', Table, write_table_jsviewer) diff --git a/astropy/table/meta.py b/astropy/table/meta.py new file mode 100644 index 0000000..ea35e39 --- /dev/null +++ b/astropy/table/meta.py @@ -0,0 +1,342 @@ +import textwrap +import copy +from collections import OrderedDict + +from ..extern import six + +__all__ = ['get_header_from_yaml', 'get_yaml_from_header', 'get_yaml_from_table'] + + +class ColumnOrderList(list): + """ + List of tuples that sorts in a specific order that makes sense for + astropy table column attributes. + """ + + def sort(self, *args, **kwargs): + super(ColumnOrderList, self).sort() + + column_keys = ['name', 'unit', 'datatype', 'format', 'description', 'meta'] + in_dict = dict(self) + out_list = [] + + for key in column_keys: + if key in in_dict: + out_list.append((key, in_dict[key])) + for key, val in self: + if key not in column_keys: + out_list.append((key, val)) + + # Clear list in-place + del self[:] + + self.extend(out_list) + + +class ColumnDict(dict): + """ + Specialized dict subclass to represent attributes of a Column + and return items() in a preferred order. This is only for use + in generating a YAML map representation that has a fixed order. + """ + + def items(self): + """ + Return items as a ColumnOrderList, which sorts in the preferred + way for column attributes. + """ + return ColumnOrderList(super(ColumnDict, self).items()) + + +def _construct_odict(load, node): + """ + Construct OrderedDict from !!omap in yaml safe load. + + Source: https://gist.github.com/weaver/317164 + License: Unspecified + + This is the same as SafeConstructor.construct_yaml_omap(), + except the data type is changed to OrderedDict() and setitem is + used instead of append in the loop + + Examples + -------- + :: + + >>> yaml.load(''' # doctest: +SKIP + ... !!omap + ... - foo: bar + ... - mumble: quux + ... - baz: gorp + ... ''') + OrderedDict([('foo', 'bar'), ('mumble', 'quux'), ('baz', 'gorp')]) + + >>> yaml.load('''!!omap [ foo: bar, mumble: quux, baz : gorp ]''') # doctest: +SKIP + OrderedDict([('foo', 'bar'), ('mumble', 'quux'), ('baz', 'gorp')]) + """ + import yaml + + omap = OrderedDict() + yield omap + if not isinstance(node, yaml.SequenceNode): + raise yaml.constructor.ConstructorError( + "while constructing an ordered map", node.start_mark, + "expected a sequence, but found {}".format(node.id), node.start_mark) + + for subnode in node.value: + if not isinstance(subnode, yaml.MappingNode): + raise yaml.constructor.ConstructorError( + "while constructing an ordered map", node.start_mark, + "expected a mapping of length 1, but found {}".format(subnode.id), + subnode.start_mark) + + if len(subnode.value) != 1: + raise yaml.constructor.ConstructorError( + "while constructing an ordered map", node.start_mark, + "expected a single mapping item, but found {} items".format(len(subnode.value)), + subnode.start_mark) + + key_node, value_node = subnode.value[0] + key = load.construct_object(key_node) + value = load.construct_object(value_node) + omap[key] = value + + +def _repr_pairs(dump, tag, sequence, flow_style=None): + """ + This is the same code as BaseRepresenter.represent_sequence(), + but the value passed to dump.represent_data() in the loop is a + dictionary instead of a tuple. 
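+    Emitting each (key, value) pair as its own single-item mapping is what
+    the YAML ``!!omap`` form requires, so each item dumps as, e.g.,
+    ``- foo: bar``.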
+ + Source: https://gist.github.com/weaver/317164 + License: Unspecified + """ + import yaml + + value = [] + node = yaml.SequenceNode(tag, value, flow_style=flow_style) + if dump.alias_key is not None: + dump.represented_objects[dump.alias_key] = node + best_style = True + for (key, val) in sequence: + item = dump.represent_data({key: val}) + if not (isinstance(item, yaml.ScalarNode) and not item.style): + best_style = False + value.append(item) + if flow_style is None: + if dump.default_flow_style is not None: + node.flow_style = dump.default_flow_style + else: + node.flow_style = best_style + return node + + +def _repr_odict(dumper, data): + """ + Represent OrderedDict in yaml dump. + + Source: https://gist.github.com/weaver/317164 + License: Unspecified + + >>> data = OrderedDict([('foo', 'bar'), ('mumble', 'quux'), ('baz', 'gorp')]) + >>> yaml.dump(data, default_flow_style=False) # doctest: +SKIP + '!!omap\\n- foo: bar\\n- mumble: quux\\n- baz: gorp\\n' + >>> yaml.dump(data, default_flow_style=True) # doctest: +SKIP + '!!omap [foo: bar, mumble: quux, baz: gorp]\\n' + """ + return _repr_pairs(dumper, u'tag:yaml.org,2002:omap', six.iteritems(data)) + + +def _repr_column_dict(dumper, data): + """ + Represent ColumnDict in yaml dump. + + This is the same as an ordinary mapping except that the keys + are written in a fixed order that makes sense for astropy table + columns. + """ + return dumper.represent_mapping(u'tag:yaml.org,2002:map', data) + + +def _get_col_attributes(col): + """ + Extract information from a column (apart from the values) that is required + to fully serialize the column. + """ + attrs = ColumnDict() + attrs['name'] = col.info.name + + type_name = col.info.dtype.type.__name__ + if not six.PY2 and type_name.startswith(('bytes', 'str')): + type_name = 'string' + if type_name.endswith('_'): + type_name = type_name[:-1] # string_ and bool_ lose the final _ for ECSV + attrs['datatype'] = type_name + + # Set the output attributes + for attr, nontrivial, xform in (('unit', lambda x: x is not None, str), + ('format', lambda x: x is not None, None), + ('description', lambda x: x is not None, None), + ('meta', lambda x: x, None)): + col_attr = getattr(col.info, attr) + if nontrivial(col_attr): + attrs[attr] = xform(col_attr) if xform else col_attr + + return attrs + + +def get_yaml_from_table(table): + """ + Return lines with a YAML representation of header content from the ``table``. + + Parameters + ---------- + table : `~astropy.table.Table` object + Table for which header content is output + + Returns + ------- + lines : list + List of text lines with YAML header content + """ + + header = {'cols': list(six.itervalues(table.columns))} + if table.meta: + header['meta'] = table.meta + + return get_yaml_from_header(header) + + +def get_yaml_from_header(header): + """ + Return lines with a YAML representation of header content from a Table. + + The ``header`` dict must contain these keys: + + - 'cols' : list of table column objects (required) + - 'meta' : table 'meta' attribute (optional) + + Other keys included in ``header`` will be serialized in the output YAML + representation. 
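+
+    As an illustration (not verbatim output), a header with a single float
+    column named ``a`` would serialize to lines like::
+
+        datatype:
+        - {name: a, datatype: float64}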
+ + Parameters + ---------- + header : dict + Table header content + + Returns + ------- + lines : list + List of text lines with YAML header content + """ + try: + import yaml + except ImportError: + raise ImportError('`import yaml` failed, PyYAML package is required for ECSV format') + + from ..io.misc.yaml import AstropyDumper + + class TableDumper(AstropyDumper): + """ + Custom Dumper that represents OrderedDict as an !!omap object. + """ + + def represent_mapping(self, tag, mapping, flow_style=None): + """ + This is a combination of the Python 2 and 3 versions of this method + in the PyYAML library to allow the required key ordering via the + ColumnOrderList object. The Python 3 version insists on turning the + items() mapping into a list object and sorting, which results in + alphabetical order for the column keys. + """ + value = [] + node = yaml.MappingNode(tag, value, flow_style=flow_style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + if hasattr(mapping, 'items'): + mapping = mapping.items() + if hasattr(mapping, 'sort'): + mapping.sort() + else: + mapping = list(mapping) + try: + mapping = sorted(mapping) + except TypeError: + pass + + for item_key, item_value in mapping: + node_key = self.represent_data(item_key) + node_value = self.represent_data(item_value) + if not (isinstance(node_key, yaml.ScalarNode) and not node_key.style): + best_style = False + if not (isinstance(node_value, yaml.ScalarNode) and not node_value.style): + best_style = False + value.append((node_key, node_value)) + if flow_style is None: + if self.default_flow_style is not None: + node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + return node + + TableDumper.add_representer(OrderedDict, _repr_odict) + TableDumper.add_representer(ColumnDict, _repr_column_dict) + + header = copy.copy(header) # Don't overwrite original + header['datatype'] = [_get_col_attributes(col) for col in header['cols']] + del header['cols'] + + lines = yaml.dump(header, Dumper=TableDumper).splitlines() + return lines + + +class YamlParseError(Exception): + pass + + +def get_header_from_yaml(lines): + """ + Get a header dict from input ``lines`` which should be valid YAML in the + ECSV meta format. This input will typically be created by + get_yaml_from_header. The output is a dictionary which describes all the + table and column meta. + + The get_cols() method in the io/ascii/ecsv.py file should be used as a + guide to using the information when constructing a table using this + header dict information. + + Parameters + ---------- + lines : list + List of text lines with YAML header content + + Returns + ------- + header : dict + Dictionary describing table and column meta + """ + + try: + import yaml + except ImportError: + raise ImportError('`import yaml` failed, PyYAML package is required for ECSV format') + + from ..io.misc.yaml import AstropyLoader + + class TableLoader(AstropyLoader): + """ + Custom Loader that constructs OrderedDict from an !!omap object. + This does nothing but provide a namespace for adding the + custom odict constructor. 
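+
+        With this constructor registered, YAML such as ``!!omap [foo: bar]``
+        loads as ``OrderedDict([('foo', 'bar')])``.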
+ """ + + TableLoader.add_constructor(u'tag:yaml.org,2002:omap', _construct_odict) + # Now actually load the YAML data structure into `meta` + header_yaml = textwrap.dedent('\n'.join(lines)) + try: + header = yaml.load(header_yaml, Loader=TableLoader) + except Exception as err: + raise YamlParseError(str(err)) + + return header diff --git a/astropy/table/np_utils.py b/astropy/table/np_utils.py new file mode 100644 index 0000000..9310ed0 --- /dev/null +++ b/astropy/table/np_utils.py @@ -0,0 +1,203 @@ +""" +High-level operations for numpy structured arrays. + +Some code and inspiration taken from numpy.lib.recfunctions.join_by(). +Redistribution license restrictions apply. +""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) +from ..extern import six +from ..extern.six.moves import zip, range + +from itertools import chain +import collections +from collections import OrderedDict, Counter + +import numpy as np +import numpy.ma as ma + +from . import _np_utils + +__all__ = ['TableMergeError'] + + +class TableMergeError(ValueError): + pass + + +def get_col_name_map(arrays, common_names, uniq_col_name='{col_name}_{table_name}', + table_names=None): + """ + Find the column names mapping when merging the list of structured ndarrays + ``arrays``. It is assumed that col names in ``common_names`` are to be + merged into a single column while the rest will be uniquely represented + in the output. The args ``uniq_col_name`` and ``table_names`` specify + how to rename columns in case of conflicts. + + Returns a dict mapping each output column name to the input(s). This takes the form + {outname : (col_name_0, col_name_1, ...), ... }. For key columns all of input names + will be present, while for the other non-key columns the value will be (col_name_0, + None, ..) or (None, col_name_1, ..) etc. + """ + + col_name_map = collections.defaultdict(lambda: [None] * len(arrays)) + col_name_list = [] + + if table_names is None: + table_names = [six.text_type(ii + 1) for ii in range(len(arrays))] + + for idx, array in enumerate(arrays): + table_name = table_names[idx] + for name in array.dtype.names: + out_name = name + + if name in common_names: + # If name is in the list of common_names then insert into + # the column name list, but just once. + if name not in col_name_list: + col_name_list.append(name) + else: + # If name is not one of the common column outputs, and it collides + # with the names in one of the other arrays, then rename + others = list(arrays) + others.pop(idx) + if any(name in other.dtype.names for other in others): + out_name = uniq_col_name.format(table_name=table_name, col_name=name) + col_name_list.append(out_name) + + col_name_map[out_name][idx] = name + + # Check for duplicate output column names + col_name_count = Counter(col_name_list) + repeated_names = [name for name, count in six.iteritems(col_name_count) if count > 1] + if repeated_names: + raise TableMergeError('Merging column names resulted in duplicates: {0}. ' + 'Change uniq_col_name or table_names args to fix this.' + .format(repeated_names)) + + # Convert col_name_map to a regular dict with tuple (immutable) values + col_name_map = OrderedDict((name, col_name_map[name]) for name in col_name_list) + + return col_name_map + + +def get_descrs(arrays, col_name_map): + """ + Find the dtypes descrs resulting from merging the list of arrays' dtypes, + using the column name mapping ``col_name_map``. + + Return a list of descrs for the output. 
+ """ + + out_descrs = [] + + for out_name, in_names in six.iteritems(col_name_map): + # List of input arrays that contribute to this output column + in_cols = [arr[name] for arr, name in zip(arrays, in_names) if name is not None] + + # List of names of the columns that contribute to this output column. + names = [name for name in in_names if name is not None] + + # Output dtype is the superset of all dtypes in in_arrays + try: + dtype = common_dtype(in_cols) + except TableMergeError as tme: + # Beautify the error message when we are trying to merge columns with incompatible + # types by including the name of the columns that originated the error. + raise TableMergeError("The '{0}' columns have incompatible types: {1}" + .format(names[0], tme._incompat_types)) + + # Make sure all input shapes are the same + uniq_shapes = set(col.shape[1:] for col in in_cols) + if len(uniq_shapes) != 1: + raise TableMergeError('Key columns {0!r} have different shape'.format(name)) + shape = uniq_shapes.pop() + + out_descrs.append((fix_column_name(out_name), dtype, shape)) + + return out_descrs + + +def common_dtype(cols): + """ + Use numpy to find the common dtype for a list of structured ndarray columns. + + Only allow columns within the following fundamental numpy data types: + np.bool_, np.object_, np.number, np.character, np.void + """ + np_types = (np.bool_, np.object_, np.number, np.character, np.void) + uniq_types = set(tuple(issubclass(col.dtype.type, np_type) for np_type in np_types) + for col in cols) + if len(uniq_types) > 1: + # Embed into the exception the actual list of incompatible types. + incompat_types = [col.dtype.name for col in cols] + tme = TableMergeError('Columns have incompatible types {0}' + .format(incompat_types)) + tme._incompat_types = incompat_types + raise tme + + arrs = [np.empty(1, dtype=col.dtype) for col in cols] + + # For string-type arrays need to explicitly fill in non-zero + # values or the final arr_common = .. step is unpredictable. + for arr in arrs: + if arr.dtype.kind in ('S', 'U'): + arr[0] = '0' * arr.itemsize + + arr_common = np.array([arr[0] for arr in arrs]) + return arr_common.dtype.str + + +def _check_for_sequence_of_structured_arrays(arrays): + err = '`arrays` arg must be a sequence (e.g. list) of structured arrays' + if not isinstance(arrays, collections.Sequence): + raise TypeError(err) + for array in arrays: + # Must be structured array + if not isinstance(array, np.ndarray) or array.dtype.names is None: + raise TypeError(err) + if len(arrays) == 0: + raise ValueError('`arrays` arg must include at least one array') + + +def fix_column_name(val): + """ + Fixes column names so that they are compatible with Numpy on + Python 2. Raises a ValueError exception if the column name + contains Unicode characters, which can not reasonably be used as a + column name. + """ + if val is not None: + try: + val = str(val) + except UnicodeEncodeError: + if six.PY2: + raise ValueError( + "Column names must not contain Unicode characters " + "on Python 2") + raise + + return val + + +def recarray_fromrecords(rec_list): + """ + Partial replacement for `~numpy.core.records.fromrecords` which includes + a workaround for the bug with unicode arrays described at: + https://github.com/astropy/astropy/issues/3052 + + This should not serve as a full replacement for the original function; + this only does enough to fulfill the needs of the table module. 
+ """ + + # Note: This is just copying what Numpy does for converting arbitrary rows + # to column arrays in the recarray module; it could be there is a better + # way + nfields = len(rec_list[0]) + obj = np.array(rec_list, dtype=object) + array_list = [np.array(obj[..., i].tolist()) for i in range(nfields)] + formats = [] + for obj in array_list: + formats.append(obj.dtype.str) + formats = ','.join(formats) + return np.rec.fromarrays(array_list, formats=formats) diff --git a/astropy/table/operations.py b/astropy/table/operations.py new file mode 100644 index 0000000..6eaaa3a --- /dev/null +++ b/astropy/table/operations.py @@ -0,0 +1,872 @@ +""" +High-level table operations: + +- join() +- hstack() +- vstack() +""" +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, division, print_function, + unicode_literals) +from ..extern import six +from ..extern.six.moves import zip, range + +from copy import deepcopy +import warnings +import collections +import itertools +from collections import OrderedDict, Counter + +import numpy as np +from numpy import ma + +from ..utils import metadata +from .column import Column + +from . import _np_utils +from .np_utils import fix_column_name, TableMergeError + +__all__ = ['join', 'hstack', 'vstack', 'unique'] + + +def _merge_table_meta(out, tables, metadata_conflicts='warn'): + out_meta = deepcopy(tables[0].meta) + for table in tables[1:]: + out_meta = metadata.merge(out_meta, table.meta, metadata_conflicts=metadata_conflicts) + out.meta.update(out_meta) + + +def _get_list_of_tables(tables): + """ + Check that tables is a Table or sequence of Tables. Returns the + corresponding list of Tables. + """ + from .table import Table, Row + + # Make sure we have a list of things + if not isinstance(tables, collections.Sequence): + tables = [tables] + + # Make sure each thing is a Table or Row + if any(not isinstance(x, (Table, Row)) for x in tables) or len(tables) == 0: + raise TypeError('`tables` arg must be a Table or sequence of Tables or Rows') + + # Convert any Rows to Tables + tables = [(x if isinstance(x, Table) else Table(x)) for x in tables] + + return tables + + +def _get_out_class(objs): + """ + From a list of input objects ``objs`` get merged output object class. + + This is just taken as the deepest subclass. This doesn't handle complicated + inheritance schemes. + """ + out_class = objs[0].__class__ + for obj in objs[1:]: + if issubclass(obj.__class__, out_class): + out_class = obj.__class__ + + if any(not issubclass(out_class, obj.__class__) for obj in objs): + raise ValueError('unmergeable object classes {}' + .format([obj.__class__.__name__ for obj in objs])) + + return out_class + + +def join(left, right, keys=None, join_type='inner', + uniq_col_name='{col_name}_{table_name}', + table_names=['1', '2'], metadata_conflicts='warn'): + """ + Perform a join of the left table with the right table on specified keys. + + Parameters + ---------- + left : Table object or a value that will initialize a Table object + Left side table in the join + right : Table object or a value that will initialize a Table object + Right side table in the join + keys : str or list of str + Name(s) of column(s) used to match rows of left and right tables. + Default is to use all columns which are common to both tables. + join_type : str + Join type ('inner' | 'outer' | 'left' | 'right'), default is 'inner' + uniq_col_name : str or None + String generate a unique output column name in case of a conflict. 
+ The default is '{col_name}_{table_name}'. + table_names : list of str or None + Two-element list of table names used when generating unique output + column names. The default is ['1', '2']. + metadata_conflicts : str + How to proceed with metadata conflicts. This should be one of: + * ``'silent'``: silently pick the last conflicting meta-data value + * ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default) + * ``'error'``: raise an exception. + + Returns + ------- + joined_table : `~astropy.table.Table` object + New table containing the result of the join operation. + """ + from .table import Table + + # Try converting inputs to Table as needed + if not isinstance(left, Table): + left = Table(left) + if not isinstance(right, Table): + right = Table(right) + + col_name_map = OrderedDict() + out = _join(left, right, keys, join_type, + uniq_col_name, table_names, col_name_map, metadata_conflicts) + + # Merge the column and table meta data. Table subclasses might override + # these methods for custom merge behavior. + _merge_table_meta(out, [left, right], metadata_conflicts=metadata_conflicts) + + return out + + +def vstack(tables, join_type='outer', metadata_conflicts='warn'): + """ + Stack tables vertically (along rows) + + A ``join_type`` of 'exact' means that the tables must all have exactly + the same column names (though the order can vary). If ``join_type`` + is 'inner' then the intersection of common columns will be the output. + A value of 'outer' (default) means the output will have the union of + all columns, with table values being masked where no common values are + available. + + Parameters + ---------- + tables : Table or list of Table objects + Table(s) to stack along rows (vertically) with the current table + join_type : str + Join type ('inner' | 'exact' | 'outer'), default is 'outer' + metadata_conflicts : str + How to proceed with metadata conflicts. This should be one of: + * ``'silent'``: silently pick the last conflicting meta-data value + * ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default) + * ``'error'``: raise an exception. + + Returns + ------- + stacked_table : `~astropy.table.Table` object + New table containing the stacked data from the input tables. + + Examples + -------- + To stack two tables along rows do:: + + >>> from astropy.table import vstack, Table + >>> t1 = Table({'a': [1, 2], 'b': [3, 4]}, names=('a', 'b')) + >>> t2 = Table({'a': [5, 6], 'b': [7, 8]}, names=('a', 'b')) + >>> print(t1) + a b + --- --- + 1 3 + 2 4 + >>> print(t2) + a b + --- --- + 5 7 + 6 8 + >>> print(vstack([t1, t2])) + a b + --- --- + 1 3 + 2 4 + 5 7 + 6 8 + """ + tables = _get_list_of_tables(tables) # validates input + if len(tables) == 1: + return tables[0] # no point in stacking a single table + col_name_map = OrderedDict() + + out = _vstack(tables, join_type, col_name_map, metadata_conflicts) + + # Merge table metadata + _merge_table_meta(out, tables, metadata_conflicts=metadata_conflicts) + + return out + + +def hstack(tables, join_type='outer', + uniq_col_name='{col_name}_{table_name}', table_names=None, + metadata_conflicts='warn'): + """ + Stack tables along columns (horizontally) + + A ``join_type`` of 'exact' means that the tables must all + have exactly the same number of rows. If ``join_type`` is 'inner' then + the intersection of rows will be the output. A value of 'outer' (default) + means the output will have the union of all rows, with table values being + masked where no common values are available. 
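+    For example (an illustration of the masking), stacking a 2-row table
+    with a 3-row table using ``join_type='outer'`` gives a 3-row result in
+    which the columns from the shorter table are masked in the last row.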
+
+    Parameters
+    ----------
+    tables : List of Table objects
+        Tables to stack along columns (horizontally)
+    join_type : str
+        Join type ('inner' | 'exact' | 'outer'), default is 'outer'
+    uniq_col_name : str or None
+        String to generate a unique output column name in case of a conflict.
+        The default is '{col_name}_{table_name}'.
+    table_names : list of str or None
+        Two-element list of table names used when generating unique output
+        column names.  The default is ['1', '2', ...].
+    metadata_conflicts : str
+        How to proceed with metadata conflicts. This should be one of:
+            * ``'silent'``: silently pick the last conflicting meta-data value
+            * ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default)
+            * ``'error'``: raise an exception.
+
+    Returns
+    -------
+    stacked_table : `~astropy.table.Table` object
+        New table containing the stacked data from the input tables.
+
+    Examples
+    --------
+    To stack two tables horizontally (along columns) do::
+
+      >>> from astropy.table import Table, hstack
+      >>> t1 = Table({'a': [1, 2], 'b': [3, 4]}, names=('a', 'b'))
+      >>> t2 = Table({'c': [5, 6], 'd': [7, 8]}, names=('c', 'd'))
+      >>> print(t1)
+       a   b
+      --- ---
+        1   3
+        2   4
+      >>> print(t2)
+       c   d
+      --- ---
+        5   7
+        6   8
+      >>> print(hstack([t1, t2]))
+       a   b   c   d
+      --- --- --- ---
+        1   3   5   7
+        2   4   6   8
+    """
+    tables = _get_list_of_tables(tables)  # validates input
+    if len(tables) == 1:
+        return tables[0]  # no point in stacking a single table
+    col_name_map = OrderedDict()
+
+    out = _hstack(tables, join_type, uniq_col_name, table_names,
+                  col_name_map)
+
+    _merge_table_meta(out, tables, metadata_conflicts=metadata_conflicts)
+
+    return out
+
+
+def unique(input_table, keys=None, silent=False, keep='first'):
+    """
+    Returns the unique rows of a table.
+
+    Parameters
+    ----------
+    input_table : `~astropy.table.Table` object or a value that
+        will initialize a `~astropy.table.Table` object
+    keys : str or list of str
+        Name(s) of column(s) used to create unique rows.
+        Default is to use all columns.
+    silent : boolean
+        If `True`, masked value column(s) are silently removed from
+        ``keys``. If `False`, an exception is raised when ``keys``
+        contains masked value column(s).
+        Default is `False`.
+    keep : one of 'first', 'last' or 'none'
+        Whether to keep the first or last row for each set of
+        duplicates. If 'none', all rows that are duplicated are
+        removed, leaving only rows that are already unique in
+        the input.
+        Default is 'first'.
+
+    Returns
+    -------
+    unique_table : `~astropy.table.Table` object
+        New table containing only the unique rows of ``input_table``.
+
+    Examples
+    --------
+    >>> from astropy.table import unique, Table
+    >>> import numpy as np
+    >>> table = Table(data=[[1,2,3,2,3,3],
+    ...                     [2,3,4,5,4,6],
+    ...                     [3,4,5,6,7,8]],
+    ...               names=['col1', 'col2', 'col3'],
+    ...               dtype=[np.int32, np.int32, np.int32])
+    >>> table
+    <Table length=6>
+     col1  col2  col3
+    int32 int32 int32
+    ----- ----- -----
+        1     2     3
+        2     3     4
+        3     4     5
+        2     5     6
+        3     4     7
+        3     6     8
+    >>> unique(table, keys='col1')
+    <Table length=3>
+     col1  col2  col3
+    int32 int32 int32
+    ----- ----- -----
+        1     2     3
+        2     3     4
+        3     4     5
+    >>> unique(table, keys=['col1'], keep='last')
+    <Table length=3>
+     col1  col2  col3
+    int32 int32 int32
+    ----- ----- -----
+        1     2     3
+        2     5     6
+        3     6     8
+    >>> unique(table, keys=['col1', 'col2'])
+    <Table length=5>
+     col1  col2  col3
+    int32 int32 int32
+    ----- ----- -----
+        1     2     3
+        2     3     4
+        2     5     6
+        3     4     5
+        3     6     8
+    >>> unique(table, keys=['col1', 'col2'], keep='none')
+    <Table length=4>
+     col1  col2  col3
+    int32 int32 int32
+    ----- ----- -----
+        1     2     3
+        2     3     4
+        2     5     6
+        3     6     8
+    >>> unique(table, keys=['col1'], keep='none')
+    <Table length=1>
+     col1  col2  col3
+    int32 int32 int32
+    ----- ----- -----
+        1     2     3
+
+    """
+
+    if keep not in ('first', 'last', 'none'):
+        raise ValueError("'keep' should be one of 'first', 'last', 'none'")
+
+    if isinstance(keys, six.string_types):
+        keys = [keys]
+    if keys is None:
+        keys = input_table.colnames
+    else:
+        if len(set(keys)) != len(keys):
+            raise ValueError("duplicate key names")
+
+    if input_table.masked:
+        for key in keys[:]:
+            if np.any(input_table[key].mask):
+                if not silent:
+                    raise ValueError(
+                        "cannot use columns with masked values as keys; "
+                        "remove column '{0}' from keys and rerun "
+                        "unique()".format(key))
+                del keys[keys.index(key)]
+        if len(keys) == 0:
+            raise ValueError("no column remained in ``keys``; "
+                             "unique() cannot work with masked value "
+                             "key columns")
+
+    grouped_table = input_table.group_by(keys)
+    indices = grouped_table.groups.indices
+    if keep == 'first':
+        indices = indices[:-1]
+    elif keep == 'last':
+        indices = indices[1:] - 1
+    else:
+        indices = indices[:-1][np.diff(indices) == 1]
+
+    return grouped_table[indices]
+
+
+def get_col_name_map(arrays, common_names, uniq_col_name='{col_name}_{table_name}',
+                     table_names=None):
+    """
+    Find the column names mapping when merging the list of tables
+    ``arrays``.  It is assumed that col names in ``common_names`` are to be
+    merged into a single column while the rest will be uniquely represented
+    in the output.  The args ``uniq_col_name`` and ``table_names`` specify
+    how to rename columns in case of conflicts.
+
+    Returns a dict mapping each output column name to the input(s).  This takes
+    the form {outname : (col_name_0, col_name_1, ...), ... }.  For key columns
+    all of the input names will be present, while for the other non-key columns
+    the value will be (col_name_0, None, ...) or (None, col_name_1, ...) etc.
+    """
+
+    col_name_map = collections.defaultdict(lambda: [None] * len(arrays))
+    col_name_list = []
+
+    if table_names is None:
+        table_names = [six.text_type(ii + 1) for ii in range(len(arrays))]
+
+    for idx, array in enumerate(arrays):
+        table_name = table_names[idx]
+        for name in array.colnames:
+            out_name = name
+
+            if name in common_names:
+                # If name is in the list of common_names then insert into
+                # the column name list, but just once.
+                if name not in col_name_list:
+                    col_name_list.append(name)
+            else:
+                # If name is not one of the common column outputs, and it collides
+                # with the names in one of the other arrays, then rename
+                others = list(arrays)
+                others.pop(idx)
+                if any(name in other.colnames for other in others):
+                    out_name = uniq_col_name.format(table_name=table_name, col_name=name)
+                col_name_list.append(out_name)
+
+            col_name_map[out_name][idx] = name
+
+    # Check for duplicate output column names
+    col_name_count = Counter(col_name_list)
+    repeated_names = [name for name, count in six.iteritems(col_name_count) if count > 1]
+    if repeated_names:
+        raise TableMergeError('Merging column names resulted in duplicates: {0}.  '
+                              'Change uniq_col_name or table_names args to fix this.'
+                              .format(repeated_names))
+
+    # Convert col_name_map to a regular dict with tuple (immutable) values
+    col_name_map = OrderedDict((name, col_name_map[name]) for name in col_name_list)
+
+    return col_name_map
+
+
+def get_descrs(arrays, col_name_map):
+    """
+    Find the dtypes descrs resulting from merging the list of arrays' dtypes,
+    using the column name mapping ``col_name_map``.
+
+    Return a list of descrs for the output.
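+
+    For example (an illustrative sketch, not verified output): merging an
+    int32 column 'a' with a float64 column 'a' would give a descr like
+    ('a', '<f8', ()) on a little-endian platform, i.e. a (name,
+    type string, element shape) tuple.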
+ """ + + out_descrs = [] + + for out_name, in_names in six.iteritems(col_name_map): + # List of input arrays that contribute to this output column + in_cols = [arr[name] for arr, name in zip(arrays, in_names) if name is not None] + + # List of names of the columns that contribute to this output column. + names = [name for name in in_names if name is not None] + + # Output dtype is the superset of all dtypes in in_arrays + try: + dtype = common_dtype(in_cols) + except TableMergeError as tme: + # Beautify the error message when we are trying to merge columns with incompatible + # types by including the name of the columns that originated the error. + raise TableMergeError("The '{0}' columns have incompatible types: {1}" + .format(names[0], tme._incompat_types)) + + # Make sure all input shapes are the same + uniq_shapes = set(col.shape[1:] for col in in_cols) + if len(uniq_shapes) != 1: + raise TableMergeError('Key columns {0!r} have different shape'.format(names)) + shape = uniq_shapes.pop() + + out_descrs.append((fix_column_name(out_name), dtype, shape)) + + return out_descrs + + +def common_dtype(cols): + """ + Use numpy to find the common dtype for a list of columns. + + Only allow columns within the following fundamental numpy data types: + np.bool_, np.object_, np.number, np.character, np.void + """ + try: + return metadata.common_dtype(cols) + except metadata.MergeConflictError as err: + tme = TableMergeError('Columns have incompatible types {0}' + .format(err._incompat_types)) + tme._incompat_types = err._incompat_types + raise tme + + +def _join(left, right, keys=None, join_type='inner', + uniq_col_name='{col_name}_{table_name}', + table_names=['1', '2'], + col_name_map=None, metadata_conflicts='warn'): + """ + Perform a join of the left and right Tables on specified keys. + + Parameters + ---------- + left : Table + Left side table in the join + right : Table + Right side table in the join + keys : str or list of str + Name(s) of column(s) used to match rows of left and right tables. + Default is to use all columns which are common to both tables. + join_type : str + Join type ('inner' | 'outer' | 'left' | 'right'), default is 'inner' + uniq_col_name : str or None + String generate a unique output column name in case of a conflict. + The default is '{col_name}_{table_name}'. + table_names : list of str or None + Two-element list of table names used when generating unique output + column names. The default is ['1', '2']. + col_name_map : empty dict or None + If passed as a dict then it will be updated in-place with the + mapping of output to input column names. + + Returns + ------- + joined_table : `~astropy.table.Table` object + New table containing the result of the join operation. + """ + # Store user-provided col_name_map until the end + _col_name_map = col_name_map + + if join_type not in ('inner', 'outer', 'left', 'right'): + raise ValueError("The 'join_type' argument should be in 'inner', " + "'outer', 'left' or 'right' (got '{0}' instead)". 
+
+    # If we have a single key, put it in a tuple
+    if keys is None:
+        keys = tuple(name for name in left.colnames if name in right.colnames)
+        if len(keys) == 0:
+            raise TableMergeError('No keys in common between left and right tables')
+    elif isinstance(keys, six.string_types):
+        keys = (keys,)
+
+    # Check the key columns
+    for arr, arr_label in ((left, 'Left'), (right, 'Right')):
+        for name in keys:
+            if name not in arr.colnames:
+                raise TableMergeError('{0} table does not have key column {1!r}'
+                                      .format(arr_label, name))
+            if hasattr(arr[name], 'mask') and np.any(arr[name].mask):
+                raise TableMergeError('{0} key column {1!r} has missing values'
+                                      .format(arr_label, name))
+            if not isinstance(arr[name], np.ndarray):
+                raise ValueError("non-ndarray column '{}' not allowed as a key column"
+                                 .format(name))
+
+    len_left, len_right = len(left), len(right)
+
+    if len_left == 0 or len_right == 0:
+        raise ValueError('input tables for join must both have at least one row')
+
+    # Joined array dtype as a list of descr (name, type_str, shape) tuples
+    col_name_map = get_col_name_map([left, right], keys, uniq_col_name, table_names)
+    out_descrs = get_descrs([left, right], col_name_map)
+
+    # Make an array with just the key columns.  This uses a temporary
+    # structured array for efficiency.
+    out_keys_dtype = [descr for descr in out_descrs if descr[0] in keys]
+    out_keys = np.empty(len_left + len_right, dtype=out_keys_dtype)
+    for key in keys:
+        out_keys[key][:len_left] = left[key]
+        out_keys[key][len_left:] = right[key]
+    idx_sort = out_keys.argsort(order=keys)
+    out_keys = out_keys[idx_sort]
+
+    # Find the boundaries between distinct key values in the sorted keys
+    diffs = np.concatenate(([True], out_keys[1:] != out_keys[:-1], [True]))
+    idxs = np.flatnonzero(diffs)
+
+    # Main inner loop in Cython to compute the cartesian product
+    # indices for the given join type
+    int_join_type = {'inner': 0, 'outer': 1, 'left': 2, 'right': 3}[join_type]
+    masked, n_out, left_out, left_mask, right_out, right_mask = \
+        _np_utils.join_inner(idxs, idx_sort, len_left, int_join_type)
+
+    # If either of the inputs are masked then the output is masked
+    if left.masked or right.masked:
+        masked = True
+    masked = bool(masked)
+
+    out = _get_out_class([left, right])(masked=masked)
+
+    for out_name, dtype, shape in out_descrs:
+
+        left_name, right_name = col_name_map[out_name]
+        if left_name and right_name:  # this is a key which comes from left and right
+            cols = [left[left_name], right[right_name]]
+
+            col_cls = _get_out_class(cols)
+            if not hasattr(col_cls.info, 'new_like'):
+                raise NotImplementedError('join unavailable for mixin column type(s): {}'
+                                          .format(col_cls.__name__))
+
+            out[out_name] = col_cls.info.new_like(cols, n_out, metadata_conflicts, out_name)
+
+            if issubclass(col_cls, Column):
+                out[out_name][:] = np.where(right_mask,
+                                            left[left_name].take(left_out),
+                                            right[right_name].take(right_out))
+            else:
+                # np.where does not work for mixin columns (e.g. Quantity) so
+                # use a slower workaround.
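+                # (Illustrative note: the workaround fills the left-sourced
+                # and right-sourced rows of the output separately via
+                # boolean-mask assignment, which mixin classes such as
+                # Quantity do support.)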
+                left_mask = ~right_mask
+                if np.any(left_mask):
+                    out[out_name][left_mask] = left[left_name].take(left_out)
+                if np.any(right_mask):
+                    out[out_name][right_mask] = right[right_name].take(right_out)
+            continue
+        elif left_name:  # out_name came from the left table
+            name, array, array_out, array_mask = left_name, left, left_out, left_mask
+        elif right_name:
+            name, array, array_out, array_mask = right_name, right, right_out, right_mask
+        else:
+            raise TableMergeError('Unexpected column names (maybe one is ""?)')
+
+        # Finally add the joined column to the output table.
+        out[out_name] = array[name][array_out]
+
+        # If the output table is masked then set the output column masking
+        # accordingly.  Check for columns that don't support a mask attribute.
+        if masked:
+            # array_mask is 1-d corresponding to length of output column.  We need
+            # to make it have the correct shape for broadcasting, i.e. (length, 1, 1, ...).
+            # Mixin columns might not have an ndim attribute so use len(col.shape).
+            array_mask.shape = (out[out_name].shape[0],) + (1,) * (len(out[out_name].shape) - 1)
+
+            if array.masked:
+                array_mask = array_mask | array[name].mask[array_out]
+            try:
+                out[out_name].mask[:] = array_mask
+            except ValueError:
+                raise NotImplementedError(
+                    "join requires masking column '{}' but column"
+                    " type {} does not support masking"
+                    .format(out_name, out[out_name].__class__.__name__))
+
+    # If col_name_map was supplied as a dict input, then update it.
+    if isinstance(_col_name_map, collections.Mapping):
+        _col_name_map.update(col_name_map)
+
+    return out
+
+
+def _vstack(arrays, join_type='outer', col_name_map=None, metadata_conflicts='warn'):
+    """
+    Stack Tables vertically (by rows)
+
+    A ``join_type`` of 'exact' means that the arrays must all have exactly
+    the same column names (though the order can vary).  If ``join_type``
+    is 'inner' then the intersection of common columns will be the output.
+    A value of 'outer' (default) means the output will have the union of
+    all columns, with array values being masked where no common values are
+    available.
+
+    Parameters
+    ----------
+    arrays : list of Tables
+        Tables to stack by rows (vertically)
+    join_type : str
+        Join type ('inner' | 'exact' | 'outer'), default is 'outer'
+    col_name_map : empty dict or None
+        If passed as a dict then it will be updated in-place with the
+        mapping of output to input column names.
+
+    Returns
+    -------
+    stacked_table : `~astropy.table.Table` object
+        New table containing the stacked data from the input tables.
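+
+    col_name_map example (an illustrative sketch): stacking tables with
+    columns ('a', 'b') and ('b', 'c') using join_type='outer' would fill
+    the dict with entries like
+    {'a': ('a', None), 'b': ('b', 'b'), 'c': (None, 'c')}.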
+ """ + # Store user-provided col_name_map until the end + _col_name_map = col_name_map + + # Input validation + if join_type not in ('inner', 'exact', 'outer'): + raise ValueError("`join_type` arg must be one of 'inner', 'exact' or 'outer'") + + # Trivial case of one input array + if len(arrays) == 1: + return arrays[0] + + # Start by assuming an outer match where all names go to output + names = set(itertools.chain(*[arr.colnames for arr in arrays])) + col_name_map = get_col_name_map(arrays, names) + + # If require_match is True then the output must have exactly the same + # number of columns as each input array + if join_type == 'exact': + for names in six.itervalues(col_name_map): + if any(x is None for x in names): + raise TableMergeError('Inconsistent columns in input arrays ' + "(use 'inner' or 'outer' join_type to " + "allow non-matching columns)") + join_type = 'outer' + + # For an inner join, keep only columns where all input arrays have that column + if join_type == 'inner': + col_name_map = OrderedDict((name, in_names) for name, in_names in six.iteritems(col_name_map) + if all(x is not None for x in in_names)) + if len(col_name_map) == 0: + raise TableMergeError('Input arrays have no columns in common') + + # If there are any output columns where one or more input arrays are missing + # then the output must be masked. If any input arrays are masked then + # output is masked. + masked = any(getattr(arr, 'masked', False) for arr in arrays) + for names in six.itervalues(col_name_map): + if any(x is None for x in names): + masked = True + break + + lens = [len(arr) for arr in arrays] + n_rows = sum(lens) + out = _get_out_class(arrays)(masked=masked) + + for out_name, in_names in six.iteritems(col_name_map): + # List of input arrays that contribute to this output column + cols = [arr[name] for arr, name in zip(arrays, in_names) if name is not None] + + col_cls = _get_out_class(cols) + if not hasattr(col_cls.info, 'new_like'): + raise NotImplementedError('vstack unavailable for mixin column type(s): {}' + .format(col_cls.__name__)) + try: + out[out_name] = col_cls.info.new_like(cols, n_rows, metadata_conflicts, out_name) + except metadata.MergeConflictError as err: + # Beautify the error message when we are trying to merge columns with incompatible + # types by including the name of the columns that originated the error. + raise TableMergeError("The '{0}' columns have incompatible types: {1}" + .format(out_name, err._incompat_types)) + + idx0 = 0 + for name, array in zip(in_names, arrays): + idx1 = idx0 + len(array) + if name in array.colnames: + out[out_name][idx0:idx1] = array[name] + else: + try: + out[out_name].mask[idx0:idx1] = True + except ValueError: + raise NotImplementedError( + "vstack requires masking column '{}' but column" + " type {} does not support masking" + .format(out_name, out[out_name].__class__.__name__)) + idx0 = idx1 + + # If col_name_map supplied as a dict input, then update. + if isinstance(_col_name_map, collections.Mapping): + _col_name_map.update(col_name_map) + + return out + + +def _hstack(arrays, join_type='outer', uniq_col_name='{col_name}_{table_name}', + table_names=None, col_name_map=None): + """ + Stack tables horizontally (by columns) + + A ``join_type`` of 'exact' (default) means that the arrays must all + have exactly the same number of rows. If ``join_type`` is 'inner' then + the intersection of rows will be the output. 
+    A value of 'outer' (default) means the output will have the union of
+    all rows, with array values being masked where no common values are
+    available.
+
+    Parameters
+    ----------
+    arrays : list of Tables
+        Tables to stack by columns (horizontally)
+    join_type : str
+        Join type ('inner' | 'exact' | 'outer'), default is 'outer'
+    uniq_col_name : str or None
+        String to generate a unique output column name in case of a conflict.
+        The default is '{col_name}_{table_name}'.
+    table_names : list of str or None
+        Two-element list of table names used when generating unique output
+        column names.  The default is ['1', '2', ...].
+
+    Returns
+    -------
+    stacked_table : `~astropy.table.Table` object
+        New table containing the stacked data from the input tables.
+    """
+
+    # Store user-provided col_name_map until the end
+    _col_name_map = col_name_map
+
+    # Input validation
+    if join_type not in ('inner', 'exact', 'outer'):
+        raise ValueError("join_type arg must be either 'inner', 'exact' or 'outer'")
+
+    if table_names is None:
+        table_names = ['{0}'.format(ii + 1) for ii in range(len(arrays))]
+    if len(arrays) != len(table_names):
+        raise ValueError('Number of arrays must match number of table_names')
+
+    # Trivial case of one input array
+    if len(arrays) == 1:
+        return arrays[0]
+
+    col_name_map = get_col_name_map(arrays, [], uniq_col_name, table_names)
+
+    # If join_type is 'exact' then all input arrays must have the same length
+    arr_lens = [len(arr) for arr in arrays]
+    if join_type == 'exact':
+        if len(set(arr_lens)) > 1:
+            raise TableMergeError("Inconsistent number of rows in input arrays "
+                                  "(use 'inner' or 'outer' join_type to allow "
+                                  "non-matching rows)")
+        join_type = 'outer'
+
+    # For an inner join, keep only the common rows
+    if join_type == 'inner':
+        min_arr_len = min(arr_lens)
+        if len(set(arr_lens)) > 1:
+            arrays = [arr[:min_arr_len] for arr in arrays]
+        arr_lens = [min_arr_len for arr in arrays]
+
+    # If there are any output rows where one or more input arrays are missing
+    # then the output must be masked.  If any input arrays are masked then
+    # the output is masked.
+    masked = any(getattr(arr, 'masked', False) for arr in arrays) or len(set(arr_lens)) > 1
+
+    n_rows = max(arr_lens)
+    out = _get_out_class(arrays)(masked=masked)
+
+    for out_name, in_names in six.iteritems(col_name_map):
+        for name, array, arr_len in zip(in_names, arrays, arr_lens):
+            if name is None:
+                continue
+
+            if n_rows > arr_len:
+                indices = np.arange(n_rows)
+                indices[arr_len:] = 0
+                out[out_name] = array[name][indices]
+                try:
+                    out[out_name].mask[arr_len:] = True
+                except ValueError:
+                    raise NotImplementedError(
+                        "hstack requires masking column '{}' but column"
+                        " type {} does not support masking"
+                        .format(out_name, out[out_name].__class__.__name__))
+            else:
+                out[out_name] = array[name][:n_rows]
+
+    # If col_name_map was supplied as a dict input, then update it.
+    if isinstance(_col_name_map, collections.Mapping):
+        _col_name_map.update(col_name_map)
+
+    return out
diff --git a/astropy/table/pprint.py b/astropy/table/pprint.py
new file mode 100644
index 0000000..f7c7632
--- /dev/null
+++ b/astropy/table/pprint.py
@@ -0,0 +1,713 @@
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
+from ..extern import six
+from ..extern.six import text_type
+from ..extern.six.moves import zip, range
+
+import os
+import sys
+import re
+
+import numpy as np
+
+from .. import log
+from ..utils.console import Getch, color_print, terminal_size, conf
+from ..utils.data_info import dtype_info_name
+
+__all__ = []
+
+
+def default_format_func(format_, val):
+    if isinstance(val, bytes):
+        return val.decode('utf-8', errors='replace')
+    else:
+        return text_type(val)
+
+
+# The first three functions are helpers for _auto_format_func
+
+def _use_str_for_masked_values(format_func):
+    """Wrap format function to trap masked values.
+
+    String format functions and most user functions will not be able to deal
+    with masked values, so we wrap them to ensure they are passed to str().
+    """
+    return lambda format_, val: (str(val) if val is np.ma.masked
+                                 else format_func(format_, val))
+
+
+def _possible_string_format_functions(format_):
+    """Iterate through possible string-derived format functions.
+
+    A string can either be a format specifier for the format built-in,
+    a new-style format string, or an old-style format string.
+    """
+    yield lambda format_, val: format(val, format_)
+    yield lambda format_, val: format_.format(val)
+    yield lambda format_, val: format_ % val
+
+
+def get_auto_format_func(
+        col=None,
+        possible_string_format_functions=_possible_string_format_functions):
+    """
+    Return a wrapped ``auto_format_func`` function which is used in
+    formatting table columns.  This is primarily an internal function but
+    gets used directly in other parts of astropy, e.g. `astropy.io.ascii`.
+
+    Parameters
+    ----------
+    col : object, optional
+        Hashable object to identify the column, like an id or name.
+        Default is None.
+
+    possible_string_format_functions : func, optional
+        Function that yields possible string formatting functions
+        (defaults to internal function to do this).
+
+    Returns
+    -------
+    Wrapped ``auto_format_func`` function
+    """
+
+    def _auto_format_func(format_, val):
+        """Format ``val`` according to ``format_`` for a plain format specifier,
+        old- or new-style format strings, or using a user supplied function.
+        More importantly, determine and cache (in _format_funcs) a function
+        that will do this subsequently.  In this way this complicated logic is
+        only done for the first value.
+
+        Returns the formatted value.
+        """
+        if format_ is None:
+            return default_format_func(format_, val)
+
+        if format_ in col.info._format_funcs:
+            return col.info._format_funcs[format_](format_, val)
+
+        if six.callable(format_):
+            format_func = lambda format_, val: format_(val)
+            try:
+                out = format_func(format_, val)
+                if not isinstance(out, six.string_types):
+                    raise ValueError('Format function for value {0} returned {1} '
+                                     'instead of string type'
+                                     .format(val, type(val)))
+            except Exception as err:
+                # For a masked element, the format function call likely failed
+                # to handle it.  Just return the string representation for now,
+                # and retry when a non-masked value comes along.
+                if val is np.ma.masked:
+                    return str(val)
+
+                raise ValueError('Format function for value {0} failed: {1}'
+                                 .format(val, err))
+            # If the user-supplied function handles formatting masked elements, use
+            # it directly.  Otherwise, wrap it in a function that traps them.
+            try:
+                format_func(format_, np.ma.masked)
+            except Exception:
+                format_func = _use_str_for_masked_values(format_func)
+        else:
+            # For a masked element, we cannot set string-based format functions yet,
+            # as all tests below will fail.  Just return the string representation
+            # of masked for now, and retry when a non-masked value comes along.
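+            # (Illustrative note: a format_ of '5.2f' is handled by
+            # format(val, '5.2f'), '{:5.2f}' by str.format, and '%5.2f' by
+            # the %-operator; the loop below tries each style in turn on
+            # the first value and caches whichever one succeeds.)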
+ if val is np.ma.masked: + return str(val) + + for format_func in possible_string_format_functions(format_): + try: + # Does this string format method work? + out = format_func(format_, val) + # Require that the format statement actually did something. + if out == format_: + raise ValueError('the format passed in did nothing.') + except Exception: + continue + else: + break + else: + # None of the possible string functions passed muster. + raise ValueError('Unable to parse format string {0}' + .format(format_)) + + # String-based format functions will fail on masked elements; + # wrap them in a function that traps them. + format_func = _use_str_for_masked_values(format_func) + + col.info._format_funcs[format_] = format_func + return out + + return _auto_format_func + + +class TableFormatter(object): + @staticmethod + def _get_pprint_size(max_lines=None, max_width=None): + """Get the output size (number of lines and character width) for Column and + Table pformat/pprint methods. + + If no value of ``max_lines`` is supplied then the height of the + screen terminal is used to set ``max_lines``. If the terminal + height cannot be determined then the default will be determined + using the ``astropy.table.conf.max_lines`` configuration item. If a + negative value of ``max_lines`` is supplied then there is no line + limit applied. + + The same applies for max_width except the configuration item is + ``astropy.table.conf.max_width``. + + Parameters + ---------- + max_lines : int or None + Maximum lines of output (header + data rows) + + max_width : int or None + Maximum width (characters) output + + Returns + ------- + max_lines, max_width : int + + """ + if max_lines is None: + max_lines = conf.max_lines + + if max_width is None: + max_width = conf.max_width + + if max_lines is None or max_width is None: + lines, width = terminal_size() + + if max_lines is None: + max_lines = lines + elif max_lines < 0: + max_lines = sys.maxsize + if max_lines < 8: + max_lines = 8 + + if max_width is None: + max_width = width + elif max_width < 0: + max_width = sys.maxsize + if max_width < 10: + max_width = 10 + + return max_lines, max_width + + def _pformat_col(self, col, max_lines=None, show_name=True, show_unit=None, + show_dtype=False, show_length=None, html=False, align=None): + """Return a list of formatted string representation of column values. + + Parameters + ---------- + max_lines : int + Maximum lines of output (header + data rows) + + show_name : bool + Include column name. Default is True. + + show_unit : bool + Include a header row for unit. Default is to show a row + for units only if one or more columns has a defined value + for the unit. + + show_dtype : bool + Include column dtype. Default is False. + + show_length : bool + Include column length at end. Default is to show this only + if the column is not shown completely. + + html : bool + Output column as HTML + + align : str + Left/right alignment of columns. Default is '>' (right) for all + columns. Other allowed values are '<', '^', and '0=' for left, + centered, and 0-padded, respectively. + + Returns + ------- + lines : list + List of lines with formatted column values + + outs : dict + Dict which is used to pass back additional values + defined within the iterator. 
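+
+        For example (an illustrative sketch), a three-row integer column
+        named "a" with show_name=True and no unit would come back as
+        lines like [' a ', '---', '  1', '  2', '  3'].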
+ + """ + if show_unit is None: + show_unit = col.info.unit is not None + + outs = {} # Some values from _pformat_col_iter iterator that are needed here + col_strs_iter = self._pformat_col_iter(col, max_lines, show_name=show_name, + show_unit=show_unit, + show_dtype=show_dtype, + show_length=show_length, + outs=outs) + col_strs = list(col_strs_iter) + if len(col_strs) > 0: + col_width = max(len(x) for x in col_strs) + + if html: + from ..utils.xml.writer import xml_escape + n_header = outs['n_header'] + for i, col_str in enumerate(col_strs): + # _pformat_col output has a header line '----' which is not needed here + if i == n_header - 1: + continue + td = 'th' if i < n_header else 'td' + val = '<{0}>{1}'.format(td, xml_escape(col_str.strip()), td) + row = ('' + val + '') + if i < n_header: + row = ('' + row + '') + col_strs[i] = row + + if n_header > 0: + # Get rid of '---' header line + col_strs.pop(n_header - 1) + col_strs.insert(0, '
+            col_strs.append('</table>')
+
+        # Now bring all the column string values to the same fixed width
+        else:
+            col_width = max(len(x) for x in col_strs) if col_strs else 1
+
+            # Center line header content and generate dashed headerline
+            for i in outs['i_centers']:
+                col_strs[i] = col_strs[i].center(col_width)
+            if outs['i_dashes'] is not None:
+                col_strs[outs['i_dashes']] = '-' * col_width
+
+            # Format columns according to alignment.  `align` arg has precedence,
+            # otherwise use `col.format` if it starts with a legal alignment string.
+            # If neither applies then right justify.
+            re_fill_align = re.compile(r'(?P<fill>.?)(?P<align>[<^>=])')
+            match = None
+            if align:
+                # If there is an align specified then it must match
+                match = re_fill_align.match(align)
+                if not match:
+                    raise ValueError("column align must be one of '<', '^', '>', or '='")
+            elif isinstance(col.info.format, six.string_types):
+                # col.info.format need not match, in which case rjust gets used
+                match = re_fill_align.match(col.info.format)
+
+            if match:
+                fill_char = match.group('fill')
+                align_char = match.group('align')
+                if align_char == '=':
+                    if fill_char != '0':
+                        raise ValueError("fill character must be '0' for '=' align")
+                    fill_char = ''  # str.zfill gets used, which does not take a fill char arg
+            else:
+                fill_char = ''
+                align_char = '>'
+
+            justify_methods = {'<': 'ljust', '^': 'center', '>': 'rjust', '=': 'zfill'}
+            justify_method = justify_methods[align_char]
+            justify_args = (col_width, fill_char) if fill_char else (col_width,)
+
+            for i, col_str in enumerate(col_strs):
+                col_strs[i] = getattr(col_str, justify_method)(*justify_args)
+
+        if outs['show_length']:
+            col_strs.append('Length = {0} rows'.format(len(col)))
+
+        return col_strs, outs
+
+    def _pformat_col_iter(self, col, max_lines, show_name, show_unit, outs,
+                          show_dtype=False, show_length=None):
+        """Iterator which yields formatted string representation of column values.
+
+        Parameters
+        ----------
+        max_lines : int
+            Maximum lines of output (header + data rows)
+
+        show_name : bool
+            Include column name.  Default is True.
+
+        show_unit : bool
+            Include a header row for unit.  Default is to show a row
+            for units only if one or more columns has a defined value
+            for the unit.
+
+        outs : dict
+            Must be a dict which is used to pass back additional values
+            defined within the iterator.
+
+        show_dtype : bool
+            Include column dtype.  Default is False.
+
+        show_length : bool
+            Include column length at end.  Default is to show this only
+            if the column is not shown completely.
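+
+        Yields
+        ------
+        str
+            Successive formatted lines: header rows (name, unit, dtype as
+            requested), a '---' separator row, then the data values, with
+            a '...' row where the column is truncated.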
+ """ + max_lines, _ = self._get_pprint_size(max_lines, -1) + + multidims = getattr(col, 'shape', [0])[1:] + if multidims: + multidim0 = tuple(0 for n in multidims) + multidim1 = tuple(n - 1 for n in multidims) + trivial_multidims = np.prod(multidims) == 1 + + i_dashes = None + i_centers = [] # Line indexes where content should be centered + n_header = 0 + if show_name: + i_centers.append(n_header) + # Get column name (or 'None' if not set) + col_name = six.text_type(col.info.name) + if multidims: + col_name += ' [{0}]'.format( + ','.join(six.text_type(n) for n in multidims)) + n_header += 1 + yield col_name + if show_unit: + i_centers.append(n_header) + n_header += 1 + yield six.text_type(col.info.unit or '') + if show_dtype: + i_centers.append(n_header) + n_header += 1 + try: + dtype = dtype_info_name(col.dtype) + except AttributeError: + dtype = 'object' + yield six.text_type(dtype) + if show_unit or show_name or show_dtype: + i_dashes = n_header + n_header += 1 + yield '---' + + max_lines -= n_header + n_print2 = max_lines // 2 + n_rows = len(col) + + # This block of code is responsible for producing the function that + # will format values for this column. The ``format_func`` function + # takes two args (col_format, val) and returns the string-formatted + # version. Some points to understand: + # + # - col_format could itself be the formatting function, so it will + # actually end up being called with itself as the first arg. In + # this case the function is expected to ignore its first arg. + # + # - auto_format_func is a function that gets called on the first + # column value that is being formatted. It then determines an + # appropriate formatting function given the actual value to be + # formatted. This might be deterministic or it might involve + # try/except. The latter allows for different string formatting + # options like %f or {:5.3f}. When auto_format_func is called it: + + # 1. Caches the function in the _format_funcs dict so for subsequent + # values the right function is called right away. + # 2. Returns the formatted value. + # + # - possible_string_format_functions is a function that yields a + # succession of functions that might successfully format the + # value. There is a default, but Mixin methods can override this. + # See Quantity for an example. + # + # - get_auto_format_func() returns a wrapped version of auto_format_func + # with the column id and possible_string_format_functions as + # enclosed variables. + col_format = col.info.format or getattr(col.info, 'default_format', None) + pssf = (getattr(col.info, 'possible_string_format_functions', None) or + _possible_string_format_functions) + auto_format_func = get_auto_format_func(col, pssf) + format_func = col.info._format_funcs.get(col_format, auto_format_func) + + if len(col) > max_lines: + if show_length is None: + show_length = True + i0 = n_print2 - (1 if show_length else 0) + i1 = n_rows - n_print2 - max_lines % 2 + ii = np.concatenate([np.arange(0, i0 + 1), np.arange(i1 + 1, len(col))]) + else: + i0 = -1 + ii = np.arange(len(col)) + + # Add formatted values if within bounds allowed by max_lines + for i in ii: + if i == i0: + yield '...' + else: + if multidims: + # Prevents columns like Column(data=[[(1,)],[(2,)]], name='a') + # with shape (n,1,...,1) from being printed as if there was + # more than one element in a row + if trivial_multidims: + col_str = format_func(col_format, col[(i,) + multidim0]) + else: + col_str = (format_func(col_format, col[(i,) + multidim0]) + + ' .. 
' + + format_func(col_format, col[(i,) + multidim1])) + else: + col_str = format_func(col_format, col[i]) + yield col_str + + outs['show_length'] = show_length + outs['n_header'] = n_header + outs['i_centers'] = i_centers + outs['i_dashes'] = i_dashes + + def _pformat_table(self, table, max_lines=None, max_width=None, + show_name=True, show_unit=None, show_dtype=False, + html=False, tableid=None, tableclass=None, align=None): + """Return a list of lines for the formatted string representation of + the table. + + Parameters + ---------- + max_lines : int or None + Maximum number of rows to output + + max_width : int or None + Maximum character width of output + + show_name : bool + Include a header row for column names. Default is True. + + show_unit : bool + Include a header row for unit. Default is to show a row + for units only if one or more columns has a defined value + for the unit. + + show_dtype : bool + Include a header row for column dtypes. Default is False. + + html : bool + Format the output as an HTML table. Default is False. + + tableid : str or None + An ID tag for the table; only used if html is set. Default is + "table{id}", where id is the unique integer id of the table object, + id(table) + + tableclass : str or list of str or `None` + CSS classes for the table; only used if html is set. Default is + none + + align : str or list or tuple + Left/right alignment of columns. Default is '>' (right) for all + columns. Other allowed values are '<', '^', and '0=' for left, + centered, and 0-padded, respectively. A list of strings can be + provided for alignment of tables with multiple columns. + + Returns + ------- + rows : list + Formatted table as a list of strings + + outs : dict + Dict which is used to pass back additional values + defined within the iterator. + + """ + # "Print" all the values into temporary lists by column for subsequent + # use and to determine the width + max_lines, max_width = self._get_pprint_size(max_lines, max_width) + cols = [] + + if show_unit is None: + show_unit = any(col.info.unit for col in six.itervalues(table.columns)) + + # Coerce align into a correctly-sized list of alignments (if possible) + n_cols = len(table.columns) + if align is None or isinstance(align, six.string_types): + align = [align] * n_cols + + elif isinstance(align, (list, tuple)): + if len(align) != n_cols: + raise ValueError('got {0} alignment values instead of ' + 'the number of columns ({1})' + .format(len(align), n_cols)) + else: + raise TypeError('align keyword must be str or list or tuple (got {0})' + .format(type(align))) + + for align_, col in zip(align, table.columns.values()): + lines, outs = self._pformat_col(col, max_lines, show_name=show_name, + show_unit=show_unit, show_dtype=show_dtype, + align=align_) + if outs['show_length']: + lines = lines[:-1] + cols.append(lines) + + if not cols: + return [''], {'show_length': False} + + # Use the values for the last column since they are all the same + n_header = outs['n_header'] + + n_rows = len(cols[0]) + outwidth = lambda cols: sum(len(c[0]) for c in cols) + len(cols) - 1 + dots_col = ['...'] * n_rows + middle = len(cols) // 2 + while outwidth(cols) > max_width: + if len(cols) == 1: + break + if len(cols) == 2: + cols[1] = dots_col + break + if cols[middle] is dots_col: + cols.pop(middle) + middle = len(cols) // 2 + cols[middle] = dots_col + + # Now "print" the (already-stringified) column values into a + # row-oriented list. 
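+        # (At this point each entry of ``cols`` is a list of equal-width
+        # strings for one column; middle columns may already have been
+        # replaced by the '...' placeholder column above to satisfy
+        # max_width.)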
+        rows = []
+        if html:
+            from ..utils.xml.writer import xml_escape
+
+            if tableid is None:
+                tableid = 'table{id}'.format(id=id(table))
+
+            if tableclass is not None:
+                if isinstance(tableclass, list):
+                    tableclass = ' '.join(tableclass)
+                rows.append('<table id="{tid}" class="{tcls}">'.format(
+                    tid=tableid, tcls=tableclass))
+            else:
+                rows.append('<table id="{tid}">'.format(tid=tableid))
+
+            for i in range(n_rows):
+                # _pformat_col output has a header line '----' which is not needed here
+                if i == n_header - 1:
+                    continue
+                td = 'th' if i < n_header else 'td'
+                vals = ('<{0}>{1}</{2}>'.format(td, xml_escape(col[i].strip()), td)
+                        for col in cols)
+                row = ('<tr>' + ''.join(vals) + '</tr>')
+                if i < n_header:
+                    row = ('<thead>' + row + '</thead>')
+                rows.append(row)
+            rows.append('</table>')
+        else:
+            for i in range(n_rows):
+                row = ' '.join(col[i] for col in cols)
+                rows.append(row)
+
+        return rows, outs
+
+    def _more_tabcol(self, tabcol, max_lines=None, max_width=None,
+                     show_name=True, show_unit=None, show_dtype=False):
+        """Interactive "more" of a table or column.
+
+        Parameters
+        ----------
+        max_lines : int or None
+            Maximum number of rows to output
+
+        max_width : int or None
+            Maximum character width of output
+
+        show_name : bool
+            Include a header row for column names.  Default is True.
+
+        show_unit : bool
+            Include a header row for unit.  Default is to show a row
+            for units only if one or more columns has a defined value
+            for the unit.
+
+        show_dtype : bool
+            Include a header row for column dtypes.  Default is False.
+        """
+        allowed_keys = 'f br<>qhpn'
+
+        # Count the header lines
+        n_header = 0
+        if show_name:
+            n_header += 1
+        if show_unit:
+            n_header += 1
+        if show_dtype:
+            n_header += 1
+        if show_name or show_unit or show_dtype:
+            n_header += 1
+
+        # Set up kwargs for pformat call.  Only Table gets max_width.
+        kwargs = dict(max_lines=-1, show_name=show_name, show_unit=show_unit,
+                      show_dtype=show_dtype)
+        if hasattr(tabcol, 'columns'):  # tabcol is a table
+            kwargs['max_width'] = max_width
+
+        # If max_lines is None (=> query screen size) then increase by 2.
+        # This is because get_pprint_size leaves 6 extra lines so that in
+        # ipython you normally see the last input line.
+        max_lines1, max_width = self._get_pprint_size(max_lines, max_width)
+        if max_lines is None:
+            max_lines1 += 2
+        delta_lines = max_lines1 - n_header
+
+        # Set up a function to get a single character on any platform
+        inkey = Getch()
+
+        i0 = 0  # First table/column row to show
+        showlines = True
+        while True:
+            i1 = i0 + delta_lines  # Last table/col row to show
+            if showlines:  # Don't always show the table (e.g. after help)
+                try:
+                    os.system('cls' if os.name == 'nt' else 'clear')
+                except Exception:
+                    pass  # No worries if clear screen call fails
+                lines = tabcol[i0:i1].pformat(**kwargs)
+                colors = ('red' if i < n_header else 'default'
+                          for i in range(len(lines)))
+                for color, line in zip(colors, lines):
+                    color_print(line, color)
+            showlines = True
+            print()
+            print("-- f, <space>, b, r, p, n, <, >, q h (help) --", end=' ')
+            # Get a valid key
+            while True:
+                try:
+                    key = inkey().lower()
+                except Exception:
+                    print("\n")
+                    log.error('Console does not support getting a character'
+                              ' as required by more().  Use pprint() instead.')
+                    return
+                if key in allowed_keys:
+                    break
+            print(key)
+
+            if key.lower() == 'q':
+                break
+            elif key == ' ' or key == 'f':
+                i0 += delta_lines
+            elif key == 'b':
+                i0 = i0 - delta_lines
+            elif key == 'r':
+                pass
+            elif key == '<':
+                i0 = 0
+            elif key == '>':
+                i0 = len(tabcol)
+            elif key == 'p':
+                i0 -= 1
+            elif key == 'n':
+                i0 += 1
+            elif key == 'h':
+                showlines = False
+                print("""
+    Browsing keys:
+       f, <space> : forward one page
+       b : back one page
+       r : refresh same page
+       n : next row
+       p : previous row
+       < : go to beginning
+       > : go to end
+       q : quit browsing
+       h : print this help""", end=' ')
+            if i0 < 0:
+                i0 = 0
+            if i0 >= len(tabcol) - delta_lines:
+                i0 = len(tabcol) - delta_lines
+            print("\n")
diff --git a/astropy/table/row.py b/astropy/table/row.py
new file mode 100644
index 0000000..64034bc
--- /dev/null
+++ b/astropy/table/row.py
@@ -0,0 +1,176 @@
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
+
+import collections
+
+import numpy as np
+
+from ..extern import six
+
+
+class Row(object):
+    """A class to represent one row of a Table object.
+
+    A Row object is returned when a Table object is indexed with an integer
+    or when iterating over a table::
+
+      >>> from astropy.table import Table
+      >>> table = Table([(1, 2), (3, 4)], names=('a', 'b'),
+      ...               dtype=('int32', 'int32'))
+      >>> row = table[1]
+      >>> row
+      <Row index=1>
+        a     b
+      int32 int32
+      ----- -----
+          2     4
+      >>> row['a']
+      2
+      >>> row[1]
+      4
+    """
+
+    def __init__(self, table, index):
+        self._table = table
+        self._index = index
+
+        n = len(table)
+        if index < -n or index >= n:
+            raise IndexError('index {0} out of range for table with length {1}'
+                             .format(index, len(table)))
+
+    def __getitem__(self, item):
+        return self._table.columns[item][self._index]
+
+    def __setitem__(self, item, val):
+        self._table.columns[item][self._index] = val
+
+    def __eq__(self, other):
+        if self._table.masked:
+            # Sent bug report to numpy-discussion group on 2012-Oct-21, subject:
+            # "Comparing rows in a structured masked array raises exception"
+            # No response, so this is still unresolved.
+            raise ValueError('Unable to compare rows for masked table due to numpy.ma bug')
+        return self.as_void() == other
+
+    def __ne__(self, other):
+        if self._table.masked:
+            raise ValueError('Unable to compare rows for masked table due to numpy.ma bug')
+        return self.as_void() != other
+
+    def __array__(self, dtype=None):
+        """Support converting Row to np.array via np.array(table).
+
+        Coercion to a different dtype via np.array(table, dtype) is not
+        supported and will raise a ValueError.
+
+        If the parent table is masked then the mask information is dropped.
+        """
+        if dtype is not None:
+            raise ValueError('Datatype coercion is not allowed')
+
+        return np.asarray(self.as_void())
+
+    def __len__(self):
+        return len(self._table.columns)
+
+    def __iter__(self):
+        index = self._index
+        for col in six.itervalues(self._table.columns):
+            yield col[index]
+
+    @property
+    def table(self):
+        return self._table
+
+    @property
+    def index(self):
+        return self._index
+
+    def as_void(self):
+        """
+        Returns a *read-only* copy of the row values in the form of np.void or
+        np.ma.mvoid objects.  This corresponds to the object types returned for
+        row indexing of a pure numpy structured array or masked array.  This
+        method is slow and its use is discouraged when possible.
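+
+        For example (an illustrative sketch), ``table[0].as_void()`` on an
+        unmasked two-column integer table would return an np.void scalar
+        that prints like ``(1, 3)``, with fields named after the columns.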
+
+        Returns
+        -------
+        void_row : np.void (unmasked) or np.ma.mvoid (masked)
+            Copy of row values
+        """
+        index = self._index
+        cols = self._table.columns.values()
+        vals = tuple(np.asarray(col)[index] for col in cols)
+        if self._table.masked:
+            # The logic here is a little complicated to work around a
+            # bug in numpy < 1.8 (numpy/numpy#483).  Need to build up
+            # a np.ma.mvoid object by hand.
+            from .table import descr
+
+            # Make np.void version of masks.  Use the table dtype but
+            # substitute bool for the data type
+            masks = tuple(col.mask[index] if hasattr(col, 'mask') else False
+                          for col in cols)
+            descrs = (descr(col) for col in cols)
+            mask_dtypes = [(name, np.bool, shape) for name, type_, shape in descrs]
+            row_mask = np.array([masks], dtype=mask_dtypes)[0]
+
+            # Make np.void version of values, and then the final mvoid row
+            row_vals = np.array([vals], dtype=self.dtype)[0]
+            void_row = np.ma.mvoid(data=row_vals, mask=row_mask)
+        else:
+            void_row = np.array([vals], dtype=self.dtype)[0]
+        return void_row
+
+    @property
+    def meta(self):
+        return self._table.meta
+
+    @property
+    def columns(self):
+        return self._table.columns
+
+    @property
+    def colnames(self):
+        return self._table.colnames
+
+    @property
+    def dtype(self):
+        return self._table.dtype
+
+    def _base_repr_(self, html=False):
+        """
+        Display row as a single-line table but with appropriate header line.
+        """
+        index = self.index if (self.index >= 0) else self.index + len(self._table)
+        table = self._table[index:index + 1]
+        descr_vals = [self.__class__.__name__,
+                      'index={0}'.format(self.index)]
+        if table.masked:
+            descr_vals.append('masked=True')
+
+        return table._base_repr_(html, descr_vals, max_width=-1,
+                                 tableid='table{0}'.format(id(self._table)))
+
+    def _repr_html_(self):
+        return self._base_repr_(html=True)
+
+    def __repr__(self):
+        return self._base_repr_(html=False)
+
+    def __unicode__(self):
+        index = self.index if (self.index >= 0) else self.index + len(self._table)
+        return '\n'.join(self.table[index:index + 1].pformat(max_width=-1))
+    if not six.PY2:
+        __str__ = __unicode__
+
+    def __bytes__(self):
+        return six.text_type(self).encode('utf-8')
+    if six.PY2:
+        __str__ = __bytes__
+
+
+collections.Sequence.register(Row)
diff --git a/astropy/table/serialize.py b/astropy/table/serialize.py
new file mode 100644
index 0000000..a66c2ed
--- /dev/null
+++ b/astropy/table/serialize.py
@@ -0,0 +1,211 @@
+from importlib import import_module
+import re
+from copy import deepcopy
+
+from ..utils.data_info import MixinInfo
+from .column import Column
+from .table import Table, QTable, has_info_class
+from ..units.quantity import QuantityInfo
+
+
+__construct_mixin_classes = ('astropy.time.core.Time',
+                             'astropy.time.core.TimeDelta',
+                             'astropy.units.quantity.Quantity',
+                             'astropy.coordinates.angles.Latitude',
+                             'astropy.coordinates.angles.Longitude',
+                             'astropy.coordinates.angles.Angle',
+                             'astropy.coordinates.distances.Distance',
+                             'astropy.coordinates.earth.EarthLocation',
+                             'astropy.coordinates.sky_coordinate.SkyCoord',
+                             'astropy.table.table.NdarrayMixin')
+
+
+class SerializedColumn(dict):
+    """
+    Subclass of dict that is used in the representation to contain the name
+    (and possibly other info) for a mixin attribute (either primary data or an
+    array-like attribute) that is serialized as a column in the table.
+
+    Normally contains the single key ``name`` with the name of the column in the
+    table.
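+
+    For example (an illustrative sketch), a serialized SkyCoord might be
+    represented by entries like SerializedColumn({'name': 'coord.ra'}) and
+    SerializedColumn({'name': 'coord.dec'}), each pointing at a plain data
+    column written to the table.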
+ """ + pass + + +def _represent_mixin_as_column(col, name, new_cols, mixin_cols): + """Convert a mixin column to a plain columns or a set of mixin columns.""" + if not has_info_class(col, MixinInfo): + new_cols.append(col) + return + + # Subtlety here is handling mixin info attributes. The basic list of such + # attributes is: 'name', 'unit', 'dtype', 'format', 'description', 'meta'. + # - name: handled directly [DON'T store] + # - unit: DON'T store if this is a parent attribute + # - dtype: captured in plain Column if relevant [DON'T store] + # - format: possibly irrelevant but settable post-object creation [DO store] + # - description: DO store + # - meta: DO store + info = {} + for attr, nontrivial, xform in (('unit', lambda x: x not in (None, ''), str), + ('format', lambda x: x is not None, None), + ('description', lambda x: x is not None, None), + ('meta', lambda x: x, None)): + col_attr = getattr(col.info, attr) + if nontrivial(col_attr): + info[attr] = xform(col_attr) if xform else col_attr + + obj_attrs = col.info._represent_as_dict() + ordered_keys = col.info._represent_as_dict_attrs + + data_attrs = [key for key in ordered_keys if key in obj_attrs and + getattr(obj_attrs[key], 'shape', ())[:1] == col.shape[:1]] + + for data_attr in data_attrs: + data = obj_attrs[data_attr] + if len(data_attrs) == 1 and not has_info_class(data, MixinInfo): + # For one non-mixin attribute, we need only one serialized column. + # We can store info there, and keep the column name as is. + new_cols.append(Column(data, name=name, **info)) + obj_attrs[data_attr] = SerializedColumn({'name': name}) + # Remove attributes that are already on the serialized column. + for attr in info: + if attr in obj_attrs: + del obj_attrs[attr] + + else: + # New column name combines the old name and attribute + # (e.g. skycoord.ra, skycoord.dec). + new_name = name + '.' + data_attr + # TODO masking, MaskedColumn + if not has_info_class(data, MixinInfo): + new_cols.append(Column(data, name=new_name)) + obj_attrs[data_attr] = SerializedColumn({'name': new_name}) + else: + # recurse. This will define obj_attrs[new_name]. + _represent_mixin_as_column(data, new_name, new_cols, obj_attrs) + obj_attrs[data_attr] = SerializedColumn(obj_attrs.pop(new_name)) + + # Strip out from info any attributes defined by the parent + for attr in col.info.attrs_from_parent: + if attr in info: + del info[attr] + + if info: + obj_attrs['__info__'] = info + + # Store the fully qualified class name + obj_attrs['__class__'] = col.__module__ + '.' + col.__class__.__name__ + + mixin_cols[name] = obj_attrs + + +def _represent_mixins_as_columns(tbl): + """ + Convert any mixin columns to plain Column or MaskedColumn and + return a new table. + """ + if not tbl.has_mixin_columns: + return tbl + + mixin_cols = {} + + new_cols = [] + + for col in tbl.itercols(): + _represent_mixin_as_column(col, col.info.name, new_cols, mixin_cols) + + meta = deepcopy(tbl.meta) + meta['__serialized_columns__'] = mixin_cols + out = Table(new_cols, meta=meta, copy=False) + + return out + + +def _construct_mixin_from_obj_attrs_and_info(obj_attrs, info): + cls_full_name = obj_attrs.pop('__class__') + + # If this is a supported class then import the class and run + # the _construct_from_col method. Prevent accidentally running + # untrusted code by only importing known astropy classes. 
+ if cls_full_name not in __construct_mixin_classes: + raise ValueError('unsupported class for construct {}'.format(cls_full_name)) + + mod_name, cls_name = re.match(r'(.+)\.(\w+)', cls_full_name).groups() + module = import_module(mod_name) + cls = getattr(module, cls_name) + for attr, value in info.items(): + if attr in cls.info.attrs_from_parent: + obj_attrs[attr] = value + mixin = cls.info._construct_from_dict(obj_attrs) + for attr, value in info.items(): + if attr not in obj_attrs: + setattr(mixin.info, attr, value) + return mixin + + +def _construct_mixin_from_columns(new_name, obj_attrs, out): + data_attrs_map = {} + for name, val in obj_attrs.items(): + if isinstance(val, SerializedColumn): + if 'name' in val: + data_attrs_map[val['name']] = name + else: + _construct_mixin_from_columns(name, val, out) + data_attrs_map[name] = name + + for name in data_attrs_map.values(): + del obj_attrs[name] + + # Get the index where to add new column + idx = min(out.colnames.index(name) for name in data_attrs_map) + + # Name is the column name in the table (e.g. "coord.ra") and + # data_attr is the object attribute name (e.g. "ra"). A different + # example would be a formatted time object that would have (e.g.) + # "time_col" and "value", respectively. + for name, data_attr in data_attrs_map.items(): + col = out[name] + obj_attrs[data_attr] = col + del out[name] + + info = obj_attrs.pop('__info__', {}) + if len(data_attrs_map) == 1: + # col is the first and only serialized column; in that case, use info + # stored on the column. + for attr, nontrivial in (('unit', lambda x: x not in (None, '')), + ('format', lambda x: x is not None), + ('description', lambda x: x is not None), + ('meta', lambda x: x)): + col_attr = getattr(col.info, attr) + if nontrivial(col_attr): + info[attr] = col_attr + + info['name'] = new_name + col = _construct_mixin_from_obj_attrs_and_info(obj_attrs, info) + out.add_column(col, index=idx) + + +def _construct_mixins_from_columns(tbl): + if '__serialized_columns__' not in tbl.meta: + return tbl + + # Don't know final output class but assume QTable so no columns get + # downgraded. + out = QTable(tbl, copy=False) + + mixin_cols = out.meta.pop('__serialized_columns__') + + for new_name, obj_attrs in mixin_cols.items(): + _construct_mixin_from_columns(new_name, obj_attrs, out) + + # If no quantity subclasses are in the output then output as Table. + # For instance ascii.read(file, format='ecsv') doesn't specify an + # output class and should return the minimal table class that + # represents the table file. + has_quantities = any(isinstance(col.info, QuantityInfo) + for col in out.itercols()) + if not has_quantities: + out = Table(out, copy=False) + + return out diff --git a/astropy/table/setup_package.py b/astropy/table/setup_package.py new file mode 100644 index 0000000..a807ae1 --- /dev/null +++ b/astropy/table/setup_package.py @@ -0,0 +1,25 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import absolute_import + +import os +from distutils.extension import Extension + +ROOT = os.path.relpath(os.path.dirname(__file__)) + + +def get_extensions(): + sources = ["_np_utils.pyx", "_column_mixins.pyx"] + include_dirs = ['numpy'] + + exts = [ + Extension(name='astropy.table.' 
+                  os.path.splitext(source)[0],
+                  sources=[os.path.join(ROOT, source)],
+                  include_dirs=include_dirs)
+        for source in sources
+    ]
+
+    return exts
+
+
+def requires_2to3():
+    return False
diff --git a/astropy/table/sorted_array.py b/astropy/table/sorted_array.py
new file mode 100644
index 0000000..7c58b03
--- /dev/null
+++ b/astropy/table/sorted_array.py
@@ -0,0 +1,317 @@
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
+import numpy as np
+from ..extern.six.moves import range, zip
+
+
+def _searchsorted(array, val, side='left'):
+    '''
+    Call np.searchsorted or use a custom binary
+    search if necessary.
+    '''
+    if hasattr(array, 'searchsorted'):
+        return array.searchsorted(val, side=side)
+    # Python binary search
+    begin = 0
+    end = len(array)
+    while begin < end:
+        mid = (begin + end) // 2
+        if val > array[mid]:
+            begin = mid + 1
+        elif val < array[mid]:
+            end = mid
+        elif side == 'right':
+            begin = mid + 1
+        else:
+            end = mid
+    return begin
+
+
+class SortedArray(object):
+    '''
+    Implements a sorted array container using
+    a list of numpy arrays.
+
+    Parameters
+    ----------
+    data : Table
+        Sorted columns of the original table
+    row_index : Column object
+        Row numbers corresponding to data columns
+    unique : bool (defaults to False)
+        Whether the values of the index must be unique
+    '''
+
+    def __init__(self, data, row_index, unique=False):
+        self.data = data
+        self.row_index = row_index
+        self.num_cols = len(getattr(data, 'colnames', []))
+        self.unique = unique
+
+    @property
+    def cols(self):
+        return self.data.columns.values()
+
+    def add(self, key, row):
+        '''
+        Add a new entry to the sorted array.
+
+        Parameters
+        ----------
+        key : tuple
+            Column values at the given row
+        row : int
+            Row number
+        '''
+        pos = self.find_pos(key, row)  # first >= key
+
+        if self.unique and 0 <= pos < len(self.row_index) and \
+           all(self.data[pos][i] == key[i] for i in range(len(key))):
+            # already exists
+            raise ValueError('Cannot add duplicate value "{0}" in a '
+                             'unique index'.format(key))
+        self.data.insert_row(pos, key)
+        self.row_index = self.row_index.insert(pos, row)
+
+    def _get_key_slice(self, i, begin, end):
+        '''
+        Retrieve the ith slice of the sorted array
+        from begin to end.
+        '''
+        if i < self.num_cols:
+            return self.cols[i][begin:end]
+        else:
+            return self.row_index[begin:end]
+
+    def find_pos(self, key, data, exact=False):
+        '''
+        Return the index of the first entry in the sorted array that is
+        greater than or equal to the given (key, data) pair.
+
+        Parameters
+        ----------
+        key : tuple
+            Column key
+        data : int
+            Row number
+        exact : bool
+            If True, return the index of the given key in data
+            or -1 if the key is not present.
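+
+        For example (an illustrative sketch): with unique=False and stored
+        (key, row) pairs [(1, 0), (2, 1), (2, 2), (4, 3)], find_pos((2,), 2)
+        returns 2 and find_pos((3,), 0) returns 3, the first position whose
+        entry compares >= the requested pair.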
+ ''' + begin = 0 + end = len(self.row_index) + num_cols = self.num_cols + if not self.unique: + # consider the row value as well + key = key + (data,) + num_cols += 1 + + # search through keys in lexicographic order + for i in range(num_cols): + key_slice = self._get_key_slice(i, begin, end) + t = _searchsorted(key_slice, key[i]) + # t is the smallest index >= key[i] + if exact and (t == len(key_slice) or key_slice[t] != key[i]): + # no match + return -1 + elif t == len(key_slice) or (t == 0 and len(key_slice) > 0 and + key[i] < key_slice[0]): + # too small or too large + return begin + t + end = begin + _searchsorted(key_slice, key[i], side='right') + begin += t + if begin >= len(self.row_index): # greater than all keys + return begin + + return begin + + def find(self, key): + ''' + Find all rows matching the given key. + + Parameters + ---------- + key : tuple + Column values + + Returns + ------- + matching_rows : list + List of rows matching the input key + ''' + begin = 0 + end = len(self.row_index) + + # search through keys in lexicographic order + for i in range(self.num_cols): + key_slice = self._get_key_slice(i, begin, end) + t = _searchsorted(key_slice, key[i]) + # t is the smallest index >= key[i] + if t == len(key_slice) or key_slice[t] != key[i]: + # no match + return [] + elif t == 0 and len(key_slice) > 0 and key[i] < key_slice[0]: + # too small or too large + return [] + end = begin + _searchsorted(key_slice, key[i], side='right') + begin += t + if begin >= len(self.row_index): # greater than all keys + return [] + + return self.row_index[begin:end] + + def range(self, lower, upper, bounds): + ''' + Find values in the given range. + + Parameters + ---------- + lower : tuple + Lower search bound + upper : tuple + Upper search bound + bounds : tuple (x, y) of bools + Indicates whether the search should be inclusive or + exclusive with respect to the endpoints. The first + argument x corresponds to an inclusive lower bound, + and the second argument y to an inclusive upper bound. + ''' + lower_pos = self.find_pos(lower, 0) + upper_pos = self.find_pos(upper, 0) + if lower_pos == len(self.row_index): + return [] + + lower_bound = tuple([col[lower_pos] for col in self.cols]) + if not bounds[0] and lower_bound == lower: + lower_pos += 1 # data[lower_pos] > lower + + # data[lower_pos] >= lower + # data[upper_pos] >= upper + if upper_pos < len(self.row_index): + upper_bound = tuple([col[upper_pos] for col in self.cols]) + if not bounds[1] and upper_bound == upper: + upper_pos -= 1 # data[upper_pos] < upper + elif upper_bound > upper: + upper_pos -= 1 # data[upper_pos] <= upper + return self.row_index[lower_pos:upper_pos + 1] + + def remove(self, key, data): + ''' + Remove the given entry from the sorted array. + + Parameters + ---------- + key : tuple + Column values + data : int + Row number + + Returns + ------- + successful : bool + Whether the entry was successfully removed + ''' + pos = self.find_pos(key, data, exact=True) + if pos == -1: # key not found + return False + + self.data.remove_row(pos) + keep_mask = np.ones(len(self.row_index), dtype=np.bool) + keep_mask[pos] = False + self.row_index = self.row_index[keep_mask] + return True + + def shift_left(self, row): + ''' + Decrement all row numbers greater than the input row. + + Parameters + ---------- + row : int + Input row number + ''' + self.row_index[self.row_index > row] -= 1 + + def shift_right(self, row): + ''' + Increment all row numbers greater than or equal to the input row. 
+ + Parameters + ---------- + row : int + Input row number + ''' + self.row_index[self.row_index >= row] += 1 + + def replace_rows(self, row_map): + ''' + Replace all rows with the values they map to in the + given dictionary. Any rows not present as keys in + the dictionary will have their entries deleted. + + Parameters + ---------- + row_map : dict + Mapping of row numbers to new row numbers + ''' + num_rows = len(row_map) + keep_rows = np.zeros(len(self.row_index), dtype=np.bool) + tagged = 0 + for i, row in enumerate(self.row_index): + if row in row_map: + keep_rows[i] = True + tagged += 1 + if tagged == num_rows: + break + + self.data = self.data[keep_rows] + self.row_index = np.array( + [row_map[x] for x in self.row_index[keep_rows]]) + + def items(self): + ''' + Retrieve all array items as a list of pairs of the form + [(key, [row 1, row 2, ...]), ...] + ''' + array = [] + last_key = None + for i, key in enumerate(zip(*self.data.columns.values())): + row = self.row_index[i] + if key == last_key: + array[-1][1].append(row) + else: + last_key = key + array.append((key, [row])) + return array + + def sort(self): + ''' + Make row order align with key order. + ''' + self.row_index = np.arange(len(self.row_index)) + + def sorted_data(self): + ''' + Return rows in sorted order. + ''' + return self.row_index + + def __getitem__(self, item): + ''' + Return a sliced reference to this sorted array. + + Parameters + ---------- + item : slice + Slice to use for referencing + ''' + return SortedArray(self.data[item], self.row_index[item]) + + def __repr__(self): + t = self.data.copy() + t['rows'] = self.row_index + return str(t) + + def __str__(self): + return repr(self) diff --git a/astropy/table/table.py b/astropy/table/table.py new file mode 100644 index 0000000..8a4073b --- /dev/null +++ b/astropy/table/table.py @@ -0,0 +1,2852 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, division, print_function, + unicode_literals) +from ..extern import six +from ..extern.six.moves import zip, range +from .index import TableIndices, TableLoc, TableILoc + +import re +import sys +from collections import OrderedDict, Mapping +import warnings +from copy import deepcopy + +import numpy as np +from numpy import ma + +from .. import log +from ..io import registry as io_registry +from ..units import Quantity, QuantityInfo +from ..utils import isiterable, ShapedLikeNDArray +from ..utils.compat.numpy import broadcast_to as np_broadcast_to +from ..utils.console import color_print +from ..utils.metadata import MetaData +from ..utils.data_info import BaseColumnInfo, MixinInfo, ParentDtypeInfo, DataInfo +from . import groups +from .pprint import TableFormatter +from .column import (BaseColumn, Column, MaskedColumn, _auto_names, FalseArray, + col_copy) +from .row import Row +from .np_utils import fix_column_name, recarray_fromrecords +from .info import TableInfo +from .index import Index, _IndexModeContext, get_index +from . import conf + + +__doctest_skip__ = ['Table.read', 'Table.write', + 'Table.convert_bytestring_to_unicode', + 'Table.convert_unicode_to_bytestring', + ] + + +class TableReplaceWarning(UserWarning): + """ + Warning class for cases when a table column is replaced via the + Table.__setitem__ syntax e.g. t['a'] = val. + + This does not inherit from AstropyWarning because we want to use + stacklevel=3 to show the user where the issue occurred in their code. 
+ """ + pass + + +def descr(col): + """Array-interface compliant full description of a column. + + This returns a 3-tuple (name, type, shape) that can always be + used in a structured array dtype definition. + """ + col_dtype = 'O' if (col.info.dtype is None) else col.info.dtype + col_shape = col.shape[1:] if hasattr(col, 'shape') else () + return (col.info.name, col_dtype, col_shape) + + +def has_info_class(obj, cls): + return hasattr(obj, 'info') and isinstance(obj.info, cls) + + +class TableColumns(OrderedDict): + """OrderedDict subclass for a set of columns. + + This class enhances item access to provide convenient access to columns + by name or index, including slice access. It also handles renaming + of columns. + + The initialization argument ``cols`` can be a list of ``Column`` objects + or any structure that is valid for initializing a Python dict. This + includes a dict, list of (key, val) tuples or [key, val] lists, etc. + + Parameters + ---------- + cols : dict, list, tuple; optional + Column objects as data structure that can init dict (see above) + """ + + def __init__(self, cols={}): + if isinstance(cols, (list, tuple)): + # `cols` should be a list of two-tuples, but it is allowed to have + # columns (BaseColumn or mixins) in the list. + newcols = [] + for col in cols: + if has_info_class(col, BaseColumnInfo): + newcols.append((col.info.name, col)) + else: + newcols.append(col) + cols = newcols + super(TableColumns, self).__init__(cols) + + def __getitem__(self, item): + """Get items from a TableColumns object. + :: + + tc = TableColumns(cols=[Column(name='a'), Column(name='b'), Column(name='c')]) + tc['a'] # Column('a') + tc[1] # Column('b') + tc['a', 'b'] # + tc[1:3] # + """ + if isinstance(item, six.string_types): + return OrderedDict.__getitem__(self, item) + elif isinstance(item, (int, np.integer)): + return self.values()[item] + elif (isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == 'i'): + return self.values()[item.item()] + elif isinstance(item, tuple): + return self.__class__([self[x] for x in item]) + elif isinstance(item, slice): + return self.__class__([self[x] for x in list(self)[item]]) + else: + raise IndexError('Illegal key or index value for {} object' + .format(self.__class__.__name__)) + + def __setitem__(self, item, value): + if item in self: + raise ValueError("Cannot replace column '{0}'. Use Table.replace_column() instead." + .format(item)) + super(TableColumns, self).__setitem__(item, value) + + def __repr__(self): + names = ("'{0}'".format(x) for x in six.iterkeys(self)) + return "<{1} names=({0})>".format(",".join(names), self.__class__.__name__) + + def _rename_column(self, name, new_name): + if name == new_name: + return + + if new_name in self: + raise KeyError("Column {0} already exists".format(new_name)) + + mapper = {name: new_name} + new_names = [mapper.get(name, name) for name in self] + cols = list(six.itervalues(self)) + self.clear() + self.update(list(zip(new_names, cols))) + + # Define keys and values for Python 2 and 3 source compatibility + def keys(self): + return list(OrderedDict.keys(self)) + + def values(self): + return list(OrderedDict.values(self)) + + def isinstance(self, cls): + """ + Return a list of columns which are instances of the specified classes. + + Parameters + ---------- + cls : class or tuple of classes + Column class (including mixin) or tuple of Column classes. + + Returns + ------- + col_list : list of Columns + List of Column objects which are instances of given classes. 
+ """ + cols = [col for col in self.values() if isinstance(col, cls)] + return cols + + def not_isinstance(self, cls): + """ + Return a list of columns which are not instances of the specified classes. + + Parameters + ---------- + cls : class or tuple of classes + Column class (including mixin) or tuple of Column classes. + + Returns + ------- + col_list : list of Columns + List of Column objects which are not instances of given classes. + """ + cols = [col for col in self.values() if not isinstance(col, cls)] + return cols + + +class Table(object): + """A class to represent tables of heterogeneous data. + + `Table` provides a class for heterogeneous tabular data, making use of a + `numpy` structured array internally to store the data values. A key + enhancement provided by the `Table` class is the ability to easily modify + the structure of the table by adding or removing columns, or adding new + rows of data. In addition table and column metadata are fully supported. + + `Table` differs from `~astropy.nddata.NDData` by the assumption that the + input data consists of columns of homogeneous data, where each column + has a unique identifier and may contain additional metadata such as the + data unit, format, and description. + + Parameters + ---------- + data : numpy ndarray, dict, list, Table, or table-like object, optional + Data to initialize table. + masked : bool, optional + Specify whether the table is masked. + names : list, optional + Specify column names. + dtype : list, optional + Specify column data types. + meta : dict, optional + Metadata associated with the table. + copy : bool, optional + Copy the input data. If the input is a Table the ``meta`` is always + copied regardless of the ``copy`` parameter. + Default is True. + rows : numpy ndarray, list of lists, optional + Row-oriented data for table instead of ``data`` argument. + copy_indices : bool, optional + Copy any indices in the input data. Default is True. + **kwargs : dict, optional + Additional keyword args when converting table-like object. + """ + + meta = MetaData() + + # Define class attributes for core container objects to allow for subclass + # customization. + Row = Row + Column = Column + MaskedColumn = MaskedColumn + TableColumns = TableColumns + TableFormatter = TableFormatter + + def as_array(self, keep_byteorder=False): + """ + Return a new copy of the table in the form of a structured np.ndarray or + np.ma.MaskedArray object (as appropriate). + + Parameters + ---------- + keep_byteorder : bool, optional + By default the returned array has all columns in native byte + order. However, if this option is `True` this preserves the + byte order of all columns (if any are non-native). 
+ + Returns + ------- + table_array : np.ndarray (unmasked) or np.ma.MaskedArray (masked) + Copy of table as a numpy structured array + """ + if len(self.columns) == 0: + return None + + sys_byteorder = ('>', '<')[sys.byteorder == 'little'] + native_order = ('=', sys_byteorder) + + dtype = [] + + cols = self.columns.values() + + for col in cols: + col_descr = descr(col) + byteorder = col.info.dtype.byteorder + + if not keep_byteorder and byteorder not in native_order: + new_dt = np.dtype(col_descr[1]).newbyteorder('=') + col_descr = (col_descr[0], new_dt, col_descr[2]) + + dtype.append(col_descr) + + empty_init = ma.empty if self.masked else np.empty + data = empty_init(len(self), dtype=dtype) + for col in cols: + # When assigning from one array into a field of a structured array, + # Numpy will automatically swap those columns to their destination + # byte order where applicable + data[col.info.name] = col + + return data + + def __init__(self, data=None, masked=None, names=None, dtype=None, + meta=None, copy=True, rows=None, copy_indices=True, + **kwargs): + + # Set up a placeholder empty table + self._set_masked(masked) + self.columns = self.TableColumns() + self.meta = meta + self.formatter = self.TableFormatter() + self._copy_indices = True # copy indices from this Table by default + self._init_indices = copy_indices # whether to copy indices in init + self.primary_key = None + + # Must copy if dtype are changing + if not copy and dtype is not None: + raise ValueError('Cannot specify dtype when copy=False') + + # Row-oriented input, e.g. list of lists or list of tuples, list of + # dict, Row instance. Set data to something that the subsequent code + # will parse correctly. + is_list_of_dict = False + if rows is not None: + if data is not None: + raise ValueError('Cannot supply both `data` and `rows` values') + if all(isinstance(row, dict) for row in rows): + is_list_of_dict = True # Avoid doing the all(...) test twice. + data = rows + elif isinstance(rows, self.Row): + data = rows + else: + rec_data = recarray_fromrecords(rows) + data = [rec_data[name] for name in rec_data.dtype.names] + + # Infer the type of the input data and set up the initialization + # function, number of columns, and potentially the default col names + + default_names = None + + if hasattr(data, '__astropy_table__'): + # Data object implements the __astropy_table__ interface method. + # Calling that method returns an appropriate instance of + # self.__class__ and respects the `copy` arg. The returned + # Table object should NOT then be copied (though the meta + # will be deep-copied anyway). 
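# A minimal sketch of a data container implementing the ``__astropy_table__``
# interface described in the comment above (the class ``MyData`` is
# hypothetical, not part of astropy): the method receives the requested Table
# subclass and the ``copy`` flag, and must return an instance of that class.
#
#     >>> class MyData(object):
#     ...     def __init__(self, cols):
#     ...         self.cols = cols                    # dict: name -> values
#     ...     def __astropy_table__(self, cls, copy, **kwargs):
#     ...         return cls(self.cols, copy=copy)    # instance of requested class
#     >>> t = Table(MyData({'a': [1, 2]}))            # dispatches to the call below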
+ data = data.__astropy_table__(self.__class__, copy, **kwargs) + copy = False + elif kwargs: + raise TypeError('__init__() got unexpected keyword argument {!r}' + .format(list(kwargs.keys())[0])) + + if (isinstance(data, np.ndarray) and + data.shape == (0,) and + not data.dtype.names): + data = None + + if isinstance(data, self.Row): + data = data._table[data._index:data._index + 1] + + if isinstance(data, (list, tuple)): + init_func = self._init_from_list + if data and (is_list_of_dict or all(isinstance(row, dict) for row in data)): + n_cols = len(data[0]) + else: + n_cols = len(data) + + elif isinstance(data, np.ndarray): + if data.dtype.names: + init_func = self._init_from_ndarray # _struct + n_cols = len(data.dtype.names) + default_names = data.dtype.names + else: + init_func = self._init_from_ndarray # _homog + if data.shape == (): + raise ValueError('Can not initialize a Table with a scalar') + elif len(data.shape) == 1: + data = data[np.newaxis, :] + n_cols = data.shape[1] + + elif isinstance(data, Mapping): + init_func = self._init_from_dict + default_names = list(data) + n_cols = len(default_names) + + elif isinstance(data, Table): + init_func = self._init_from_table + n_cols = len(data.colnames) + default_names = data.colnames + # don't copy indices if the input Table is in non-copy mode + self._init_indices = self._init_indices and data._copy_indices + + elif data is None: + if names is None: + if dtype is None: + return # Empty table + try: + # No data nor names but dtype is available. This must be + # valid to initialize a structured array. + dtype = np.dtype(dtype) + names = dtype.names + dtype = [dtype[name] for name in names] + except Exception: + raise ValueError('dtype was specified but could not be ' + 'parsed for column names') + # names is guaranteed to be set at this point + init_func = self._init_from_list + n_cols = len(names) + data = [[]] * n_cols + + else: + raise ValueError('Data type {0} not allowed to init Table' + .format(type(data))) + + # Set up defaults if names and/or dtype are not specified. + # A value of None means the actual value will be inferred + # within the appropriate initialization routine, either from + # existing specification or auto-generated. + + if names is None: + names = default_names or [None] * n_cols + if dtype is None: + dtype = [None] * n_cols + + # Numpy does not support Unicode column names on Python 2, or + # bytes column names on Python 3, so fix them up now. + names = [fix_column_name(name) for name in names] + + self._check_names_dtype(names, dtype, n_cols) + + # Finally do the real initialization + init_func(data, names, dtype, n_cols, copy) + + # Whatever happens above, the masked property should be set to a boolean + if type(self.masked) is not bool: + raise TypeError("masked property has not been set to True or False") + + def __getstate__(self): + columns = OrderedDict((key, col if isinstance(col, BaseColumn) else col_copy(col)) + for key, col in self.columns.items()) + return (columns, self.meta) + + def __setstate__(self, state): + columns, meta = state + self.__init__(columns, meta=meta) + + @property + def mask(self): + # Dynamic view of available masks + if self.masked: + mask_table = Table([col.mask for col in self.columns.values()], + names=self.colnames, copy=False) + + # Set hidden attribute to force inplace setitem so that code like + # t.mask['a'] = [1, 0, 1] will correctly set the underlying mask. + # See #5556 for discussion. 
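# An illustrative sketch of the behavior this enables (assuming a masked
# table; not part of the original source):
#
#     >>> t = Table([[1, 2, 3]], names=('a',), masked=True)
#     >>> t.mask['a'] = [True, False, True]   # updates t['a'].mask in place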
+ mask_table._setitem_inplace = True + else: + mask_table = None + + return mask_table + + @mask.setter + def mask(self, val): + self.mask[:] = val + + @property + def _mask(self): + """This is needed so that comparison of a masked Table and a + MaskedArray works. The requirement comes from numpy.ma.core + so don't remove this property.""" + return self.as_array().mask + + def filled(self, fill_value=None): + """Return a copy of self, with masked values filled. + + If input ``fill_value`` supplied then that value is used for all + masked entries in the table. Otherwise the individual + ``fill_value`` defined for each table column is used. + + Parameters + ---------- + fill_value : str + If supplied, this ``fill_value`` is used for all masked entries + in the entire table. + + Returns + ------- + filled_table : Table + New table with masked values filled + """ + if self.masked: + data = [col.filled(fill_value) for col in six.itervalues(self.columns)] + else: + data = self + return self.__class__(data, meta=deepcopy(self.meta)) + + @property + def indices(self): + ''' + Return the indices associated with columns of the table + as a TableIndices object. + ''' + lst = [] + for column in self.columns.values(): + for index in column.info.indices: + if sum([index is x for x in lst]) == 0: # ensure uniqueness + lst.append(index) + return TableIndices(lst) + + @property + def loc(self): + ''' + Return a TableLoc object that can be used for retrieving + rows by index in a given data range. Note that both loc + and iloc work only with single-column indices. + ''' + return TableLoc(self) + + @property + def iloc(self): + ''' + Return a TableILoc object that can be used for retrieving + indexed rows in the order they appear in the index. + ''' + return TableILoc(self) + + def add_index(self, colnames, engine=None, unique=False): + ''' + Insert a new index among one or more columns. + If there are no indices, make this index the + primary table index. + + Parameters + ---------- + colnames : str or list + List of column names (or a single column name) to index + engine : type or None + Indexing engine class to use, from among SortedArray, BST, + FastBST, and FastRBT. If the supplied argument is None (by + default), use SortedArray. + unique : bool + Whether the values of the index must be unique. Default is False. + ''' + if isinstance(colnames, six.string_types): + colnames = (colnames,) + columns = self.columns[tuple(colnames)].values() + + # make sure all columns support indexing + for col in columns: + if not getattr(col.info, '_supports_indexing', False): + raise ValueError('Cannot create an index on column "{0}", of ' + 'type "{1}"'.format(col.info.name, type(col))) + + index = Index(columns, engine=engine, unique=unique) + if not self.indices: + self.primary_key = colnames + for col in columns: + col.info.indices.append(index) + + def remove_indices(self, colname): + ''' + Remove all indices involving the given column. + If the primary index is removed, the new primary + index will be the most recently added remaining + index. + + Parameters + ---------- + colname : str + Name of column + ''' + col = self.columns[colname] + for index in self.indices: + try: + index.col_position(col.info.name) + except ValueError: + pass + else: + for c in index.columns: + c.info.indices.remove(index) + + def index_mode(self, mode): + ''' + Return a context manager for an indexing mode. + + Parameters + ---------- + mode : str + Either 'freeze', 'copy_on_getitem', or 'discard_on_copy'. 
+            In 'discard_on_copy' mode,
+            indices are not copied whenever columns or tables are copied.
+            In 'freeze' mode, indices are not modified whenever columns are
+            modified; at the exit of the context, indices refresh themselves
+            based on column values. This mode is intended for scenarios in
+            which one intends to make many additions or modifications in an
+            indexed column.
+            In 'copy_on_getitem' mode, indices are copied when taking column
+            slices as well as table slices, so col[i0:i1] will preserve
+            indices.
+        '''
+        return _IndexModeContext(self, mode)
+
+    def __array__(self, dtype=None):
+        """Support converting Table to np.array via np.array(table).
+
+        Coercion to a different dtype via np.array(table, dtype) is not
+        supported and will raise a ValueError.
+        """
+        if dtype is not None:
+            raise ValueError('Datatype coercion is not allowed')
+
+        # This limitation is because of the following unexpected result that
+        # should have made a table copy while changing the column names.
+        #
+        # >>> d = astropy.table.Table([[1,2],[3,4]])
+        # >>> np.array(d, dtype=[('a', 'i8'), ('b', 'i8')])
+        # array([(0, 0), (0, 0)],
+        #       dtype=[('a', '<i8'), ('b', '<i8')])
+
+        return self.as_array().data if self.masked else self.as_array()
+
+    def _check_names_dtype(self, names, dtype, n_cols):
+        """Make sure that names and dtype are both iterable and have
+        the same length as data.
+        """
+        for inp_list, inp_str in ((dtype, 'dtype'), (names, 'names')):
+            if not isiterable(inp_list):
+                raise ValueError('{0} must be a list or None'.format(inp_str))
+
+        if len(names) != n_cols or len(dtype) != n_cols:
+            raise ValueError(
+                'Arguments "names" and "dtype" must match number of columns')
+
+    def _set_masked_from_cols(self, cols):
+        if self.masked is None:
+            if any(isinstance(col, (MaskedColumn, ma.MaskedArray)) for col in cols):
+                self._set_masked(True)
+            else:
+                self._set_masked(False)
+        elif not self.masked:
+            if any(isinstance(col, (MaskedColumn, ma.MaskedArray)) for col in cols):
+                self._set_masked(True)
+
+    def _init_from_list_of_dicts(self, data, names, dtype, n_cols, copy):
+        names_from_data = set()
+        for row in data:
+            names_from_data.update(row)
+
+        cols = {}
+        for name in names_from_data:
+            cols[name] = []
+            for i, row in enumerate(data):
+                try:
+                    cols[name].append(row[name])
+                except KeyError:
+                    raise ValueError('Row {0} has no value for column {1}'.format(i, name))
+
+        if all(name in names_from_data for name in names):
+            cols = [cols[name] for name in names]
+        else:
+            cols = cols.values()
+
+        self._init_from_list(cols, names, dtype, n_cols, copy)
+
+    def _init_from_list(self, data, names, dtype, n_cols, copy):
+        """Initialize table from a list of columns.  A column can be a
+        Column object, np.ndarray, mixin, or any other iterable object.
+        """
+        if data and all(isinstance(row, dict) for row in data):
+            self._init_from_list_of_dicts(data, names, dtype, n_cols, copy)
+            return
+
+        cols = []
+        def_names = _auto_names(n_cols)
+
+        for col, name, def_name, dtype in zip(data, names, def_names, dtype):
+            # Structured ndarray gets viewed as a mixin (unless already a valid
+            # mixin class).
+            if (isinstance(col, np.ndarray) and len(col.dtype) > 1 and
+                    not self._add_as_mixin_column(col)):
+                col = col.view(NdarrayMixin)
+
+            if isinstance(col, (Column, MaskedColumn)):
+                col = self.ColumnClass(name=(name or col.info.name or def_name),
+                                       data=col, dtype=dtype,
+                                       copy=copy, copy_indices=self._init_indices)
+            elif self._add_as_mixin_column(col):
+                # Copy the mixin column attributes if they exist since the copy below
+                # may not get this attribute.
+                if copy:
+                    col = col_copy(col, copy_indices=self._init_indices)
+
+                col.info.name = name or col.info.name or def_name
+            elif isinstance(col, np.ndarray) or isiterable(col):
+                col = self.ColumnClass(name=(name or def_name), data=col, dtype=dtype,
+                                       copy=copy, copy_indices=self._init_indices)
+            else:
+                raise ValueError('Elements in list initialization must be '
+                                 'either Column or list-like')
+
+            cols.append(col)
+
+        self._init_from_cols(cols)
+
+    def _init_from_ndarray(self, data, names, dtype, n_cols, copy):
+        """Initialize table from an ndarray structured array"""
+
+        data_names = data.dtype.names or _auto_names(n_cols)
+        struct = data.dtype.names is not None
+        names = [name or data_names[i] for i, name in enumerate(names)]
+
+        cols = ([data[name] for name in data_names] if struct else
+                [data[:, i] for i in range(n_cols)])
+
+        # Set self.masked appropriately, then get class to create column instances.
+        self._set_masked_from_cols(cols)
+
+        if copy:
+            self._init_from_list(cols, names, dtype, n_cols, copy)
+        else:
+            dtype = [(name, col.dtype, col.shape[1:]) for name, col in zip(names, cols)]
+            newdata = data.view(dtype).ravel()
+            columns = self.TableColumns()
+
+            for name in names:
+                columns[name] = self.ColumnClass(name=name, data=newdata[name])
+                columns[name].info.parent_table = self
+            self.columns = columns
+
+    def _init_from_dict(self, data, names, dtype, n_cols, copy):
+        """Initialize table from a dictionary of columns"""
+
+        # TODO: is this restriction still needed with no ndarray?
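# An illustrative sketch of the restriction enforced just below (not part
# of the original source): dict input must be copied, so requesting a
# reference raises an error.
#
#     >>> Table({'a': [1, 2]}, copy=False)    # raises:
#     ValueError: Cannot use copy=False with a dict data input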
+ if not copy: + raise ValueError('Cannot use copy=False with a dict data input') + + data_list = [data[name] for name in names] + self._init_from_list(data_list, names, dtype, n_cols, copy) + + def _init_from_table(self, data, names, dtype, n_cols, copy): + """Initialize table from an existing Table object """ + + table = data # data is really a Table, rename for clarity + self.meta.clear() + self.meta.update(deepcopy(table.meta)) + self.primary_key = table.primary_key + cols = list(table.columns.values()) + + self._init_from_list(cols, names, dtype, n_cols, copy) + + def _convert_col_for_table(self, col): + """ + Make sure that all Column objects have correct class for this type of + Table. For a base Table this most commonly means setting to + MaskedColumn if the table is masked. Table subclasses like QTable + override this method. + """ + if col.__class__ is not self.ColumnClass and isinstance(col, Column): + col = self.ColumnClass(col) # copy attributes and reference data + return col + + def _init_from_cols(self, cols): + """Initialize table from a list of Column or mixin objects""" + + lengths = set(len(col) for col in cols) + if len(lengths) != 1: + raise ValueError('Inconsistent data column lengths: {0}' + .format(lengths)) + + # Set the table masking + self._set_masked_from_cols(cols) + + # Make sure that all Column-based objects have correct class. For + # plain Table this is self.ColumnClass, but for instance QTable will + # convert columns with units to a Quantity mixin. + newcols = [self._convert_col_for_table(col) for col in cols] + self._make_table_from_cols(self, newcols) + + # Deduplicate indices. It may happen that after pickling or when + # initing from an existing table that column indices which had been + # references to a single index object got *copied* into an independent + # object. This results in duplicates which will cause downstream problems. + index_dict = {} + for col in self.itercols(): + for i, index in enumerate(col.info.indices or []): + names = tuple(ind_col.info.name for ind_col in index.columns) + if names in index_dict: + col.info.indices[i] = index_dict[names] + else: + index_dict[names] = index + + def _new_from_slice(self, slice_): + """Create a new table as a referenced slice from self.""" + + table = self.__class__(masked=self.masked) + table.meta.clear() + table.meta.update(deepcopy(self.meta)) + table.primary_key = self.primary_key + cols = self.columns.values() + + newcols = [] + for col in cols: + col.info._copy_indices = self._copy_indices + newcol = col[slice_] + if col.info.indices: + newcol = col.info.slice_indices(newcol, slice_, len(col)) + newcols.append(newcol) + col.info._copy_indices = True + + self._make_table_from_cols(table, newcols) + return table + + @staticmethod + def _make_table_from_cols(table, cols): + """ + Make ``table`` in-place so that it represents the given list of ``cols``. + """ + colnames = set(col.info.name for col in cols) + if None in colnames: + raise TypeError('Cannot have None for column name') + if len(colnames) != len(cols): + raise ValueError('Duplicate column names') + + columns = table.TableColumns((col.info.name, col) for col in cols) + + for col in cols: + col.info.parent_table = table + if table.masked and not hasattr(col, 'mask'): + col.mask = FalseArray(col.shape) + + table.columns = columns + + def itercols(self): + """ + Iterate over the columns of this table. + + Examples + -------- + + To iterate over the columns of a table:: + + >>> t = Table([[1], [2]]) + >>> for col in t.itercols(): + ... 
print(col)
+        col0
+        ----
+           1
+        col1
+        ----
+           2
+
+        Using ``itercols()`` is similar to ``for col in t.columns.values()``
+        but is syntactically preferred.
+        """
+        for colname in self.columns:
+            yield self[colname]
+
+    def _base_repr_(self, html=False, descr_vals=None, max_width=None,
+                    tableid=None, show_dtype=True, max_lines=None,
+                    tableclass=None):
+        if descr_vals is None:
+            descr_vals = [self.__class__.__name__]
+            if self.masked:
+                descr_vals.append('masked=True')
+            descr_vals.append('length={0}'.format(len(self)))
+
+        descr = '<' + ' '.join(descr_vals) + '>\n'
+
+        if html:
+            from ..utils.xml.writer import xml_escape
+            descr = xml_escape(descr)
+
+        if tableid is None:
+            tableid = 'table{id}'.format(id=id(self))
+
+        data_lines, outs = self.formatter._pformat_table(
+            self, tableid=tableid, html=html, max_width=max_width,
+            show_name=True, show_unit=None, show_dtype=show_dtype,
+            max_lines=max_lines, tableclass=tableclass)
+
+        out = descr + '\n'.join(data_lines)
+        if six.PY2 and isinstance(out, six.text_type):
+            out = out.encode('utf-8')
+
+        return out
+
+    def _repr_html_(self):
+        return self._base_repr_(html=True, max_width=-1,
+                                tableclass=conf.default_notebook_table_class)
+
+    def __repr__(self):
+        return self._base_repr_(html=False, max_width=None)
+
+    def __unicode__(self):
+        return '\n'.join(self.pformat())
+    if not six.PY2:
+        __str__ = __unicode__
+
+    def __bytes__(self):
+        return six.text_type(self).encode('utf-8')
+    if six.PY2:
+        __str__ = __bytes__
+
+    @property
+    def has_mixin_columns(self):
+        """
+        True if table has any mixin columns (defined as columns that are not Column
+        subclasses).
+        """
+        return any(has_info_class(col, MixinInfo) for col in self.columns.values())
+
+    def _add_as_mixin_column(self, col):
+        """
+        Determine if ``col`` should be added to the table directly as
+        a mixin column.
+        """
+        if isinstance(col, BaseColumn):
+            return False
+
+        # Is it a mixin but not Quantity (which gets converted to Column with
+        # unit set).
+        return has_info_class(col, MixinInfo) and not has_info_class(col, QuantityInfo)
+
+    def pprint(self, max_lines=None, max_width=None, show_name=True,
+               show_unit=None, show_dtype=False, align=None):
+        """Print a formatted string representation of the table.
+
+        If no value of ``max_lines`` is supplied then the height of the
+        screen terminal is used to set ``max_lines``.  If the terminal
+        height cannot be determined then the default is taken from the
+        configuration item ``astropy.conf.max_lines``.  If a negative
+        value of ``max_lines`` is supplied then there is no line limit
+        applied.
+
+        The same applies for max_width except the configuration item is
+        ``astropy.conf.max_width``.
+
+        Parameters
+        ----------
+        max_lines : int
+            Maximum number of lines in table output.
+
+        max_width : int or `None`
+            Maximum character width of output.
+
+        show_name : bool
+            Include a header row for column names. Default is True.
+
+        show_unit : bool
+            Include a header row for unit.  Default is to show a row
+            for units only if one or more columns has a defined value
+            for the unit.
+
+        show_dtype : bool
+            Include a header row for column dtypes. Default is False.
+
+        align : str or list or tuple or `None`
+            Left/right alignment of columns. Default is right (None) for all
+            columns. Other allowed values are '>', '<', '^', and '0=' for
+            right, left, centered, and 0-padded, respectively. A list of
+            strings can be provided for alignment of tables with multiple
+            columns.
+ """ + lines, outs = self.formatter._pformat_table(self, max_lines, max_width, + show_name=show_name, show_unit=show_unit, + show_dtype=show_dtype, align=align) + if outs['show_length']: + lines.append('Length = {0} rows'.format(len(self))) + + n_header = outs['n_header'] + + for i, line in enumerate(lines): + if i < n_header: + color_print(line, 'red') + else: + print(line) + + def _make_index_row_display_table(self, index_row_name): + if index_row_name not in self.columns: + idx_col = self.ColumnClass(name=index_row_name, data=np.arange(len(self))) + return self.__class__([idx_col] + self.columns.values(), + copy=False) + else: + return self + + def show_in_notebook(self, tableid=None, css=None, display_length=50, + table_class='astropy-default', show_row_index='idx'): + """Render the table in HTML and show it in the IPython notebook. + + Parameters + ---------- + tableid : str or `None` + An html ID tag for the table. Default is ``table{id}-XXX``, where + id is the unique integer id of the table object, id(self), and XXX + is a random number to avoid conflicts when printing the same table + multiple times. + table_class : str or `None` + A string with a list of HTML classes used to style the table. + The special default string ('astropy-default') means that the string + will be retrieved from the configuration item + ``astropy.table.default_notebook_table_class``. Note that these + table classes may make use of bootstrap, as this is loaded with the + notebook. See `this page `_ + for the list of classes. + css : string + A valid CSS string declaring the formatting for the table. Defaults + to ``astropy.table.jsviewer.DEFAULT_CSS_NB``. + display_length : int, optional + Number or rows to show. Defaults to 50. + show_row_index : str or False + If this does not evaluate to False, a column with the given name + will be added to the version of the table that gets displayed. + This new column shows the index of the row in the table itself, + even when the displayed table is re-sorted by another column. Note + that if a column with this name already exists, this option will be + ignored. Defaults to "idx". + + Notes + ----- + Currently, unlike `show_in_browser` (with ``jsviewer=True``), this + method needs to access online javascript code repositories. This is due + to modern browsers' limitations on accessing local files. Hence, if you + call this method while offline (and don't have a cached version of + jquery and jquery.dataTables), you will not get the jsviewer features. 
+ """ + + from .jsviewer import JSViewer + from IPython.display import HTML + + if tableid is None: + tableid = 'table{0}-{1}'.format(id(self), + np.random.randint(1, 1e6)) + + jsv = JSViewer(display_length=display_length) + if show_row_index: + display_table = self._make_index_row_display_table(show_row_index) + else: + display_table = self + if table_class == 'astropy-default': + table_class = conf.default_notebook_table_class + html = display_table._base_repr_(html=True, max_width=-1, tableid=tableid, + max_lines=-1, show_dtype=False, + tableclass=table_class) + + columns = display_table.columns.values() + sortable_columns = [i for i, col in enumerate(columns) + if col.dtype.kind in 'iufc'] + html += jsv.ipynb(tableid, css=css, sort_columns=sortable_columns) + return HTML(html) + + def show_in_browser(self, max_lines=5000, jsviewer=False, + browser='default', jskwargs={'use_local_files': True}, + tableid=None, table_class="display compact", + css=None, show_row_index='idx'): + """Render the table in HTML and show it in a web browser. + + Parameters + ---------- + max_lines : int + Maximum number of rows to export to the table (set low by default + to avoid memory issues, since the browser view requires duplicating + the table in memory). A negative value of ``max_lines`` indicates + no row limit. + jsviewer : bool + If `True`, prepends some javascript headers so that the table is + rendered as a `DataTables `_ data table. + This allows in-browser searching & sorting. + browser : str + Any legal browser name, e.g. ``'firefox'``, ``'chrome'``, + ``'safari'`` (for mac, you may need to use ``'open -a + "/Applications/Google Chrome.app" {}'`` for Chrome). If + ``'default'``, will use the system default browser. + jskwargs : dict + Passed to the `astropy.table.JSViewer` init. Defaults to + ``{'use_local_files': True}`` which means that the JavaScript + libraries will be served from local copies. + tableid : str or `None` + An html ID tag for the table. Default is ``table{id}``, where id + is the unique integer id of the table object, id(self). + table_class : str or `None` + A string with a list of HTML classes used to style the table. + Default is "display compact", and other possible values can be + found in http://www.datatables.net/manual/styling/classes + css : string + A valid CSS string declaring the formatting for the table. Defaults + to ``astropy.table.jsviewer.DEFAULT_CSS``. + show_row_index : str or False + If this does not evaluate to False, a column with the given name + will be added to the version of the table that gets displayed. + This new column shows the index of the row in the table itself, + even when the displayed table is re-sorted by another column. Note + that if a column with this name already exists, this option will be + ignored. Defaults to "idx". + """ + + import os + import webbrowser + import tempfile + from ..extern.six.moves.urllib.parse import urljoin + from ..extern.six.moves.urllib.request import pathname2url + from .jsviewer import DEFAULT_CSS + + if css is None: + css = DEFAULT_CSS + + # We can't use NamedTemporaryFile here because it gets deleted as + # soon as it gets garbage collected. 
+        tmpdir = tempfile.mkdtemp()
+        path = os.path.join(tmpdir, 'table.html')
+
+        with open(path, 'w') as tmp:
+            if jsviewer:
+                if show_row_index:
+                    display_table = self._make_index_row_display_table(show_row_index)
+                else:
+                    display_table = self
+                display_table.write(tmp, format='jsviewer', css=css,
+                                    max_lines=max_lines, jskwargs=jskwargs,
+                                    table_id=tableid, table_class=table_class)
+            else:
+                self.write(tmp, format='html')
+
+        try:
+            br = webbrowser.get(None if browser == 'default' else browser)
+        except webbrowser.Error:
+            log.error("Browser '{}' not found.".format(browser))
+        else:
+            br.open(urljoin('file:', pathname2url(path)))
+
+    def pformat(self, max_lines=None, max_width=None, show_name=True,
+                show_unit=None, show_dtype=False, html=False, tableid=None,
+                align=None, tableclass=None):
+        """Return a list of lines for the formatted string representation of
+        the table.
+
+        If no value of ``max_lines`` is supplied then the height of the
+        screen terminal is used to set ``max_lines``.  If the terminal
+        height cannot be determined then the default is taken from the
+        configuration item ``astropy.conf.max_lines``.  If a negative
+        value of ``max_lines`` is supplied then there is no line limit
+        applied.
+
+        The same applies for ``max_width`` except the configuration item is
+        ``astropy.conf.max_width``.
+
+        Parameters
+        ----------
+        max_lines : int or `None`
+            Maximum number of rows to output
+
+        max_width : int or `None`
+            Maximum character width of output
+
+        show_name : bool
+            Include a header row for column names. Default is True.
+
+        show_unit : bool
+            Include a header row for unit.  Default is to show a row
+            for units only if one or more columns has a defined value
+            for the unit.
+
+        show_dtype : bool
+            Include a header row for column dtypes. Default is False.
+
+        html : bool
+            Format the output as an HTML table. Default is False.
+
+        tableid : str or `None`
+            An ID tag for the table; only used if html is set.  Default is
+            "table{id}", where id is the unique integer id of the table object,
+            id(self)
+
+        align : str or list or tuple or `None`
+            Left/right alignment of columns. Default is right (None) for all
+            columns. Other allowed values are '>', '<', '^', and '0=' for
+            right, left, centered, and 0-padded, respectively. A list of
+            strings can be provided for alignment of tables with multiple
+            columns.
+
+        tableclass : str or list of str or `None`
+            CSS classes for the table; only used if html is set.  Default is
+            None.
+
+        Returns
+        -------
+        lines : list
+            Formatted table as a list of strings.
+
+        """
+
+        lines, outs = self.formatter._pformat_table(
+            self, max_lines, max_width, show_name=show_name,
+            show_unit=show_unit, show_dtype=show_dtype, html=html,
+            tableid=tableid, tableclass=tableclass, align=align)
+
+        if outs['show_length']:
+            lines.append('Length = {0} rows'.format(len(self)))
+
+        return lines
+
+    def more(self, max_lines=None, max_width=None, show_name=True,
+             show_unit=None, show_dtype=False):
+        """Interactively browse table with a paging interface.
+
+        Supported keys::
+
+          f, <space> : forward one page
+          b : back one page
+          r : refresh same page
+          n : next row
+          p : previous row
+          < : go to beginning
+          > : go to end
+          q : quit browsing
+          h : print this help
+
+        Parameters
+        ----------
+        max_lines : int
+            Maximum number of lines in table output
+
+        max_width : int or `None`
+            Maximum character width of output
+
+        show_name : bool
+            Include a header row for column names. Default is True.
+
+        show_unit : bool
+            Include a header row for unit.  Default is to show a row
+            for units only if one or more columns has a defined value
+            for the unit.
+
+        show_dtype : bool
+            Include a header row for column dtypes. Default is False.
+        """
+        self.formatter._more_tabcol(self, max_lines, max_width, show_name=show_name,
+                                    show_unit=show_unit, show_dtype=show_dtype)
+
+    def __getitem__(self, item):
+        if isinstance(item, six.string_types):
+            return self.columns[item]
+        elif isinstance(item, (int, np.integer)):
+            return self.Row(self, item)
+        elif (isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == 'i'):
+            return self.Row(self, item.item())
+        elif (isinstance(item, (tuple, list)) and item and
+              all(isinstance(x, six.string_types) for x in item)):
+            bad_names = [x for x in item if x not in self.colnames]
+            if bad_names:
+                raise ValueError('Slice name(s) {0} not valid column name(s)'
+                                 .format(', '.join(bad_names)))
+            out = self.__class__([self[x] for x in item],
+                                 meta=deepcopy(self.meta),
+                                 copy_indices=self._copy_indices)
+            out._groups = groups.TableGroups(out, indices=self.groups._indices,
+                                             keys=self.groups._keys)
+            return out
+        elif ((isinstance(item, np.ndarray) and item.size == 0) or
+              (isinstance(item, (tuple, list)) and not item)):
+            # If item is an empty array/list/tuple then return the table with no rows
+            return self._new_from_slice([])
+        elif (isinstance(item, slice) or
+              isinstance(item, np.ndarray) or
+              isinstance(item, list) or
+              isinstance(item, tuple) and all(isinstance(x, np.ndarray)
+                                              for x in item)):
+            # here for the many ways to give a slice; a tuple of ndarray
+            # is produced by np.where, as in t[np.where(t['a'] > 2)]
+            # For all, a new table is constructed with slice of all columns
+            return self._new_from_slice(item)
+        else:
+            raise ValueError('Illegal type {0} for table item access'
+                             .format(type(item)))
+
+    def __setitem__(self, item, value):
+        # If the item is a string then it must be the name of a column.
+        # If that column doesn't already exist then create it now.
+        if isinstance(item, six.string_types) and item not in self.colnames:
+            NewColumn = self.MaskedColumn if self.masked else self.Column
+            # If value doesn't have a dtype and won't be added as a mixin then
+            # convert to a numpy array.
+            if not hasattr(value, 'dtype') and not self._add_as_mixin_column(value):
+                value = np.asarray(value)
+
+            # Structured ndarray gets viewed as a mixin (unless already a valid
+            # mixin class).
+            if (isinstance(value, np.ndarray) and len(value.dtype) > 1 and
+                    not self._add_as_mixin_column(value)):
+                value = value.view(NdarrayMixin)
+
+            # Make new column and assign the value.  If the table currently
+            # has no rows (len=0) or the value is already a Column then
+            # define new column directly from value.  In the latter case
+            # this allows for propagation of Column metadata.  Otherwise
+            # define a new column with the right length and shape and then
+            # set it from value.  This allows for broadcasting, e.g. t['a']
+            # = 1.
+            name = item
+            # If this is a column-like object that could be added directly to table
+            if isinstance(value, BaseColumn) or self._add_as_mixin_column(value):
+                # If we're setting a new column to a scalar, broadcast it.
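# An illustrative sketch of the broadcasting described above (not part of
# the original source); the surrounding branches perform these broadcasts:
#
#     >>> t = Table([[1, 2, 3]], names=('a',))
#     >>> t['b'] = 0.0           # scalar broadcast to a full-length column
#     >>> t['c'] = Column([9])   # length-1 column broadcast the same way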
+ # (things will fail in _init_from_cols if this doesn't work) + if (len(self) > 0 and (getattr(value, 'isscalar', False) or + getattr(value, 'shape', None) == () or + len(value) == 1)): + new_shape = (len(self),) + getattr(value, 'shape', ())[1:] + if isinstance(value, np.ndarray): + value = np_broadcast_to(value, shape=new_shape, + subok=True) + elif isinstance(value, ShapedLikeNDArray): + value = value._apply(np_broadcast_to, shape=new_shape, + subok=True) + + new_column = col_copy(value) + new_column.info.name = name + + elif len(self) == 0: + new_column = NewColumn(value, name=name) + else: + new_column = NewColumn(name=name, length=len(self), dtype=value.dtype, + shape=value.shape[1:], + unit=getattr(value, 'unit', None)) + new_column[:] = value + + # Now add new column to the table + self.add_columns([new_column], copy=False) + + else: + n_cols = len(self.columns) + + if isinstance(item, six.string_types): + # Set an existing column by first trying to replace, and if + # this fails do an in-place update. See definition of mask + # property for discussion of the _setitem_inplace attribute. + if (not getattr(self, '_setitem_inplace', False) + and not conf.replace_inplace): + try: + self._replace_column_warnings(item, value) + return + except Exception: + pass + self.columns[item][:] = value + + elif isinstance(item, (int, np.integer)): + # Set the corresponding row assuming value is an iterable. + if not hasattr(value, '__len__'): + raise TypeError('Right side value must be iterable') + + if len(value) != n_cols: + raise ValueError('Right side value needs {0} elements (one for each column)' + .format(n_cols)) + + for col, val in zip(self.columns.values(), value): + col[item] = val + + elif (isinstance(item, slice) or + isinstance(item, np.ndarray) or + isinstance(item, list) or + (isinstance(item, tuple) and # output from np.where + all(isinstance(x, np.ndarray) for x in item))): + + if isinstance(value, Table): + vals = (col for col in value.columns.values()) + + elif isinstance(value, np.ndarray) and value.dtype.names: + vals = (value[name] for name in value.dtype.names) + + elif np.isscalar(value): + import itertools + vals = itertools.repeat(value, n_cols) + + else: # Assume this is an iterable that will work + if len(value) != n_cols: + raise ValueError('Right side value needs {0} elements (one for each column)' + .format(n_cols)) + vals = value + + for col, val in zip(self.columns.values(), vals): + col[item] = val + + else: + raise ValueError('Illegal type {0} for table item access' + .format(type(item))) + + def __delitem__(self, item): + if isinstance(item, six.string_types): + self.remove_column(item) + elif isinstance(item, tuple): + self.remove_columns(item) + + def field(self, item): + """Return column[item] for recarray compatibility.""" + return self.columns[item] + + @property + def masked(self): + return self._masked + + @masked.setter + def masked(self, masked): + raise Exception('Masked attribute is read-only (use t = Table(t, masked=True)' + ' to convert to a masked table)') + + def _set_masked(self, masked): + """ + Set the table masked property. + + Parameters + ---------- + masked : bool + State of table masking (`True` or `False`) + """ + if hasattr(self, '_masked'): + # The only allowed change is from None to False or True, or False to True + if self._masked is None and masked in [False, True]: + self._masked = masked + elif self._masked is False and masked is True: + log.info("Upgrading Table to masked Table. 
Use Table.filled() to convert to unmasked table.")
+                self._masked = masked
+            elif self._masked is masked:
+                raise Exception("Masked attribute is already set to {0}".format(masked))
+            else:
+                raise Exception("Cannot change masked attribute to {0} once it is set to {1}"
+                                .format(masked, self._masked))
+        else:
+            if masked in [True, False, None]:
+                self._masked = masked
+            else:
+                raise ValueError("masked should be one of True, False, None")
+        if self._masked:
+            self._column_class = self.MaskedColumn
+        else:
+            self._column_class = self.Column
+
+    @property
+    def ColumnClass(self):
+        if self._column_class is None:
+            return self.Column
+        else:
+            return self._column_class
+
+    @property
+    def dtype(self):
+        return np.dtype([descr(col) for col in self.columns.values()])
+
+    @property
+    def colnames(self):
+        return list(self.columns.keys())
+
+    def keys(self):
+        return list(self.columns.keys())
+
+    def __len__(self):
+        if len(self.columns) == 0:
+            return 0
+
+        lengths = set(len(col) for col in self.columns.values())
+        if len(lengths) != 1:
+            len_strs = [' {0} : {1}'.format(name, len(col)) for name, col in self.columns.items()]
+            raise ValueError('Column length mismatch:\n{0}'.format('\n'.join(len_strs)))
+
+        return lengths.pop()
+
+    def index_column(self, name):
+        """
+        Return the positional index of column ``name``.
+
+        Parameters
+        ----------
+        name : str
+            column name
+
+        Returns
+        -------
+        index : int
+            Positional index of column ``name``.
+
+        Examples
+        --------
+        Create a table with three columns 'a', 'b' and 'c'::
+
+            >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
+            ...           names=('a', 'b', 'c'))
+            >>> print(t)
+             a   b   c
+            --- --- ---
+              1 0.1   x
+              2 0.2   y
+              3 0.3   z
+
+        Get index of column 'b' of the table::
+
+            >>> t.index_column('b')
+            1
+        """
+        try:
+            return self.colnames.index(name)
+        except ValueError:
+            raise ValueError("Column {0} does not exist".format(name))
+
+    def add_column(self, col, index=None, name=None, rename_duplicate=False):
+        """
+        Add a new Column object ``col`` to the table.  If ``index``
+        is supplied then insert column before ``index`` position
+        in the list of columns, otherwise append column to the end
+        of the list.
+
+        Parameters
+        ----------
+        col : Column
+            Column object to add.
+        index : int or `None`
+            Insert column before this position or at end (default).
+        name : str
+            Column name
+        rename_duplicate : bool
+            Uniquify column name if it already exists. Default is False.
+
+        Examples
+        --------
+        Create a table with two columns 'a' and 'b'::
+
+            >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
+            >>> print(t)
+             a   b
+            --- ---
+              1 0.1
+              2 0.2
+              3 0.3
+
+        Create a third column 'c' and append it to the end of the table::
+
+            >>> col_c = Column(name='c', data=['x', 'y', 'z'])
+            >>> t.add_column(col_c)
+            >>> print(t)
+             a   b   c
+            --- --- ---
+              1 0.1   x
+              2 0.2   y
+              3 0.3   z
+
+        Add column 'd' at position 1. Note that the column is inserted
+        before the given index::
+
+            >>> col_d = Column(name='d', data=['a', 'b', 'c'])
+            >>> t.add_column(col_d, 1)
+            >>> print(t)
+             a   d   b   c
+            --- --- --- ---
+              1   a 0.1   x
+              2   b 0.2   y
+              3   c 0.3   z
+
+        Add second column named 'b' with rename_duplicate::
+
+            >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
+            >>> col_b = Column(name='b', data=[1.1, 1.2, 1.3])
+            >>> t.add_column(col_b, rename_duplicate=True)
+            >>> print(t)
+             a   b  b_1
+            --- --- ---
+              1 0.1 1.1
+              2 0.2 1.2
+              3 0.3 1.3
+
+        Add an unnamed column or mixin object in the table using a default name
+        or by specifying an explicit name with ``name``.  Name can also be
+        overridden::
+
+            >>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
+            >>> col_c = Column(data=['x', 'y'])
+            >>> t.add_column(col_c)
+            >>> t.add_column(col_c, name='c')
+            >>> col_b = Column(name='b', data=[1.1, 1.2])
+            >>> t.add_column(col_b, name='d')
+            >>> print(t)
+             a   b  col2  c   d
+            --- --- ---- --- ---
+              1 0.1    x   x 1.1
+              2 0.2    y   y 1.2
+
+        To add several columns use add_columns.
+        """
+        if index is None:
+            index = len(self.columns)
+        if name is not None:
+            name = (name,)
+
+        self.add_columns([col], [index], name, rename_duplicate=rename_duplicate)
+
+    def add_columns(self, cols, indexes=None, names=None, copy=True, rename_duplicate=False):
+        """
+        Add a list of new Column objects ``cols`` to the table.  If a
+        corresponding list of ``indexes`` is supplied then insert column
+        before each ``index`` position in the *original* list of columns,
+        otherwise append columns to the end of the list.
+
+        Parameters
+        ----------
+        cols : list of Columns
+            Column objects to add.
+        indexes : list of ints or `None`
+            Insert column before this position or at end (default).
+        names : list of str
+            Column names
+        copy : bool
+            Make a copy of the new columns. Default is True.
+        rename_duplicate : bool
+            Uniquify new column names if they duplicate the existing ones.
+            Default is False.
+
+
+        Examples
+        --------
+        Create a table with two columns 'a' and 'b'::
+
+            >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
+            >>> print(t)
+             a   b
+            --- ---
+              1 0.1
+              2 0.2
+              3 0.3
+
+        Create column 'c' and 'd' and append them to the end of the table::
+
+            >>> col_c = Column(name='c', data=['x', 'y', 'z'])
+            >>> col_d = Column(name='d', data=['u', 'v', 'w'])
+            >>> t.add_columns([col_c, col_d])
+            >>> print(t)
+             a   b   c   d
+            --- --- --- ---
+              1 0.1   x   u
+              2 0.2   y   v
+              3 0.3   z   w
+
+        Add column 'c' at position 0 and column 'd' at position 1. Note that
+        the columns are inserted before the given position::
+
+            >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
+            >>> col_c = Column(name='c', data=['x', 'y', 'z'])
+            >>> col_d = Column(name='d', data=['u', 'v', 'w'])
+            >>> t.add_columns([col_c, col_d], [0, 1])
+            >>> print(t)
+             c   a   d   b
+            --- --- --- ---
+              x   1   u 0.1
+              y   2   v 0.2
+              z   3   w 0.3
+
+        Add second column 'b' and column 'c' with ``rename_duplicate``::
+
+            >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
+            >>> col_b = Column(name='b', data=[1.1, 1.2, 1.3])
+            >>> col_c = Column(name='c', data=['x', 'y', 'z'])
+            >>> t.add_columns([col_b, col_c], rename_duplicate=True)
+            >>> print(t)
+             a   b  b_1  c
+            --- --- --- ---
+              1 0.1 1.1   x
+              2 0.2 1.2   y
+              3 0.3 1.3   z
+
+        Add unnamed columns or mixin objects in the table using default names
+        or by specifying explicit names with ``names``.  Names can also be
+        overridden::
+
+            >>> t = Table()
+            >>> col_a = Column(data=['x', 'y'])
+            >>> col_b = Column(name='b', data=['u', 'v'])
+            >>> t.add_columns([col_a, col_b])
+            >>> t.add_columns([col_a, col_b], names=['c', 'd'])
+            >>> print(t)
+            col0  b   c   d
+            ---- --- --- ---
+               x   u   x   u
+               y   v   y   v
+        """
+        if indexes is None:
+            indexes = [len(self.columns)] * len(cols)
+        elif len(indexes) != len(cols):
+            raise ValueError('Number of indexes must match number of cols')
+
+        if copy:
+            cols = [col_copy(col) for col in cols]
+
+        if len(self.columns) == 0:
+            # No existing table data, init from cols
+            newcols = cols
+        else:
+            newcols = list(self.columns.values())
+            new_indexes = list(range(len(newcols) + 1))
+            for col, index in zip(cols, indexes):
+                i = new_indexes.index(index)
+                new_indexes.insert(i, None)
+                newcols.insert(i, col)
+
+        if names is None:
+            names = (None,) * len(cols)
+        elif len(names) != len(cols):
+            raise ValueError('Number of names must match number of cols')
+
+        for i, (col, name) in enumerate(zip(cols, names)):
+            if name is None:
+                if col.info.name is not None:
+                    continue
+                name = 'col{}'.format(i + len(self.columns))
+            if col.info.parent_table is not None:
+                col = col_copy(col)
+            col.info.name = name
+
+        if rename_duplicate:
+            existing_names = set(self.colnames)
+            for col in cols:
+                i = 1
+                orig_name = col.info.name
+                while col.info.name in existing_names:
+                    # If the column belongs to another table then copy it
+                    # before renaming
+                    if col.info.parent_table is not None:
+                        col = col_copy(col)
+                    new_name = '{0}_{1}'.format(orig_name, i)
+                    col.info.name = new_name
+                    i += 1
+                    existing_names.add(new_name)
+
+        self._init_from_cols(newcols)
+
+    def _replace_column_warnings(self, name, col):
+        """
+        Same as replace_column but issues warnings under various circumstances.
+        """
+        warns = conf.replace_warnings
+
+        if 'refcount' in warns and name in self.colnames:
+            refcount = sys.getrefcount(self[name])
+
+        if name in self.colnames:
+            old_col = self[name]
+
+        # This may raise an exception (e.g. t['a'] = 1) in which case none of
+        # the downstream code runs.
+        self.replace_column(name, col)
+
+        if 'always' in warns:
+            warnings.warn("replaced column '{}'".format(name),
+                          TableReplaceWarning, stacklevel=3)
+
+        if 'slice' in warns:
+            try:
+                # Check for ndarray-subclass slice.  An unsliced instance
+                # has an ndarray for the base while sliced has the same class
+                # as parent.
+                if isinstance(old_col.base, old_col.__class__):
+                    msg = ("replaced column '{}' which looks like an array slice. "
+                           "The new column no longer shares memory with the "
+                           "original array.".format(name))
+                    warnings.warn(msg, TableReplaceWarning, stacklevel=3)
+            except AttributeError:
+                pass
+
+        if 'refcount' in warns:
+            # Did reference count change?
+            new_refcount = sys.getrefcount(self[name])
+            if refcount != new_refcount:
+                msg = ("replaced column '{}' and the number of references "
+                       "to the column changed.".format(name))
+                warnings.warn(msg, TableReplaceWarning, stacklevel=3)
+
+        if 'attributes' in warns:
+            # Any of the standard column attributes changed?
+            changed_attrs = []
+            new_col = self[name]
+            # Check base DataInfo attributes that any column will have
+            for attr in DataInfo.attr_names:
+                if getattr(old_col.info, attr) != getattr(new_col.info, attr):
+                    changed_attrs.append(attr)
+
+            if changed_attrs:
+                msg = ("replaced column '{}' and column attributes {} changed."
+ .format(name, changed_attrs)) + warnings.warn(msg, TableReplaceWarning, stacklevel=3) + + def replace_column(self, name, col): + """ + Replace column ``name`` with the new ``col`` object. + + Parameters + ---------- + name : str + Name of column to replace + col : column object (list, ndarray, Column, etc) + New column object to replace the existing column + + Examples + -------- + Replace column 'a' with a float version of itself:: + + >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b')) + >>> float_a = t['a'].astype(float) + >>> t.replace_column('a', float_a) + """ + if name not in self.colnames: + raise ValueError('column name {0} is not in the table'.format(name)) + + if self[name].info.indices: + raise ValueError('cannot replace a table index column') + + t = self.__class__([col], names=[name]) + cols = OrderedDict(self.columns) + cols[name] = t[name] + self._init_from_cols(cols.values()) + + def remove_row(self, index): + """ + Remove a row from the table. + + Parameters + ---------- + index : int + Index of row to remove + + Examples + -------- + Create a table with three columns 'a', 'b' and 'c':: + + >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']], + ... names=('a', 'b', 'c')) + >>> print(t) + a b c + --- --- --- + 1 0.1 x + 2 0.2 y + 3 0.3 z + + Remove row 1 from the table:: + + >>> t.remove_row(1) + >>> print(t) + a b c + --- --- --- + 1 0.1 x + 3 0.3 z + + To remove several rows at the same time use remove_rows. + """ + # check the index against the types that work with np.delete + if not isinstance(index, (six.integer_types, np.integer)): + raise TypeError("Row index must be an integer") + self.remove_rows(index) + + def remove_rows(self, row_specifier): + """ + Remove rows from the table. + + Parameters + ---------- + row_specifier : slice, int, or array of ints + Specification for rows to remove + + Examples + -------- + Create a table with three columns 'a', 'b' and 'c':: + + >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']], + ... names=('a', 'b', 'c')) + >>> print(t) + a b c + --- --- --- + 1 0.1 x + 2 0.2 y + 3 0.3 z + + Remove rows 0 and 2 from the table:: + + >>> t.remove_rows([0, 2]) + >>> print(t) + a b c + --- --- --- + 2 0.2 y + + + Note that there are no warnings if the slice operator extends + outside the data:: + + >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']], + ... names=('a', 'b', 'c')) + >>> t.remove_rows(slice(10, 20, 1)) + >>> print(t) + a b c + --- --- --- + 1 0.1 x + 2 0.2 y + 3 0.3 z + """ + # Update indices + for index in self.indices: + index.remove_rows(row_specifier) + + keep_mask = np.ones(len(self), dtype=np.bool) + keep_mask[row_specifier] = False + + columns = self.TableColumns() + for name, col in self.columns.items(): + newcol = col[keep_mask] + newcol.info.parent_table = self + columns[name] = newcol + + self._replace_cols(columns) + + # Revert groups to default (ungrouped) state + if hasattr(self, '_groups'): + del self._groups + + def remove_column(self, name): + """ + Remove a column from the table. + + This can also be done with:: + + del table[name] + + Parameters + ---------- + name : str + Name of column to remove + + Examples + -------- + Create a table with three columns 'a', 'b' and 'c':: + + >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']], + ... 
names=('a', 'b', 'c')) + >>> print(t) + a b c + --- --- --- + 1 0.1 x + 2 0.2 y + 3 0.3 z + + Remove column 'b' from the table:: + + >>> t.remove_column('b') + >>> print(t) + a c + --- --- + 1 x + 2 y + 3 z + + To remove several columns at the same time use remove_columns. + """ + + self.remove_columns([name]) + + def remove_columns(self, names): + ''' + Remove several columns from the table. + + Parameters + ---------- + names : list + A list containing the names of the columns to remove + + Examples + -------- + Create a table with three columns 'a', 'b' and 'c':: + + >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']], + ... names=('a', 'b', 'c')) + >>> print(t) + a b c + --- --- --- + 1 0.1 x + 2 0.2 y + 3 0.3 z + + Remove columns 'b' and 'c' from the table:: + + >>> t.remove_columns(['b', 'c']) + >>> print(t) + a + --- + 1 + 2 + 3 + + Specifying only a single column also works. Remove column 'b' from the table:: + + >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']], + ... names=('a', 'b', 'c')) + >>> t.remove_columns('b') + >>> print(t) + a c + --- --- + 1 x + 2 y + 3 z + + This gives the same as using remove_column. + ''' + if isinstance(names, six.string_types): + names = [names] + + for name in names: + if name not in self.columns: + raise KeyError("Column {0} does not exist".format(name)) + + for name in names: + self.columns.pop(name) + + def _convert_string_dtype(self, in_kind, out_kind, python3_only): + """ + Convert string-like columns to/from bytestring and unicode (internal only). + + Parameters + ---------- + in_kind : str + Input dtype.kind + out_kind : str + Output dtype.kind + python3_only : bool + Only do this operation for Python 3 + """ + if python3_only and six.PY2: + return + + # If there are no `in_kind` columns then do nothing + cols = self.columns.values() + if not any(col.dtype.kind == in_kind for col in cols): + return + + newcols = [] + for col in cols: + if col.dtype.kind == in_kind: + newdtype = re.sub(in_kind, out_kind, col.dtype.str) + newcol = col.__class__(col, dtype=newdtype) + else: + newcol = col + newcols.append(newcol) + + self._init_from_cols(newcols) + + def convert_bytestring_to_unicode(self, python3_only=False): + """ + Convert bytestring columns (dtype.kind='S') to unicode (dtype.kind='U') assuming + ASCII encoding. + + Internally this changes string columns to represent each character in the string + with a 4-byte UCS-4 equivalent, so it is inefficient for memory but allows Python + 3 scripts to manipulate string arrays with natural syntax. + + The ``python3_only`` parameter is provided as a convenience so that code can + be written in a Python 2 / 3 compatible way:: + + >>> t = Table.read('my_data.fits') + >>> t.convert_bytestring_to_unicode(python3_only=True) + + Parameters + ---------- + python3_only : bool + Only do this operation for Python 3 + """ + self._convert_string_dtype('S', 'U', python3_only) + + def convert_unicode_to_bytestring(self, python3_only=False): + """ + Convert ASCII-only unicode columns (dtype.kind='U') to bytestring (dtype.kind='S'). + + When exporting a unicode string array to a file in Python 3, it may be desirable + to encode unicode columns as bytestrings. This routine takes advantage of numpy + automated conversion which works for strings that are pure ASCII. 
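+
+ As a minimal sketch (the column name and values here are illustrative)::
+
+ >>> t = Table({'s': ['abc', 'def']}) # doctest: +SKIP
+ >>> t.convert_unicode_to_bytestring() # doctest: +SKIP
+ >>> t['s'].dtype.kind # doctest: +SKIP
+ 'S'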
+
+ The ``python3_only`` parameter is provided as a convenience so that code can
+ be written in a Python 2 / 3 compatible way::
+
+ >>> t.convert_unicode_to_bytestring(python3_only=True)
+ >>> t.write('my_data.fits')
+
+ Parameters
+ ----------
+ python3_only : bool
+ Only do this operation for Python 3
+ """
+ self._convert_string_dtype('U', 'S', python3_only)
+
+ def keep_columns(self, names):
+ '''
+ Keep only the columns specified (remove the others).
+
+ Parameters
+ ----------
+ names : list
+ A list containing the names of the columns to keep. All other
+ columns will be removed.
+
+ Examples
+ --------
+ Create a table with three columns 'a', 'b' and 'c'::
+
+ >>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']],
+ ... names=('a', 'b', 'c'))
+ >>> print(t)
+ a b c
+ --- --- ---
+ 1 0.1 x
+ 2 0.2 y
+ 3 0.3 z
+
+ Specifying only a single column name keeps only this column.
+ Keep only column 'a' of the table::
+
+ >>> t.keep_columns('a')
+ >>> print(t)
+ a
+ ---
+ 1
+ 2
+ 3
+
+ Specifying a list of column names is also possible.
+ Keep columns 'a' and 'c' of the table::
+
+ >>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']],
+ ... names=('a', 'b', 'c'))
+ >>> t.keep_columns(['a', 'c'])
+ >>> print(t)
+ a c
+ --- ---
+ 1 x
+ 2 y
+ 3 z
+ '''
+
+ if isinstance(names, six.string_types):
+ names = [names]
+
+ for name in names:
+ if name not in self.columns:
+ raise KeyError("Column {0} does not exist".format(name))
+
+ remove = list(set(self.keys()) - set(names))
+
+ self.remove_columns(remove)
+
+ def rename_column(self, name, new_name):
+ '''
+ Rename a column.
+
+ This can also be done directly by setting the ``name`` attribute
+ for a column::
+
+ table[name].name = new_name
+
+ TODO: this won't work for mixins
+
+ Parameters
+ ----------
+ name : str
+ The current name of the column.
+ new_name : str
+ The new name for the column
+
+ Examples
+ --------
+ Create a table with three columns 'a', 'b' and 'c'::
+
+ >>> t = Table([[1,2],[3,4],[5,6]], names=('a','b','c'))
+ >>> print(t)
+ a b c
+ --- --- ---
+ 1 3 5
+ 2 4 6
+
+ Renaming column 'a' to 'aa'::
+
+ >>> t.rename_column('a', 'aa')
+ >>> print(t)
+ aa b c
+ --- --- ---
+ 1 3 5
+ 2 4 6
+ '''
+
+ if name not in self.keys():
+ raise KeyError("Column {0} does not exist".format(name))
+
+ self.columns[name].info.name = new_name
+
+ def add_row(self, vals=None, mask=None):
+ """Add a new row to the end of the table.
+
+ The ``vals`` argument can be:
+
+ sequence (e.g. tuple or list)
+ Column values in the same order as table columns.
+ mapping (e.g. dict)
+ Keys corresponding to column names. Missing values will be
+ filled with np.zeros for the column dtype.
+ `None`
+ All values filled with np.zeros for the column dtype.
+
+ This method requires that the Table object "owns" the underlying array
+ data. In particular one cannot add a row to a Table that was
+ initialized with copy=False from an existing array.
+
+ The ``mask`` attribute should give (if desired) the mask for the
+ values. The type of the mask should match that of the values, i.e. if
+ ``vals`` is an iterable, then ``mask`` should also be an iterable
+ with the same length, and if ``vals`` is a mapping, then ``mask``
+ should be a dictionary.
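+
+ For example, the mapping form can be sketched as follows (column
+ names 'a' and 'b' are illustrative)::
+
+ >>> t.add_row({'a': 5, 'b': 6.0}) # doctest: +SKIP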
+
+ Parameters
+ ----------
+ vals : tuple, list, dict or `None`
+ Use the specified values in the new row
+ mask : tuple, list, dict or `None`
+ Use the specified mask values in the new row
+
+ Examples
+ --------
+ Create a table with three columns 'a', 'b' and 'c'::
+
+ >>> t = Table([[1,2],[4,5],[7,8]], names=('a','b','c'))
+ >>> print(t)
+ a b c
+ --- --- ---
+ 1 4 7
+ 2 5 8
+
+ Adding a new row with entries '3' in 'a', '6' in 'b' and '9' in 'c'::
+
+ >>> t.add_row([3,6,9])
+ >>> print(t)
+ a b c
+ --- --- ---
+ 1 4 7
+ 2 5 8
+ 3 6 9
+ """
+ self.insert_row(len(self), vals, mask)
+
+ def insert_row(self, index, vals=None, mask=None):
+ """Add a new row before the given ``index`` position in the table.
+
+ The ``vals`` argument can be:
+
+ sequence (e.g. tuple or list)
+ Column values in the same order as table columns.
+ mapping (e.g. dict)
+ Keys corresponding to column names. Missing values will be
+ filled with np.zeros for the column dtype.
+ `None`
+ All values filled with np.zeros for the column dtype.
+
+ The ``mask`` attribute should give (if desired) the mask for the
+ values. The type of the mask should match that of the values, i.e. if
+ ``vals`` is an iterable, then ``mask`` should also be an iterable
+ with the same length, and if ``vals`` is a mapping, then ``mask``
+ should be a dictionary.
+
+ Parameters
+ ----------
+ index : int
+ Position in the table before which the new row is inserted
+ vals : tuple, list, dict or `None`
+ Use the specified values in the new row
+ mask : tuple, list, dict or `None`
+ Use the specified mask values in the new row
+ """
+ colnames = self.colnames
+
+ N = len(self)
+ if index < -N or index > N:
+ raise IndexError("Index {0} is out of bounds for table with length {1}"
+ .format(index, N))
+ if index < 0:
+ index += N
+
+ def _is_mapping(obj):
+ """Minimal checker for mapping (dict-like) interface for obj"""
+ attrs = ('__getitem__', '__len__', '__iter__', 'keys', 'values', 'items')
+ return all(hasattr(obj, attr) for attr in attrs)
+
+ if mask is not None and not self.masked:
+ # Possibly issue upgrade warning and update self.ColumnClass. This
+ # does not change the existing columns.
+ self._set_masked(True)
+
+ if _is_mapping(vals) or vals is None:
+ # From the vals and/or mask mappings create the corresponding lists
+ # that have entries for each table column.
+ if mask is not None and not _is_mapping(mask):
+ raise TypeError("Mismatch between type of vals and mask")
+
+ # Now check that the mask is specified for the same keys as the
+ # values, otherwise things get really confusing.
+ if mask is not None and set(vals.keys()) != set(mask.keys()):
+ raise ValueError('keys in mask should match keys in vals')
+
+ if vals and any(name not in colnames for name in vals):
+ raise ValueError('Keys in vals must all be valid column names')
+
+ vals_list = []
+ mask_list = []
+
+ for name in colnames:
+ if vals and name in vals:
+ vals_list.append(vals[name])
+ mask_list.append(False if mask is None else mask[name])
+ else:
+ col = self[name]
+ if hasattr(col, 'dtype'):
+ # Make a placeholder zero element of the right type which is masked.
+ # This assumes the appropriate insert() method will broadcast a
+ # numpy scalar to the right shape.
+ vals_list.append(np.zeros(shape=(), dtype=col.dtype))
+
+ # For masked table any unsupplied values are masked by default.
+ mask_list.append(self.masked and vals is not None) + else: + raise ValueError("Value must be supplied for column '{0}'".format(name)) + + vals = vals_list + mask = mask_list + + if isiterable(vals): + if mask is not None and (not isiterable(mask) or _is_mapping(mask)): + raise TypeError("Mismatch between type of vals and mask") + + if len(self.columns) != len(vals): + raise ValueError('Mismatch between number of vals and columns') + + if mask is not None: + if len(self.columns) != len(mask): + raise ValueError('Mismatch between number of masks and columns') + else: + mask = [False] * len(self.columns) + + else: + raise TypeError('Vals must be an iterable or mapping or None') + + columns = self.TableColumns() + try: + # Insert val at index for each column + for name, col, val, mask_ in zip(colnames, self.columns.values(), vals, mask): + # If the new row caused a change in self.ColumnClass then + # Column-based classes need to be converted first. This is + # typical for adding a row with mask values to an unmasked table. + if isinstance(col, Column) and not isinstance(col, self.ColumnClass): + col = self.ColumnClass(col, copy=False) + + newcol = col.insert(index, val, axis=0) + if not isinstance(newcol, BaseColumn): + newcol.info.name = name + if self.masked: + newcol.mask = FalseArray(newcol.shape) + + if len(newcol) != N + 1: + raise ValueError('Incorrect length for column {0} after inserting {1}' + ' (expected {2}, got {3})' + .format(name, val, len(newcol), N + 1)) + newcol.info.parent_table = self + + # Set mask if needed + if self.masked: + newcol.mask[index] = mask_ + + columns[name] = newcol + + # insert row in indices + for table_index in self.indices: + table_index.insert_row(index, vals, self.columns.values()) + + except Exception as err: + raise ValueError("Unable to insert row because of exception in column '{0}':\n{1}" + .format(name, err)) + else: + self._replace_cols(columns) + + # Revert groups to default (ungrouped) state + if hasattr(self, '_groups'): + del self._groups + + def _replace_cols(self, columns): + for col, new_col in zip(self.columns.values(), columns.values()): + new_col.info.indices = [] + for index in col.info.indices: + index.columns[index.col_position(col.info.name)] = new_col + new_col.info.indices.append(index) + + self.columns = columns + + def argsort(self, keys=None, kind=None): + """ + Return the indices which would sort the table according to one or + more key columns. This simply calls the `numpy.argsort` function on + the table with the ``order`` parameter set to ``keys``. + + Parameters + ---------- + keys : str or list of str + The column name(s) to order the table by + kind : {'quicksort', 'mergesort', 'heapsort'}, optional + Sorting algorithm. + + Returns + ------- + index_array : ndarray, int + Array of indices that sorts the table by the specified key + column(s). + """ + if isinstance(keys, six.string_types): + keys = [keys] + + # use index sorted order if possible + if keys is not None: + index = get_index(self, self[keys]) + if index is not None: + return index.sorted_data() + + kwargs = {} + if keys: + kwargs['order'] = keys + if kind: + kwargs['kind'] = kind + + if keys: + data = self[keys].as_array() + else: + data = self.as_array() + + return data.argsort(**kwargs) + + def sort(self, keys=None): + ''' + Sort the table according to one or more keys. This operates + on the existing table and does not return a new table. + + Parameters + ---------- + keys : str or list of str + The key(s) to order the table by. 
If None, use the + primary index of the Table. + + Examples + -------- + Create a table with 3 columns:: + + >>> t = Table([['Max', 'Jo', 'John'], ['Miller','Miller','Jackson'], + ... [12,15,18]], names=('firstname','name','tel')) + >>> print(t) + firstname name tel + --------- ------- --- + Max Miller 12 + Jo Miller 15 + John Jackson 18 + + Sorting according to standard sorting rules, first 'name' then 'firstname':: + + >>> t.sort(['name','firstname']) + >>> print(t) + firstname name tel + --------- ------- --- + John Jackson 18 + Jo Miller 15 + Max Miller 12 + ''' + if keys is None: + if not self.indices: + raise ValueError("Table sort requires input keys or a table index") + keys = [x.info.name for x in self.indices[0].columns] + + if isinstance(keys, six.string_types): + keys = [keys] + + indexes = self.argsort(keys) + sort_index = get_index(self, self[keys]) + if sort_index is not None: + # avoid inefficient relabelling of sorted index + prev_frozen = sort_index._frozen + sort_index._frozen = True + + for col in self.columns.values(): + col[:] = col.take(indexes, axis=0) + + if sort_index is not None: + # undo index freeze + sort_index._frozen = prev_frozen + # now relabel the sort index appropriately + sort_index.sort() + + def reverse(self): + ''' + Reverse the row order of table rows. The table is reversed + in place and there are no function arguments. + + Examples + -------- + Create a table with three columns:: + + >>> t = Table([['Max', 'Jo', 'John'], ['Miller','Miller','Jackson'], + ... [12,15,18]], names=('firstname','name','tel')) + >>> print(t) + firstname name tel + --------- ------- --- + Max Miller 12 + Jo Miller 15 + John Jackson 18 + + Reversing order:: + + >>> t.reverse() + >>> print(t) + firstname name tel + --------- ------- --- + John Jackson 18 + Jo Miller 15 + Max Miller 12 + ''' + for col in self.columns.values(): + col[:] = col[::-1] + for index in self.indices: + index.reverse() + + @classmethod + def read(cls, *args, **kwargs): + """ + Read and parse a data table and return as a Table. + + This function provides the Table interface to the astropy unified I/O + layer. This allows easily reading a file in many supported data formats + using syntax such as:: + + >>> from astropy.table import Table + >>> dat = Table.read('table.dat', format='ascii') + >>> events = Table.read('events.fits', format='fits') + + The arguments and keywords (other than ``format``) provided to this function are + passed through to the underlying data reader (e.g. `~astropy.io.ascii.read`). + """ + out = io_registry.read(cls, *args, **kwargs) + # For some readers (e.g., ascii.ecsv), the returned `out` class is not + # guaranteed to be the same as the desired output `cls`. If so, + # try coercing to desired class without copying (io.registry.read + # would normally do a copy). The normal case here is swapping + # Table <=> QTable. + if cls is not out.__class__: + try: + out = cls(out, copy=False) + except Exception: + raise TypeError('could not convert reader output to {0} ' + 'class.'.format(cls.__name__)) + return out + + def write(self, *args, **kwargs): + """ + Write this Table object out in the specified format. + + This function provides the Table interface to the astropy unified I/O + layer. 
This allows easily writing a file in many supported data formats
+ using syntax such as::
+
+ >>> from astropy.table import Table
+ >>> dat = Table([[1, 2], [3, 4]], names=('a', 'b'))
+ >>> dat.write('table.dat', format='ascii')
+
+ The arguments and keywords (other than ``format``) provided to this function are
+ passed through to the underlying data writer (e.g. `~astropy.io.ascii.write`).
+ """
+ io_registry.write(self, *args, **kwargs)
+
+ def copy(self, copy_data=True):
+ '''
+ Return a copy of the table.
+
+ Parameters
+ ----------
+ copy_data : bool
+ If `True` (the default), copy the underlying data array.
+ Otherwise, use the same data array. The ``meta`` is always
+ deepcopied regardless of the value for ``copy_data``.
+ '''
+ out = self.__class__(self, copy=copy_data)
+
+ # If the current table is grouped then do the same in the copy
+ if hasattr(self, '_groups'):
+ out._groups = groups.TableGroups(out, indices=self._groups._indices,
+ keys=self._groups._keys)
+ return out
+
+ def __deepcopy__(self, memo=None):
+ return self.copy(True)
+
+ def __copy__(self):
+ return self.copy(False)
+
+ def __lt__(self, other):
+ if six.PY2:
+ raise TypeError("unorderable types: Table() < {0}".
+ format(str(type(other))))
+ else:
+ return super(Table, self).__lt__(other)
+
+ def __gt__(self, other):
+ if six.PY2:
+ raise TypeError("unorderable types: Table() > {0}".
+ format(str(type(other))))
+ else:
+ return super(Table, self).__gt__(other)
+
+ def __le__(self, other):
+ if six.PY2:
+ raise TypeError("unorderable types: Table() <= {0}".
+ format(str(type(other))))
+ else:
+ return super(Table, self).__le__(other)
+
+ def __ge__(self, other):
+ if six.PY2:
+ raise TypeError("unorderable types: Table() >= {0}".
+ format(str(type(other))))
+ else:
+ return super(Table, self).__ge__(other)
+
+ def __eq__(self, other):
+
+ if isinstance(other, Table):
+ other = other.as_array()
+
+ if self.masked:
+ if isinstance(other, np.ma.MaskedArray):
+ result = self.as_array() == other
+ else:
+ # If mask is True, then by definition the row doesn't match
+ # because the other array is not masked.
+ false_mask = np.zeros(1, dtype=[(n, bool) for n in self.dtype.names])
+ result = (self.as_array().data == other) & (self.mask == false_mask)
+ else:
+ if isinstance(other, np.ma.MaskedArray):
+ # If mask is True, then by definition the row doesn't match
+ # because the other array is not masked.
+ false_mask = np.zeros(1, dtype=[(n, bool) for n in other.dtype.names])
+ result = (self.as_array() == other.data) & (other.mask == false_mask)
+ else:
+ result = self.as_array() == other
+
+ return result
+
+ def __ne__(self, other):
+ return ~self.__eq__(other)
+
+ @property
+ def groups(self):
+ if not hasattr(self, '_groups'):
+ self._groups = groups.TableGroups(self)
+ return self._groups
+
+ def group_by(self, keys):
+ """
+ Group this table by the specified ``keys``.
+
+ This effectively splits the table into groups which correspond to
+ unique values of the ``keys`` grouping object. The output is a new
+ `Table` which contains a copy of this table but sorted by row
+ according to ``keys``.
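+
+ As a minimal sketch using a single column name as the key (names and
+ values are illustrative)::
+
+ >>> t = Table([['a', 'b', 'a'], [1, 2, 3]], names=('key', 'val')) # doctest: +SKIP
+ >>> tg = t.group_by('key') # doctest: +SKIP
+ >>> len(tg.groups) # doctest: +SKIP
+ 2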
+ + The ``keys`` input to `group_by` can be specified in different ways: + + - String or list of strings corresponding to table column name(s) + - Numpy array (homogeneous or structured) with same length as this table + - `Table` with same length as this table + + Parameters + ---------- + keys : str, list of str, numpy array, or `Table` + Key grouping object + + Returns + ------- + out : `Table` + New table with groups set + """ + if self.has_mixin_columns: + raise NotImplementedError('group_by not available for tables with mixin columns') + + return groups.table_group_by(self, keys) + + def to_pandas(self): + """ + Return a :class:`pandas.DataFrame` instance + + Returns + ------- + dataframe : :class:`pandas.DataFrame` + A pandas :class:`pandas.DataFrame` instance + + Raises + ------ + ImportError + If pandas is not installed + ValueError + If the Table contains mixin or multi-dimensional columns + """ + from pandas import DataFrame + + if self.has_mixin_columns: + raise ValueError("Cannot convert a table with mixin columns to a pandas DataFrame") + + if any(getattr(col, 'ndim', 1) > 1 for col in self.columns.values()): + raise ValueError("Cannot convert a table with multi-dimensional columns to a pandas DataFrame") + + out = OrderedDict() + + for name, column in self.columns.items(): + if isinstance(column, MaskedColumn): + if column.dtype.kind in ['i', 'u']: + out[name] = column.astype(float).filled(np.nan) + elif column.dtype.kind in ['f', 'c']: + out[name] = column.filled(np.nan) + else: + out[name] = column.astype(np.object).filled(np.nan) + else: + out[name] = column + + if out[name].dtype.byteorder not in ('=', '|'): + out[name] = out[name].byteswap().newbyteorder() + + return DataFrame(out) + + @classmethod + def from_pandas(cls, dataframe): + """ + Create a `Table` from a :class:`pandas.DataFrame` instance + + Parameters + ---------- + dataframe : :class:`pandas.DataFrame` + The pandas :class:`pandas.DataFrame` instance + + Returns + ------- + table : `Table` + A `Table` (or subclass) instance + """ + + out = OrderedDict() + + for name in dataframe.columns: + column = dataframe[name] + mask = np.array(column.isnull()) + data = np.array(column) + + if data.dtype.kind == 'O': + # If all elements of an object array are string-like or np.nan + # then coerce back to a native numpy str/unicode array. + string_types = six.string_types + if not six.PY2: + string_types += (bytes,) + nan = np.nan + if all(isinstance(x, string_types) or x is nan for x in data): + # Force any missing (null) values to b''. Numpy will + # upcast to str/unicode as needed. + data[mask] = b'' + + # When the numpy object array is represented as a list then + # numpy initializes to the correct string or unicode type. + data = np.array([x for x in data]) + + if np.any(mask): + out[name] = MaskedColumn(data=data, name=name, mask=mask) + else: + out[name] = Column(data=data, name=name) + + return cls(out) + + info = TableInfo() + + +class QTable(Table): + """A class to represent tables of heterogeneous data. + + `QTable` provides a class for heterogeneous tabular data which can be + easily modified, for instance adding columns or new rows. + + The `QTable` class is identical to `Table` except that columns with an + associated ``unit`` attribute are converted to `~astropy.units.Quantity` + objects. + + Parameters + ---------- + data : numpy ndarray, dict, list, Table, or table-like object, optional + Data to initialize table. + masked : bool, optional + Specify whether the table is masked. 
+ names : list, optional + Specify column names. + dtype : list, optional + Specify column data types. + meta : dict, optional + Metadata associated with the table. + copy : bool, optional + Copy the input data. Default is True. + rows : numpy ndarray, list of lists, optional + Row-oriented data for table instead of ``data`` argument. + copy_indices : bool, optional + Copy any indices in the input data. Default is True. + **kwargs : dict, optional + Additional keyword args when converting table-like object. + + """ + + def _add_as_mixin_column(self, col): + """ + Determine if ``col`` should be added to the table directly as + a mixin column. + """ + return has_info_class(col, MixinInfo) + + def _convert_col_for_table(self, col): + if (isinstance(col, Column) and getattr(col, 'unit', None) is not None): + # We need to turn the column into a quantity, or a subclass + # identified in the unit (such as u.mag()). + q_cls = getattr(col.unit, '_quantity_class', Quantity) + qcol = q_cls(col.data, col.unit, copy=False) + qcol.info = col.info + col = qcol + else: + col = super(QTable, self)._convert_col_for_table(col) + + return col + + +class NdarrayMixin(np.ndarray): + """ + Mixin column class to allow storage of arbitrary numpy + ndarrays within a Table. This is a subclass of numpy.ndarray + and has the same initialization options as ndarray(). + """ + info = ParentDtypeInfo() + + def __new__(cls, obj, *args, **kwargs): + self = np.array(obj, *args, **kwargs).view(cls) + if 'info' in getattr(obj, '__dict__', ()): + self.info = obj.info + return self + + def __array_finalize__(self, obj): + if obj is None: + return + + if six.callable(super(NdarrayMixin, self).__array_finalize__): + super(NdarrayMixin, self).__array_finalize__(obj) + + # Self was created from template (e.g. obj[slice] or (obj * 2)) + # or viewcast e.g. obj.view(Column). In either case we want to + # init Column attributes for self from obj if possible. + if 'info' in getattr(obj, '__dict__', ()): + self.info = obj.info + + def __reduce__(self): + # patch to pickle Quantity objects (ndarray subclasses), see + # http://www.mail-archive.com/numpy-discussion@scipy.org/msg02446.html + + object_state = list(super(NdarrayMixin, self).__reduce__()) + object_state[2] = (object_state[2], self.__dict__) + return tuple(object_state) + + def __setstate__(self, state): + # patch to unpickle NdarrayMixin objects (ndarray subclasses), see + # http://www.mail-archive.com/numpy-discussion@scipy.org/msg02446.html + + nd_state, own_state = state + super(NdarrayMixin, self).__setstate__(nd_state) + self.__dict__.update(own_state) diff --git a/astropy/table/table_helpers.py b/astropy/table/table_helpers.py new file mode 100644 index 0000000..ae7ce66 --- /dev/null +++ b/astropy/table/table_helpers.py @@ -0,0 +1,179 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +""" +Helper functions for table development, mostly creating useful +tables for testing. +""" + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +from itertools import cycle +import string +import numpy as np + +from .table import Table, Column +from ..extern.six.moves import zip, range +from ..utils.data_info import ParentDtypeInfo + + +class TimingTables(object): + """ + Object which contains two tables and various other attributes that + are useful for timing and other API tests. 
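+
+ A minimal usage sketch (the size argument here is illustrative)::
+
+ >>> tt = TimingTables(size=100) # doctest: +SKIP
+ >>> len(tt.table) # doctest: +SKIP
+ 100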
+ """ + + def __init__(self, size=1000, masked=False): + self.masked = masked + + # Initialize table + self.table = Table(masked=self.masked) + + # Create column with mixed types + np.random.seed(12345) + self.table['i'] = np.arange(size) + self.table['a'] = np.random.random(size) # float + self.table['b'] = np.random.random(size) > 0.5 # bool + self.table['c'] = np.random.random((size, 10)) # 2d column + self.table['d'] = np.random.choice(np.array(list(string.ascii_letters)), size) + + self.extra_row = {'a': 1.2, 'b': True, 'c': np.repeat(1, 10), 'd': 'Z'} + self.extra_column = np.random.randint(0, 100, size) + self.row_indices = np.where(self.table['a'] > 0.9)[0] + self.table_grouped = self.table.group_by('d') + + # Another table for testing joining + self.other_table = Table(masked=self.masked) + self.other_table['i'] = np.arange(1, size, 3) + self.other_table['f'] = np.random.random() + self.other_table.sort('f') + + # Another table for testing hstack + self.other_table_2 = Table(masked=self.masked) + self.other_table_2['g'] = np.random.random(size) + self.other_table_2['h'] = np.random.random((size, 10)) + + self.bool_mask = self.table['a'] > 0.6 + + +def simple_table(size=3, cols=None, kinds='ifS', masked=False): + """ + Return a simple table for testing. + + Example + -------- + :: + + >>> from astropy.table.table_helpers import simple_table + >>> print(simple_table(3, 6, masked=True, kinds='ifOS')) + a b c d e f + --- --- -------- --- --- --- + -- 1.0 {'c': 2} -- 5 5.0 + 2 2.0 -- e 6 -- + 3 -- {'e': 4} f -- 7.0 + + Parameters + ---------- + size : int + Number of table rows + cols : int, optional + Number of table columns. Defaults to number of kinds. + kinds : str + String consisting of the column dtype.kinds. This string + will be cycled through to generate the column dtype. + The allowed values are 'i', 'f', 'S', 'O'. + + Returns + ------- + out : `Table` + New table with appropriate characteristics + """ + if cols is None: + cols = len(kinds) + if cols > 26: + raise ValueError("Max 26 columns in SimpleTable") + + columns = [] + names = [chr(ord('a') + ii) for ii in range(cols)] + letters = np.array([c for c in string.ascii_letters]) + for jj, kind in zip(range(cols), cycle(kinds)): + if kind == 'i': + data = np.arange(1, size + 1, dtype=np.int64) + jj + elif kind == 'f': + data = np.arange(size, dtype=np.float64) + jj + elif kind == 'S': + indices = (np.arange(size) + jj) % len(letters) + data = letters[indices] + elif kind == 'O': + indices = (np.arange(size) + jj) % len(letters) + vals = letters[indices] + data = [{val: index} for val, index in zip(vals, indices)] + else: + raise ValueError('Unknown data kind') + columns.append(Column(data)) + + table = Table(columns, names=names, masked=masked) + if masked: + for ii, col in enumerate(table.columns.values()): + mask = np.array((np.arange(size) + ii) % 3, dtype=bool) + col.mask = ~mask + + return table + + +def complex_table(): + """ + Return a masked table from the io.votable test set that has a wide variety + of stressing types. 
+ """ + from ..utils.data import get_pkg_data_filename + from ..io.votable.table import parse + import warnings + + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + votable = parse(get_pkg_data_filename('../io/votable/tests/data/regression.xml'), + pedantic=False) + first_table = votable.get_first_table() + table = first_table.to_table() + + return table + + +class ArrayWrapper(object): + """ + Minimal mixin using a simple wrapper around a numpy array + """ + info = ParentDtypeInfo() + + def __init__(self, data): + self.data = np.array(data) + if 'info' in getattr(data, '__dict__', ()): + self.info = data.info + + def __getitem__(self, item): + if isinstance(item, (int, np.integer)): + out = self.data[item] + else: + out = self.__class__(self.data[item]) + if 'info' in self.__dict__: + out.info = self.info + return out + + def __setitem__(self, item, value): + self.data[item] = value + + def __len__(self): + return len(self.data) + + @property + def dtype(self): + return self.data.dtype + + @property + def shape(self): + return self.data.shape + + def __repr__(self): + return ("<{0} name='{1}' data={2}>" + .format(self.__class__.__name__, self.info.name, self.data)) diff --git a/astropy/table/tests/__init__.py b/astropy/table/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/astropy/table/tests/conftest.py b/astropy/table/tests/conftest.py new file mode 100644 index 0000000..807f266 --- /dev/null +++ b/astropy/table/tests/conftest.py @@ -0,0 +1,198 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +""" +All of the py.test fixtures used by astropy.table are defined here. + +The fixtures can not be defined in the modules that use them, because +those modules are imported twice: once with `from __future__ import +unicode_literals` and once without. py.test complains when the same +fixtures are defined more than once. + +`conftest.py` is a "special" module name for py.test that is always +imported, but is not looked in for tests, and it is the recommended +place to put fixtures that are shared between modules. These fixtures +can not be defined in a module by a different name and still be shared +between modules. +""" + +from copy import deepcopy +from collections import OrderedDict +import pickle + +import pytest +import numpy as np + +from ... import table +from ...table import table_helpers, Table, QTable +from ... import time +from ... import units as u +from ... import coordinates +from .. import pprint + + +@pytest.fixture(params=[table.Column, table.MaskedColumn]) +def Column(request): + # Fixture to run all the Column tests for both an unmasked (ndarray) + # and masked (MaskedArray) column. + return request.param + + +class MaskedTable(table.Table): + def __init__(self, *args, **kwargs): + kwargs['masked'] = True + table.Table.__init__(self, *args, **kwargs) + + +class MyRow(table.Row): + pass + + +class MyColumn(table.Column): + pass + + +class MyMaskedColumn(table.MaskedColumn): + pass + + +class MyTableColumns(table.TableColumns): + pass + + +class MyTableFormatter(pprint.TableFormatter): + pass + + +class MyTable(table.Table): + Row = MyRow + Column = MyColumn + MaskedColumn = MyMaskedColumn + TableColumns = MyTableColumns + TableFormatter = MyTableFormatter + +# Fixture to run all the Column tests for both an unmasked (ndarray) +# and masked (MaskedArray) column. 
+ + +@pytest.fixture(params=['unmasked', 'masked', 'subclass']) +def table_types(request): + class TableTypes: + def __init__(self, request): + if request.param == 'unmasked': + self.Table = table.Table + self.Column = table.Column + elif request.param == 'masked': + self.Table = MaskedTable + self.Column = table.MaskedColumn + elif request.param == 'subclass': + self.Table = MyTable + self.Column = MyColumn + return TableTypes(request) + + +# Fixture to run all the Column tests for both an unmasked (ndarray) +# and masked (MaskedArray) column. +@pytest.fixture(params=[False, True]) +def table_data(request): + class TableData: + def __init__(self, request): + self.Table = MaskedTable if request.param else table.Table + self.Column = table.MaskedColumn if request.param else table.Column + self.COLS = [ + self.Column(name='a', data=[1, 2, 3], description='da', + format='fa', meta={'ma': 1}, unit='ua'), + self.Column(name='b', data=[4, 5, 6], description='db', + format='fb', meta={'mb': 1}, unit='ub'), + self.Column(name='c', data=[7, 8, 9], description='dc', + format='fc', meta={'mc': 1}, unit='ub')] + self.DATA = self.Table(self.COLS) + return TableData(request) + + +class SubclassTable(table.Table): + pass + + +@pytest.fixture(params=[True, False]) +def tableclass(request): + return table.Table if request.param else SubclassTable + + +@pytest.fixture(params=list(range(0, pickle.HIGHEST_PROTOCOL + 1))) +def protocol(request): + """ + Fixture to run all the tests for all available pickle protocols. + """ + return request.param + + +# Fixture to run all tests for both an unmasked (ndarray) and masked +# (MaskedArray) column. +@pytest.fixture(params=[False, True]) +def table_type(request): + # return MaskedTable if request.param else table.Table + try: + request.param + return MaskedTable + except AttributeError: + return table.Table + + +# Stuff for testing mixin columns + +MIXIN_COLS = {'quantity': [0, 1, 2, 3] * u.m, + 'longitude': coordinates.Longitude([0., 1., 5., 6.]*u.deg, + wrap_angle=180.*u.deg), + 'latitude': coordinates.Latitude([5., 6., 10., 11.]*u.deg), + 'time': time.Time([2000, 2001, 2002, 2003], format='jyear'), + 'skycoord': coordinates.SkyCoord(ra=[0, 1, 2, 3] * u.deg, + dec=[0, 1, 2, 3] * u.deg), + 'arraywrap': table_helpers.ArrayWrapper([0, 1, 2, 3]), + 'ndarray': np.array([(7, 'a'), (8, 'b'), (9, 'c'), (9, 'c')], + dtype='\n1\n2\n3".format(Column.__name__) + + def test_format(self, Column): + """Show that the formatted output from str() works""" + from ... import conf + with conf.set_temp('max_lines', 8): + c1 = Column(np.arange(2000), name='a', dtype=float, + format='%6.2f') + assert str(c1).splitlines() == [' a ', + '-------', + ' 0.00', + ' 1.00', + ' ...', + '1998.00', + '1999.00', + 'Length = 2000 rows'] + + def test_convert_numpy_array(self, Column): + d = Column([1, 2, 3], name='a', dtype='i8') + + np_data = np.array(d) + assert np.all(np_data == d) + np_data = np.array(d, copy=False) + assert np.all(np_data == d) + np_data = np.array(d, dtype='i4') + assert np.all(np_data == d) + + def test_convert_unit(self, Column): + d = Column([1, 2, 3], name='a', dtype="f8", unit="m") + d.convert_unit_to("km") + assert np.all(d.data == [0.001, 0.002, 0.003]) + + def test_array_wrap(self): + """Test that the __array_wrap__ method converts a reduction ufunc + output that has a different shape into an ndarray view. 
Without this a + method call like c.mean() returns a Column array object with length=1.""" + # Mean and sum for a 1-d float column + c = table.Column(name='a', data=[1., 2., 3.]) + assert np.allclose(c.mean(), 2.0) + assert isinstance(c.mean(), (np.floating, float)) + assert np.allclose(c.sum(), 6.) + assert isinstance(c.sum(), (np.floating, float)) + + # Non-reduction ufunc preserves Column class + assert isinstance(np.cos(c), table.Column) + + # Sum for a 1-d int column + c = table.Column(name='a', data=[1, 2, 3]) + assert np.allclose(c.sum(), 6) + assert isinstance(c.sum(), (np.integer, int)) + + # Sum for a 2-d int column + c = table.Column(name='a', data=[[1, 2, 3], + [4, 5, 6]]) + assert c.sum() == 21 + assert isinstance(c.sum(), (np.integer, int)) + assert np.all(c.sum(axis=0) == [5, 7, 9]) + assert c.sum(axis=0).shape == (3,) + assert isinstance(c.sum(axis=0), np.ndarray) + + # Sum and mean for a 1-d masked column + c = table.MaskedColumn(name='a', data=[1., 2., 3.], mask=[0, 0, 1]) + assert np.allclose(c.mean(), 1.5) + assert isinstance(c.mean(), (np.floating, float)) + assert np.allclose(c.sum(), 3.) + assert isinstance(c.sum(), (np.floating, float)) + + def test_name_none(self, Column): + """Can create a column without supplying name, which defaults to None""" + c = Column([1, 2]) + assert c.name is None + assert np.all(c == np.array([1, 2])) + + def test_quantity_init(self, Column): + + c = Column(data=np.array([1, 2, 3]) * u.m) + assert np.all(c.data == np.array([1, 2, 3])) + assert np.all(c.unit == u.m) + + c = Column(data=np.array([1, 2, 3]) * u.m, unit=u.cm) + assert np.all(c.data == np.array([100, 200, 300])) + assert np.all(c.unit == u.cm) + + def test_attrs_survive_getitem_after_change(self, Column): + """ + Test for issue #3023: when calling getitem with a MaskedArray subclass + the original object attributes are not copied. + """ + c1 = Column([1, 2, 3], name='a', unit='m', format='i', + description='aa', meta={'a': 1}) + c1.name = 'b' + c1.unit = 'km' + c1.format = 'i2' + c1.description = 'bb' + c1.meta = {'bbb': 2} + + for item in (slice(None, None), slice(None, 1), np.array([0, 2]), + np.array([False, True, False])): + c2 = c1[item] + assert c2.name == 'b' + assert c2.unit is u.km + assert c2.format == 'i2' + assert c2.description == 'bb' + assert c2.meta == {'bbb': 2} + + # Make sure that calling getitem resulting in a scalar does + # not copy attributes. + val = c1[1] + for attr in ('name', 'unit', 'format', 'description', 'meta'): + assert not hasattr(val, attr) + + def test_to_quantity(self, Column): + d = Column([1, 2, 3], name='a', dtype="f8", unit="m") + + assert np.all(d.quantity == ([1, 2, 3.] * u.m)) + assert np.all(d.quantity.value == ([1, 2, 3.] 
* u.m).value)
+ assert np.all(d.quantity == d.to('m'))
+ assert np.all(d.quantity.value == d.to('m').value)
+
+ np.testing.assert_allclose(d.to(u.km).value, ([.001, .002, .003] * u.km).value)
+ np.testing.assert_allclose(d.to('km').value, ([.001, .002, .003] * u.km).value)
+
+ np.testing.assert_allclose(d.to(u.MHz, u.equivalencies.spectral()).value,
+ [299.792458, 149.896229, 99.93081933])
+
+ d_nounit = Column([1, 2, 3], name='a', dtype="f8", unit=None)
+ with pytest.raises(u.UnitsError):
+ d_nounit.to(u.km)
+ assert np.all(d_nounit.to(u.dimensionless_unscaled) == np.array([1, 2, 3]))
+
+ # make sure the correct copy/no copy behavior is happening
+ q = [1, 3, 5]*u.km
+
+ # to should always make a copy
+ d.to(u.km)[:] = q
+ np.testing.assert_allclose(d, [1, 2, 3])
+
+ # explicit copying of the quantity should not change the column
+ d.quantity.copy()[:] = q
+ np.testing.assert_allclose(d, [1, 2, 3])
+
+ # but quantity directly is a "view", accessing the underlying column
+ d.quantity[:] = q
+ np.testing.assert_allclose(d, [1000, 3000, 5000])
+
+ # view should also work for integers
+ d2 = Column([1, 2, 3], name='a', dtype=int, unit="m")
+ d2.quantity[:] = q
+ np.testing.assert_allclose(d2, [1000, 3000, 5000])
+
+ # but it should fail for strings or other non-numeric columns
+ d3 = Column(['arg', 'name', 'stuff'], name='a', unit="m")
+ with pytest.raises(TypeError):
+ d3.quantity
+
+ def test_item_access_type(self, Column):
+ """
+ Tests for #3095, which forces integer item access to always return a plain
+ ndarray or MaskedArray, even in the case of a multi-dim column.
+ """
+ integer_types = (int, long, np.int) if six.PY2 else (int, np.int)
+
+ for int_type in integer_types:
+ c = Column([[1, 2], [3, 4]])
+ i0 = int_type(0)
+ i1 = int_type(1)
+ assert np.all(c[i0] == [1, 2])
+ assert type(c[i0]) == (np.ma.MaskedArray if hasattr(Column, 'mask') else np.ndarray)
+ assert c[i0].shape == (2,)
+
+ c01 = c[i0:i1]
+ assert np.all(c01 == [[1, 2]])
+ assert isinstance(c01, Column)
+ assert c01.shape == (1, 2)
+
+ c = Column([1, 2])
+ assert np.all(c[i0] == 1)
+ assert isinstance(c[i0], np.integer)
+ assert c[i0].shape == ()
+
+ c01 = c[i0:i1]
+ assert np.all(c01 == [1])
+ assert isinstance(c01, Column)
+ assert c01.shape == (1,)
+
+ def test_insert_basic(self, Column):
+ c = Column([0, 1, 2], name='a', dtype=int, unit='mJy', format='%i',
+ description='test column', meta={'c': 8, 'd': 12})
+
+ # Basic insert
+ c1 = c.insert(1, 100)
+ assert np.all(c1 == [0, 100, 1, 2])
+ assert c1.attrs_equal(c)
+ assert type(c) is type(c1)
+ if hasattr(c1, 'mask'):
+ assert c1.data.shape == c1.mask.shape
+
+ c1 = c.insert(-1, 100)
+ assert np.all(c1 == [0, 1, 100, 2])
+
+ c1 = c.insert(3, 100)
+ assert np.all(c1 == [0, 1, 2, 100])
+
+ c1 = c.insert(-3, 100)
+ assert np.all(c1 == [100, 0, 1, 2])
+
+ c1 = c.insert(1, [100, 200, 300])
+ if hasattr(c1, 'mask'):
+ assert c1.data.shape == c1.mask.shape
+
+ # Out of bounds index
+ with pytest.raises((ValueError, IndexError)):
+ c1 = c.insert(-4, 100)
+ with pytest.raises((ValueError, IndexError)):
+ c1 = c.insert(4, 100)
+
+ def test_insert_axis(self, Column):
+ """Insert with non-default axis kwarg"""
+ c = Column([[1, 2], [3, 4]])
+
+ c1 = c.insert(1, [5, 6], axis=None)
+ assert np.all(c1 == [1, 5, 6, 2, 3, 4])
+
+ c1 = c.insert(1, [5, 6], axis=1)
+ assert np.all(c1 == [[1, 5, 2], [3, 6, 4]])
+
+ def test_insert_multidim(self, Column):
+ c = Column([[1, 2],
+ [3, 4]], name='a', dtype=int)
+
+ # Basic insert
+ c1 = c.insert(1, [100, 200])
+ assert np.all(c1 == [[1, 2],
[100, 200], [3, 4]])
+
+ # Broadcast
+ c1 = c.insert(1, 100)
+ assert np.all(c1 == [[1, 2], [100, 100], [3, 4]])
+
+ # Wrong shape
+ with pytest.raises(ValueError):
+ c1 = c.insert(1, [100, 200, 300])
+
+ def test_insert_object(self, Column):
+ c = Column(['a', 1, None], name='a', dtype=object)
+
+ # Basic insert
+ c1 = c.insert(1, [100, 200])
+ assert np.all(c1 == ['a', [100, 200], 1, None])
+
+ def test_insert_masked(self):
+ c = table.MaskedColumn([0, 1, 2], name='a', mask=[False, True, False])
+
+ # Basic insert
+ c1 = c.insert(1, 100)
+ assert np.all(c1.data.data == [0, 100, 1, 2])
+ assert np.all(c1.data.mask == [False, False, True, False])
+ assert type(c) is type(c1)
+
+ for mask in (False, True):
+ c1 = c.insert(1, 100, mask=mask)
+ assert np.all(c1.data.data == [0, 100, 1, 2])
+ assert np.all(c1.data.mask == [False, mask, True, False])
+
+ def test_insert_masked_multidim(self):
+ c = table.MaskedColumn([[1, 2],
+ [3, 4]], name='a', dtype=int)
+
+ c1 = c.insert(1, [100, 200], mask=True)
+ assert np.all(c1.data.data == [[1, 2], [100, 200], [3, 4]])
+ assert np.all(c1.data.mask == [[False, False], [True, True], [False, False]])
+
+ c1 = c.insert(1, [100, 200], mask=[True, False])
+ assert np.all(c1.data.data == [[1, 2], [100, 200], [3, 4]])
+ assert np.all(c1.data.mask == [[False, False], [True, False], [False, False]])
+
+ with pytest.raises(ValueError):
+ c1 = c.insert(1, [100, 200], mask=[True, False, True])
+
+ def test_mask_on_non_masked_table(self):
+ """
+ When the table is not masked, trying to set a mask on a column
+ raises AttributeError.
+ """
+
+ t = table.Table([[1, 2], [3, 4]], names=('a', 'b'), dtype=('i4', 'f8'))
+
+ with pytest.raises(AttributeError):
+ t['a'].mask = [True, False]
+
+
+class TestAttrEqual():
+ """Bunch of tests originally from ATpy that test the attrs_equal method."""
+
+ def test_5(self, Column):
+ c1 = Column(name='a', dtype=int, unit='mJy')
+ c2 = Column(name='a', dtype=int, unit='mJy')
+ assert c1.attrs_equal(c2)
+
+ def test_6(self, Column):
+ c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
+ description='test column', meta={'c': 8, 'd': 12})
+ c2 = Column(name='a', dtype=int, unit='mJy', format='%i',
+ description='test column', meta={'c': 8, 'd': 12})
+ assert c1.attrs_equal(c2)
+
+ def test_7(self, Column):
+ c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
+ description='test column', meta={'c': 8, 'd': 12})
+ c2 = Column(name='b', dtype=int, unit='mJy', format='%i',
+ description='test column', meta={'c': 8, 'd': 12})
+ assert not c1.attrs_equal(c2)
+
+ def test_8(self, Column):
+ c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
+ description='test column', meta={'c': 8, 'd': 12})
+ c2 = Column(name='a', dtype=float, unit='mJy', format='%i',
+ description='test column', meta={'c': 8, 'd': 12})
+ assert not c1.attrs_equal(c2)
+
+ def test_9(self, Column):
+ c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
+ description='test column', meta={'c': 8, 'd': 12})
+ c2 = Column(name='a', dtype=int, unit='erg.cm-2.s-1.Hz-1', format='%i',
+ description='test column', meta={'c': 8, 'd': 12})
+ assert not c1.attrs_equal(c2)
+
+ def test_10(self, Column):
+ c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
+ description='test column', meta={'c': 8, 'd': 12})
+ c2 = Column(name='a', dtype=int, unit='mJy', format='%g',
+ description='test column', meta={'c': 8, 'd': 12})
+ assert not c1.attrs_equal(c2)
+
+ def test_11(self, Column):
+ c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
+
description='test column', meta={'c': 8, 'd': 12}) + c2 = Column(name='a', dtype=int, unit='mJy', format='%i', + description='another test column', meta={'c': 8, 'd': 12}) + assert not c1.attrs_equal(c2) + + def test_12(self, Column): + c1 = Column(name='a', dtype=int, unit='mJy', format='%i', + description='test column', meta={'c': 8, 'd': 12}) + c2 = Column(name='a', dtype=int, unit='mJy', format='%i', + description='test column', meta={'e': 8, 'd': 12}) + assert not c1.attrs_equal(c2) + + def test_13(self, Column): + c1 = Column(name='a', dtype=int, unit='mJy', format='%i', + description='test column', meta={'c': 8, 'd': 12}) + c2 = Column(name='a', dtype=int, unit='mJy', format='%i', + description='test column', meta={'c': 9, 'd': 12}) + assert not c1.attrs_equal(c2) + + def test_col_and_masked_col(self): + c1 = table.Column(name='a', dtype=int, unit='mJy', format='%i', + description='test column', meta={'c': 8, 'd': 12}) + c2 = table.MaskedColumn(name='a', dtype=int, unit='mJy', format='%i', + description='test column', meta={'c': 8, 'd': 12}) + assert c1.attrs_equal(c2) + assert c2.attrs_equal(c1) + +# Check that the meta descriptor is working as expected. The MetaBaseTest class +# takes care of defining all the tests, and we simply have to define the class +# and any minimal set of args to pass. + + +from ...utils.tests.test_metadata import MetaBaseTest + + +class TestMetaColumn(MetaBaseTest): + test_class = table.Column + args = () + + +class TestMetaMaskedColumn(MetaBaseTest): + test_class = table.MaskedColumn + args = () + + +def test_getitem_metadata_regression(): + """ + Regression test for #1471: MaskedArray does not call __array_finalize__ so + the meta-data was not getting copied over. By overloading _update_from we + are able to work around this bug. 
+ """ + + # Make sure that meta-data gets propagated with __getitem__ + + c = table.Column(data=[1, 2], name='a', description='b', unit='m', format="%i", meta={'c': 8}) + assert c[1:2].name == 'a' + assert c[1:2].description == 'b' + assert c[1:2].unit == 'm' + assert c[1:2].format == '%i' + assert c[1:2].meta['c'] == 8 + + c = table.MaskedColumn(data=[1, 2], name='a', description='b', unit='m', format="%i", meta={'c': 8}) + assert c[1:2].name == 'a' + assert c[1:2].description == 'b' + assert c[1:2].unit == 'm' + assert c[1:2].format == '%i' + assert c[1:2].meta['c'] == 8 + + # As above, but with take() - check the method and the function + + c = table.Column(data=[1, 2, 3], name='a', description='b', unit='m', format="%i", meta={'c': 8}) + for subset in [c.take([0, 1]), np.take(c, [0, 1])]: + assert subset.name == 'a' + assert subset.description == 'b' + assert subset.unit == 'm' + assert subset.format == '%i' + assert subset.meta['c'] == 8 + + # Metadata isn't copied for scalar values + for subset in [c.take(0), np.take(c, 0)]: + assert subset == 1 + assert subset.shape == () + assert not isinstance(subset, table.Column) + + c = table.MaskedColumn(data=[1, 2, 3], name='a', description='b', unit='m', format="%i", meta={'c': 8}) + for subset in [c.take([0, 1]), np.take(c, [0, 1])]: + assert subset.name == 'a' + assert subset.description == 'b' + assert subset.unit == 'm' + assert subset.format == '%i' + assert subset.meta['c'] == 8 + + # Metadata isn't copied for scalar values + for subset in [c.take(0), np.take(c, 0)]: + assert subset == 1 + assert subset.shape == () + assert not isinstance(subset, table.MaskedColumn) + + +def test_unicode_guidelines(): + arr = np.array([1, 2, 3]) + c = table.Column(arr, name='a') + + assert_follows_unicode_guidelines(c) + + +def test_scalar_column(): + """ + Column is not designed to hold scalars, but for numpy 1.6 this can happen: + + >> type(np.std(table.Column([1, 2]))) + astropy.table.column.Column + """ + c = table.Column(1.5) + assert repr(c) == '1.5' + assert str(c) == '1.5' + + +def test_qtable_column_conversion(): + """ + Ensures that a QTable that gets assigned a unit switches to be Quantity-y + """ + qtab = table.QTable([[1, 2], [3, 4.2]], names=['i', 'f']) + + assert isinstance(qtab['i'], table.column.Column) + assert isinstance(qtab['f'], table.column.Column) + + qtab['i'].unit = 'km/s' + assert isinstance(qtab['i'], u.Quantity) + assert isinstance(qtab['f'], table.column.Column) + + # should follow from the above, but good to make sure as a #4497 regression test + assert isinstance(qtab['i'][0], u.Quantity) + assert isinstance(qtab[0]['i'], u.Quantity) + assert not isinstance(qtab['f'][0], u.Quantity) + assert not isinstance(qtab[0]['f'], u.Quantity) + + # Regression test for #5342: if a function unit is assigned, the column + # should become the appropriate FunctionQuantity subclass. + qtab['f'].unit = u.dex(u.cm/u.s**2) + assert isinstance(qtab['f'], u.Dex) + + +@pytest.mark.parametrize('masked', [True, False]) +def test_string_truncation_warning(masked): + """ + Test warnings associated with in-place assignment to a string + column that results in truncation of the right hand side. 
+ """ + t = table.Table([['aa', 'bb']], names=['a'], masked=masked) + + with catch_warnings() as w: + from inspect import currentframe, getframeinfo + t['a'][1] = 'cc' + assert len(w) == 0 + + t['a'][:] = 'dd' + assert len(w) == 0 + + with catch_warnings() as w: + frameinfo = getframeinfo(currentframe()) + t['a'][0] = 'eee' # replace item with string that gets truncated + assert t['a'][0] == 'ee' + assert len(w) == 1 + assert ('truncated right side string(s) longer than 2 character(s)' + in str(w[0].message)) + + # Make sure the warning points back to the user code line + assert w[0].lineno == frameinfo.lineno + 1 + assert w[0].category is table.StringTruncateWarning + assert 'test_column' in w[0].filename + + with catch_warnings() as w: + t['a'][:] = ['ff', 'ggg'] # replace item with string that gets truncated + assert np.all(t['a'] == ['ff', 'gg']) + assert len(w) == 1 + assert ('truncated right side string(s) longer than 2 character(s)' + in str(w[0].message)) + + with catch_warnings() as w: + # Test the obscure case of assigning from an array that was originally + # wider than any of the current elements (i.e. dtype is U4 but actual + # elements are U1 at the time of assignment). + val = np.array(['ffff', 'gggg']) + val[:] = ['f', 'g'] + t['a'][:] = val + assert np.all(t['a'] == ['f', 'g']) + assert len(w) == 0 + + +def test_string_truncation_warning_masked(): + """ + Test warnings associated with in-place assignment to a string + to a masked column, specifically where the right hand side + contains np.ma.masked. + """ + + # Test for strings, but also cover assignment of np.ma.masked to + # int and float masked column setting. This was previously only + # covered in an unrelated io.ascii test (test_line_endings) which + # showed an unexpected difference between handling of str and numeric + # masked arrays. + for values in (['a', 'b'], [1, 2], [1.0, 2.0]): + mc = table.MaskedColumn(values) + + with catch_warnings() as w: + mc[1] = np.ma.masked + assert len(w) == 0 + assert np.all(mc.mask == [False, True]) + + mc[:] = np.ma.masked + assert len(w) == 0 + assert np.all(mc.mask == [True, True]) + + mc = table.MaskedColumn(['aa', 'bb']) + + with catch_warnings() as w: + mc[:] = [np.ma.masked, 'ggg'] # replace item with string that gets truncated + assert mc[1] == 'gg' + assert np.all(mc.mask == [True, False]) + assert len(w) == 1 + assert ('truncated right side string(s) longer than 2 character(s)' + in str(w[0].message)) + + +@pytest.mark.skipif('six.PY2') +@pytest.mark.parametrize('Column', (table.Column, table.MaskedColumn)) +def test_col_unicode_sandwich_create_from_str(Column): + """ + Create a bytestring Column from strings (including unicode) in Py3. + """ + # a-umlaut is a 2-byte character in utf-8, test fails with ascii encoding. + # Stress the system by injecting non-ASCII characters. + uba = u'bä' + c = Column([uba, 'def'], dtype='S') + assert c.dtype.char == 'S' + assert c[0] == uba + assert isinstance(c[0], str) + assert isinstance(c[:0], table.Column) + assert np.all(c[:2] == np.array([uba, 'def'])) + + +@pytest.mark.parametrize('Column', (table.Column, table.MaskedColumn)) +def test_col_unicode_sandwich_bytes(Column): + """ + Create a bytestring Column from bytes and ensure that it works in Python 3 in + a convenient way like in Python 2. + """ + # a-umlaut is a 2-byte character in utf-8, test fails with ascii encoding. + # Stress the system by injecting non-ASCII characters. 
+    uba = 'ba' if six.PY2 else u'bä'
+    uba8 = uba.encode('utf-8')
+    c = Column([uba8, b'def'])
+    assert c.dtype.char == 'S'
+    assert c[0] == (uba8 if six.PY2 else uba)  # Can compare utf-8 directly only in PY3
+    assert isinstance(c[0], str)
+    assert isinstance(c[:0], table.Column)
+    assert np.all(c[:2] == np.array([uba, 'def']))
+
+    assert isinstance(c[:], table.Column)
+    assert c[:].dtype.char == 'S'
+
+    # Array / list comparisons
+    if not six.PY2:
+        assert np.all(c == [uba, 'def'])
+
+    ok = c == [uba8, b'def']
+    assert type(ok) is type(c.data)
+    assert ok.dtype.char == '?'
+    assert np.all(ok)
+
+    assert np.all(c == np.array([uba, u'def']))
+    if not six.PY2:
+        assert np.all(c == np.array([uba8, b'def']))
+
+    # Scalar compare
+    cmps = (uba8,) if six.PY2 else (uba, uba8)
+    for cmp in cmps:
+        ok = c == cmp
+        assert type(ok) is type(c.data)
+        assert np.all(ok == [True, False])
+
+
+def test_col_unicode_sandwich_unicode():
+    """
+    Sanity check that Unicode Column behaves normally.
+    """
+    # On Py2 the unicode must be ASCII-compatible, else the final test fails.
+    uba = 'ba' if six.PY2 else u'bä'
+    uba8 = uba.encode('utf-8')
+
+    c = table.Column([uba, 'def'], dtype='U')
+    assert c[0] == uba
+    assert isinstance(c[:0], table.Column)
+    assert isinstance(c[0], six.text_type)
+    assert np.all(c[:2] == np.array([uba, 'def']))
+
+    assert isinstance(c[:], table.Column)
+    assert c[:].dtype.char == 'U'
+
+    ok = c == [uba, 'def']
+    assert type(ok) == np.ndarray
+    assert ok.dtype.char == '?'
+    assert np.all(ok)
+
+    # In PY2 unicode is equal to bytestrings but not in PY3
+    if six.PY2:
+        assert np.all(c == [uba8, b'def'])
+    else:
+        assert np.all(c != [uba8, b'def'])
+
+
+def test_masked_col_unicode_sandwich():
+    """
+    Create a bytestring MaskedColumn and ensure that it works in Python 3 in
+    a convenient way like in Python 2.
+    """
+    c = table.MaskedColumn([b'abc', b'def'])
+    c[1] = np.ma.masked
+    assert isinstance(c[:0], table.MaskedColumn)
+    assert isinstance(c[0], str)
+
+    assert c[0] == 'abc'
+    assert c[1] is np.ma.masked
+
+    assert isinstance(c[:], table.MaskedColumn)
+    assert c[:].dtype.char == 'S'
+
+    ok = c == ['abc', 'def']
+    assert ok[0] == True
+    assert ok[1] is np.ma.masked
+    assert np.all(c == [b'abc', b'def'])
+    assert np.all(c == np.array([u'abc', u'def']))
+    assert np.all(c == np.array([b'abc', b'def']))
+
+    for cmp in (u'abc', b'abc'):
+        ok = c == cmp
+        assert type(ok) is np.ma.MaskedArray
+        assert ok[0] == True
+        assert ok[1] is np.ma.masked
+
+
+@pytest.mark.parametrize('Column', (table.Column, table.MaskedColumn))
+def test_unicode_sandwich_set(Column):
+    """
+    Test setting
+    """
+    uba = 'ba' if six.PY2 else u'bä'
+
+    c = Column([b'abc', b'def'])
+
+    c[0] = b'aa'
+    assert np.all(c == [u'aa', u'def'])
+
+    c[0] = uba  # a-umlaut is a 2-byte character in utf-8, test fails with ascii encoding
+    assert np.all(c == [uba, u'def'])
+    assert c.pformat() == [u'None', u'----', '  ' + uba, u' def']
+
+    c[:] = b'cc'
+    assert np.all(c == [u'cc', u'cc'])
+
+    c[:] = uba
+    assert np.all(c == [uba, uba])
+
+    c[:] = ''
+    c[:] = [uba, b'def']
+    assert np.all(c == [uba, b'def'])
+
+
+@pytest.mark.parametrize('class1', [table.MaskedColumn, table.Column])
+@pytest.mark.parametrize('class2', [table.MaskedColumn, table.Column, str, list])
+def test_unicode_sandwich_compare(class1, class2):
+    """Test that comparing a bytestring Column/MaskedColumn with various
+    str (unicode) object types gives the expected result. Tests #6838.
+ """ + obj1 = class1([b'a', b'c']) + if class2 is str: + obj2 = str('a') + elif class2 is list: + obj2 = ['a', 'b'] + else: + obj2 = class2(['a', 'b']) + + if six.PY2 and class2 == str: + return pytest.skip() + + assert np.all((obj1 == obj2) == [True, False]) + assert np.all((obj2 == obj1) == [True, False]) + + assert np.all((obj1 != obj2) == [False, True]) + assert np.all((obj2 != obj1) == [False, True]) + + assert np.all((obj1 > obj2) == [False, True]) + assert np.all((obj2 > obj1) == [False, False]) + + assert np.all((obj1 <= obj2) == [True, False]) + assert np.all((obj2 <= obj1) == [True, True]) + + assert np.all((obj1 < obj2) == [False, False]) + assert np.all((obj2 < obj1) == [False, True]) + + assert np.all((obj1 >= obj2) == [True, True]) + assert np.all((obj2 >= obj1) == [True, False]) + + +def test_unicode_sandwich_masked_compare(): + """Test the fix for #6839 from #6899.""" + c1 = table.MaskedColumn(['a', 'b', 'c', 'd'], + mask=[True, False, True, False]) + c2 = table.MaskedColumn([b'a', b'b', b'c', b'd'], + mask=[True, True, False, False]) + + for cmp in ((c1 == c2), (c2 == c1)): + assert cmp[0] is np.ma.masked + assert cmp[1] is np.ma.masked + assert cmp[2] is np.ma.masked + assert cmp[3] + + for cmp in ((c1 != c2), (c2 != c1)): + assert cmp[0] is np.ma.masked + assert cmp[1] is np.ma.masked + assert cmp[2] is np.ma.masked + assert not cmp[3] + + # Note: comparisons <, >, >=, <= fail to return a masked array entirely, + # see https://github.com/numpy/numpy/issues/10092. diff --git a/astropy/table/tests/test_groups.py b/astropy/table/tests/test_groups.py new file mode 100644 index 0000000..8091c53 --- /dev/null +++ b/astropy/table/tests/test_groups.py @@ -0,0 +1,581 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +# TEST_UNICODE_LITERALS + +import pytest +import numpy as np + +from ...tests.helper import catch_warnings +from ...table import Table, Column +from ...utils.exceptions import AstropyUserWarning + + +def sort_eq(list1, list2): + return sorted(list1) == sorted(list2) + + +def test_column_group_by(T1): + for masked in (False, True): + t1 = Table(T1, masked=masked) + t1a = t1['a'].copy() + + # Group by a Column (i.e. numpy array) + t1ag = t1a.group_by(t1['a']) + assert np.all(t1ag.groups.indices == np.array([0, 1, 4, 8])) + + # Group by a Table + t1ag = t1a.group_by(t1['a', 'b']) + assert np.all(t1ag.groups.indices == np.array([0, 1, 3, 4, 5, 7, 8])) + + # Group by a numpy structured array + t1ag = t1a.group_by(t1['a', 'b'].as_array()) + assert np.all(t1ag.groups.indices == np.array([0, 1, 3, 4, 5, 7, 8])) + + +def test_table_group_by(T1): + """ + Test basic table group_by functionality for possible key types and for + masked/unmasked tables. 
+ """ + for masked in (False, True): + t1 = Table(T1, masked=masked) + # Group by a single column key specified by name + tg = t1.group_by('a') + assert np.all(tg.groups.indices == np.array([0, 1, 4, 8])) + assert str(tg.groups) == "" + assert str(tg['a'].groups) == "" + + # Sorted by 'a' and in original order for rest + assert tg.pformat() == [' a b c d ', + '--- --- --- ---', + ' 0 a 0.0 4', + ' 1 b 3.0 5', + ' 1 a 2.0 6', + ' 1 a 1.0 7', + ' 2 c 7.0 0', + ' 2 b 5.0 1', + ' 2 b 6.0 2', + ' 2 a 4.0 3'] + assert tg.meta['ta'] == 1 + assert tg['c'].meta['a'] == 1 + assert tg['c'].description == 'column c' + + # Group by a table column + tg2 = t1.group_by(t1['a']) + assert tg.pformat() == tg2.pformat() + + # Group by two columns spec'd by name + for keys in (['a', 'b'], ('a', 'b')): + tg = t1.group_by(keys) + assert np.all(tg.groups.indices == np.array([0, 1, 3, 4, 5, 7, 8])) + # Sorted by 'a', 'b' and in original order for rest + assert tg.pformat() == [' a b c d ', + '--- --- --- ---', + ' 0 a 0.0 4', + ' 1 a 2.0 6', + ' 1 a 1.0 7', + ' 1 b 3.0 5', + ' 2 a 4.0 3', + ' 2 b 5.0 1', + ' 2 b 6.0 2', + ' 2 c 7.0 0'] + + # Group by a Table + tg2 = t1.group_by(t1['a', 'b']) + assert tg.pformat() == tg2.pformat() + + # Group by a structured array + tg2 = t1.group_by(t1['a', 'b'].as_array()) + assert tg.pformat() == tg2.pformat() + + # Group by a simple ndarray + tg = t1.group_by(np.array([0, 1, 0, 1, 2, 1, 0, 0])) + assert np.all(tg.groups.indices == np.array([0, 4, 7, 8])) + assert tg.pformat() == [' a b c d ', + '--- --- --- ---', + ' 2 c 7.0 0', + ' 2 b 6.0 2', + ' 1 a 2.0 6', + ' 1 a 1.0 7', + ' 2 b 5.0 1', + ' 2 a 4.0 3', + ' 1 b 3.0 5', + ' 0 a 0.0 4'] + + +def test_groups_keys(T1): + tg = T1.group_by('a') + keys = tg.groups.keys + assert keys.dtype.names == ('a',) + assert np.all(keys['a'] == np.array([0, 1, 2])) + + tg = T1.group_by(['a', 'b']) + keys = tg.groups.keys + assert keys.dtype.names == ('a', 'b') + assert np.all(keys['a'] == np.array([0, 1, 1, 2, 2, 2])) + assert np.all(keys['b'] == np.array(['a', 'a', 'b', 'a', 'b', 'c'])) + + # Grouping by Column ignores column name + tg = T1.group_by(T1['b']) + keys = tg.groups.keys + assert keys.dtype.names is None + + +def test_groups_iterator(T1): + tg = T1.group_by('a') + for ii, group in enumerate(tg.groups): + assert group.pformat() == tg.groups[ii].pformat() + assert group['a'][0] == tg['a'][tg.groups.indices[ii]] + + +def test_grouped_copy(T1): + """ + Test that copying a table or column copies the groups properly + """ + for masked in (False, True): + t1 = Table(T1, masked=masked) + tg = t1.group_by('a') + tgc = tg.copy() + assert np.all(tgc.groups.indices == tg.groups.indices) + assert np.all(tgc.groups.keys == tg.groups.keys) + + tac = tg['a'].copy() + assert np.all(tac.groups.indices == tg['a'].groups.indices) + + c1 = t1['a'].copy() + gc1 = c1.group_by(t1['a']) + gc1c = gc1.copy() + assert np.all(gc1c.groups.indices == np.array([0, 1, 4, 8])) + + +def test_grouped_slicing(T1): + """ + Test that slicing a table removes previous grouping + """ + + for masked in (False, True): + t1 = Table(T1, masked=masked) + + # Regular slice of a table + tg = t1.group_by('a') + tg2 = tg[3:5] + assert np.all(tg2.groups.indices == np.array([0, len(tg2)])) + assert tg2.groups.keys is None + + +def test_group_column_from_table(T1): + """ + Group a column that is part of a table + """ + cg = T1['c'].group_by(np.array(T1['a'])) + assert np.all(cg.groups.keys == np.array([0, 1, 2])) + assert np.all(cg.groups.indices == np.array([0, 1, 4, 8])) + + +def 
+
+
+def test_table_groups_mask_index(T1):
+    """
+    Use boolean mask as item in __getitem__ for groups
+    """
+    for masked in (False, True):
+        t1 = Table(T1, masked=masked).group_by('a')
+
+        t2 = t1.groups[np.array([True, False, True])]
+        assert len(t2.groups) == 2
+        assert t2.groups[0].pformat() == t1.groups[0].pformat()
+        assert t2.groups[1].pformat() == t1.groups[2].pformat()
+        assert np.all(t2.groups.keys['a'] == np.array([0, 2]))
+
+
+def test_table_groups_array_index(T1):
+    """
+    Use numpy array as item in __getitem__ for groups
+    """
+    for masked in (False, True):
+        t1 = Table(T1, masked=masked).group_by('a')
+
+        t2 = t1.groups[np.array([0, 2])]
+        assert len(t2.groups) == 2
+        assert t2.groups[0].pformat() == t1.groups[0].pformat()
+        assert t2.groups[1].pformat() == t1.groups[2].pformat()
+        assert np.all(t2.groups.keys['a'] == np.array([0, 2]))
+
+
+def test_table_groups_slicing(T1):
+    """
+    Test that slicing table groups works
+    """
+
+    for masked in (False, True):
+        t1 = Table(T1, masked=masked).group_by('a')
+
+        # slice(0, 2)
+        t2 = t1.groups[0:2]
+        assert len(t2.groups) == 2
+        assert t2.groups[0].pformat() == t1.groups[0].pformat()
+        assert t2.groups[1].pformat() == t1.groups[1].pformat()
+        assert np.all(t2.groups.keys['a'] == np.array([0, 1]))
+
+        # slice(1, 2)
+        t2 = t1.groups[1:2]
+        assert len(t2.groups) == 1
+        assert t2.groups[0].pformat() == t1.groups[1].pformat()
+        assert np.all(t2.groups.keys['a'] == np.array([1]))
+
+        # slice(0, 3, 2)
+        t2 = t1.groups[0:3:2]
+        assert len(t2.groups) == 2
+        assert t2.groups[0].pformat() == t1.groups[0].pformat()
+        assert t2.groups[1].pformat() == t1.groups[2].pformat()
+        assert np.all(t2.groups.keys['a'] == np.array([0, 2]))
+
+
+def test_grouped_item_access(T1):
+    """
+    Test that column slicing preserves grouping
+    """
+    for masked in (False, True):
+        t1 = Table(T1, masked=masked)
+
+        # Regular slice of a table
+        tg = t1.group_by('a')
+        tgs = tg['a', 'c', 'd']
+        assert np.all(tgs.groups.keys == tg.groups.keys)
+        assert np.all(tgs.groups.indices == tg.groups.indices)
+        tgsa = tgs.groups.aggregate(np.sum)
+        assert tgsa.pformat() == [' a   c    d ',
+                                  '--- ---- ---',
+                                  '  0  0.0   4',
+                                  '  1  6.0  18',
+                                  '  2 22.0   6']
+
+        tgs = tg['c', 'd']
+        assert np.all(tgs.groups.keys == tg.groups.keys)
+        assert np.all(tgs.groups.indices == tg.groups.indices)
+        tgsa = tgs.groups.aggregate(np.sum)
+        assert tgsa.pformat() == [' c    d ',
+                                  '---- ---',
+                                  ' 0.0   4',
+                                  ' 6.0  18',
+                                  '22.0   6']
+
+
+def test_mutable_operations(T1):
+    """
+    Operations like adding or deleting a row should remove grouping,
+    but adding, removing, or renaming a column should retain grouping.
+ """ + for masked in (False, True): + t1 = Table(T1, masked=masked) + + # add row + tg = t1.group_by('a') + tg.add_row((0, 'a', 3.0, 4)) + assert np.all(tg.groups.indices == np.array([0, len(tg)])) + assert tg.groups.keys is None + + # remove row + tg = t1.group_by('a') + tg.remove_row(4) + assert np.all(tg.groups.indices == np.array([0, len(tg)])) + assert tg.groups.keys is None + + # add column + tg = t1.group_by('a') + indices = tg.groups.indices.copy() + tg.add_column(Column(name='e', data=np.arange(len(tg)))) + assert np.all(tg.groups.indices == indices) + assert np.all(tg['e'].groups.indices == indices) + assert np.all(tg['e'].groups.keys == tg.groups.keys) + + # remove column (not key column) + tg = t1.group_by('a') + tg.remove_column('b') + assert np.all(tg.groups.indices == indices) + # Still has original key col names + assert tg.groups.keys.dtype.names == ('a',) + assert np.all(tg['a'].groups.indices == indices) + + # remove key column + tg = t1.group_by('a') + tg.remove_column('a') + assert np.all(tg.groups.indices == indices) + assert tg.groups.keys.dtype.names == ('a',) + assert np.all(tg['b'].groups.indices == indices) + + # rename key column + tg = t1.group_by('a') + tg.rename_column('a', 'aa') + assert np.all(tg.groups.indices == indices) + assert tg.groups.keys.dtype.names == ('a',) + assert np.all(tg['aa'].groups.indices == indices) + + +def test_group_by_masked(T1): + t1m = Table(T1, masked=True) + t1m['c'].mask[4] = True + t1m['d'].mask[5] = True + assert t1m.group_by('a').pformat() == [' a b c d ', + '--- --- --- ---', + ' 0 a -- 4', + ' 1 b 3.0 --', + ' 1 a 2.0 6', + ' 1 a 1.0 7', + ' 2 c 7.0 0', + ' 2 b 5.0 1', + ' 2 b 6.0 2', + ' 2 a 4.0 3'] + + +def test_group_by_errors(T1): + """ + Appropriate errors get raised. + """ + # Bad column name as string + with pytest.raises(ValueError): + T1.group_by('f') + + # Bad column names in list + with pytest.raises(ValueError): + T1.group_by(['f', 'g']) + + # Wrong length array + with pytest.raises(ValueError): + T1.group_by(np.array([1, 2])) + + # Wrong type + with pytest.raises(TypeError): + T1.group_by(None) + + # Masked key column + t1 = Table(T1, masked=True) + t1['a'].mask[4] = True + with pytest.raises(ValueError): + t1.group_by('a') + + +def test_groups_keys_meta(T1): + """ + Make sure the keys meta['grouped_by_table_cols'] is working. 
+ """ + # Group by column in this table + tg = T1.group_by('a') + assert tg.groups.keys.meta['grouped_by_table_cols'] is True + assert tg['c'].groups.keys.meta['grouped_by_table_cols'] is True + assert tg.groups[1].groups.keys.meta['grouped_by_table_cols'] is True + assert (tg['d'].groups[np.array([False, True, True])] + .groups.keys.meta['grouped_by_table_cols'] is True) + + # Group by external Table + tg = T1.group_by(T1['a', 'b']) + assert tg.groups.keys.meta['grouped_by_table_cols'] is False + assert tg['c'].groups.keys.meta['grouped_by_table_cols'] is False + assert tg.groups[1].groups.keys.meta['grouped_by_table_cols'] is False + + # Group by external numpy array + tg = T1.group_by(T1['a', 'b'].as_array()) + assert not hasattr(tg.groups.keys, 'meta') + assert not hasattr(tg['c'].groups.keys, 'meta') + + # Group by Column + tg = T1.group_by(T1['a']) + assert 'grouped_by_table_cols' not in tg.groups.keys.meta + assert 'grouped_by_table_cols' not in tg['c'].groups.keys.meta + + +def test_table_aggregate(T1): + """ + Aggregate a table + """ + # Table with only summable cols + t1 = T1['a', 'c', 'd'] + tg = t1.group_by('a') + tga = tg.groups.aggregate(np.sum) + assert tga.pformat() == [' a c d ', + '--- ---- ---', + ' 0 0.0 4', + ' 1 6.0 18', + ' 2 22.0 6'] + # Reverts to default groups + assert np.all(tga.groups.indices == np.array([0, 3])) + assert tga.groups.keys is None + + # metadata survives + assert tga.meta['ta'] == 1 + assert tga['c'].meta['a'] == 1 + assert tga['c'].description == 'column c' + + # Aggregate with np.sum with masked elements. This results + # in one group with no elements, hence a nan result and conversion + # to float for the 'd' column. + t1m = Table(t1, masked=True) + t1m['c'].mask[4:6] = True + t1m['d'].mask[4:6] = True + tg = t1m.group_by('a') + with catch_warnings(Warning) as warning_lines: + tga = tg.groups.aggregate(np.sum) + assert warning_lines[0].category == UserWarning + assert "converting a masked element to nan" in str(warning_lines[0].message) + + assert tga.pformat() == [' a c d ', + '--- ---- ----', + ' 0 nan nan', + ' 1 3.0 13.0', + ' 2 22.0 6.0'] + + # Aggregrate with np.sum with masked elements, but where every + # group has at least one remaining (unmasked) element. Then + # the int column stays as an int. + t1m = Table(t1, masked=True) + t1m['c'].mask[5] = True + t1m['d'].mask[5] = True + tg = t1m.group_by('a') + tga = tg.groups.aggregate(np.sum) + assert tga.pformat() == [' a c d ', + '--- ---- ---', + ' 0 0.0 4', + ' 1 3.0 13', + ' 2 22.0 6'] + + # Aggregate with a column type that cannot by supplied to the aggregating + # function. This raises a warning but still works. 
+    tg = T1.group_by('a')
+    with catch_warnings(Warning) as warning_lines:
+        tga = tg.groups.aggregate(np.sum)
+        assert warning_lines[0].category == AstropyUserWarning
+        assert "Cannot aggregate column" in str(warning_lines[0].message)
+    assert tga.pformat() == [' a   c    d ',
+                             '--- ---- ---',
+                             '  0  0.0   4',
+                             '  1  6.0  18',
+                             '  2 22.0   6']
+
+
+def test_table_aggregate_reduceat(T1):
+    """
+    Aggregate table with functions which have a reduceat method
+    """
+    # Comparison functions without reduceat
+    def np_mean(x):
+        return np.mean(x)
+
+    def np_sum(x):
+        return np.sum(x)
+
+    def np_add(x):
+        return np.add(x)
+
+    # Table with only summable cols
+    t1 = T1['a', 'c', 'd']
+    tg = t1.group_by('a')
+    # Comparison
+    tga_r = tg.groups.aggregate(np.sum)
+    tga_a = tg.groups.aggregate(np.add)
+    tga_n = tg.groups.aggregate(np_sum)
+
+    assert np.all(tga_r == tga_n)
+    assert np.all(tga_a == tga_n)
+    assert tga_n.pformat() == [' a   c    d ',
+                               '--- ---- ---',
+                               '  0  0.0   4',
+                               '  1  6.0  18',
+                               '  2 22.0   6']
+
+    tga_r = tg.groups.aggregate(np.mean)
+    tga_n = tg.groups.aggregate(np_mean)
+    assert np.all(tga_r == tga_n)
+    assert tga_n.pformat() == [' a   c   d ',
+                               '--- --- ---',
+                               '  0 0.0 4.0',
+                               '  1 2.0 6.0',
+                               '  2 5.5 1.5']
+
+    # Binary ufunc np_add should raise warning without reduceat
+    t2 = T1['a', 'c']
+    tg = t2.group_by('a')
+
+    with catch_warnings(Warning) as warning_lines:
+        tga = tg.groups.aggregate(np_add)
+        assert warning_lines[0].category == AstropyUserWarning
+        assert "Cannot aggregate column" in str(warning_lines[0].message)
+    assert tga.pformat() == [' a ',
+                             '---',
+                             '  0',
+                             '  1',
+                             '  2']
+
+
+def test_column_aggregate(T1):
+    """
+    Aggregate a single table column
+    """
+    for masked in (False, True):
+        tg = Table(T1, masked=masked).group_by('a')
+        tga = tg['c'].groups.aggregate(np.sum)
+        assert tga.pformat() == [' c  ',
+                                 '----',
+                                 ' 0.0',
+                                 ' 6.0',
+                                 '22.0']
+
+
+def test_table_filter():
+    """
+    Table groups filtering
+    """
+    def all_positive(table, key_colnames):
+        colnames = [name for name in table.colnames if name not in key_colnames]
+        for colname in colnames:
+            if np.any(table[colname] < 0):
+                return False
+        return True
+
+    # Negative value in 'a' column should not filter because it is a key col
+    t = Table.read([' a c d',
+                    ' -2 7.0 0',
+                    ' -2 5.0 1',
+                    ' 0 0.0 4',
+                    ' 1 3.0 5',
+                    ' 1 2.0 -6',
+                    ' 1 1.0 7',
+                    ' 3 3.0 5',
+                    ' 3 -2.0 6',
+                    ' 3 1.0 7',
+                    ], format='ascii')
+    tg = t.group_by('a')
+    t2 = tg.groups.filter(all_positive)
+    assert t2.groups[0].pformat() == [' a   c   d ',
+                                      '--- --- ---',
+                                      ' -2 7.0   0',
+                                      ' -2 5.0   1']
+    assert t2.groups[1].pformat() == [' a   c   d ',
+                                      '--- --- ---',
+                                      '  0 0.0   4']
+
+
+def test_column_filter():
+    """
+    Column groups filtering
+    """
+    def all_positive(column):
+        if np.any(column < 0):
+            return False
+        return True
+
+    # Negative value in 'a' column should not filter because it is a key col
+    t = Table.read([' a c d',
+                    ' -2 7.0 0',
+                    ' -2 5.0 1',
+                    ' 0 0.0 4',
+                    ' 1 3.0 5',
+                    ' 1 2.0 -6',
+                    ' 1 1.0 7',
+                    ' 3 3.0 5',
+                    ' 3 -2.0 6',
+                    ' 3 1.0 7',
+                    ], format='ascii')
+    tg = t.group_by('a')
+    c2 = tg['c'].groups.filter(all_positive)
+    assert len(c2.groups) == 3
+    assert c2.groups[0].pformat() == [' c ', '---', '7.0', '5.0']
+    assert c2.groups[1].pformat() == [' c ', '---', '0.0']
+    assert c2.groups[2].pformat() == [' c ', '---', '3.0', '2.0', '1.0']
diff --git a/astropy/table/tests/test_index.py b/astropy/table/tests/test_index.py
new file mode 100644
index 0000000..23d3172
--- /dev/null
+++ b/astropy/table/tests/test_index.py
@@ -0,0 +1,463 @@
+# Licensed under a 3-clause BSD 
style license - see LICENSE.rst + +import pytest +import numpy as np + +from .test_table import SetupData +from ..bst import BST, FastRBT, FastBST +from ..sorted_array import SortedArray +from ..table import QTable, Row +from ... import units as u +from ...time import Time +from ..column import BaseColumn +from ...extern.six.moves import range + +try: + import bintrees +except ImportError: + HAS_BINTREES = False +else: + HAS_BINTREES = True + + +if HAS_BINTREES: + available_engines = [BST, FastBST, FastRBT, SortedArray] +else: + available_engines = [BST, SortedArray] + + +@pytest.fixture(params=available_engines) +def engine(request): + return request.param + + +_col = [1, 2, 3, 4, 5] + + +@pytest.fixture(params=[ + _col, + u.Quantity(_col), + Time(_col, format='jyear'), +]) +def main_col(request): + return request.param + + +def assert_col_equal(col, array): + if isinstance(col, Time): + assert np.all(col == Time(array, format='jyear')) + else: + assert np.all(col == col.__class__(array)) + + +@pytest.mark.usefixtures('table_types') +class TestIndex(SetupData): + def _setup(self, main_col, table_types): + super(TestIndex, self)._setup(table_types) + self.main_col = main_col + if isinstance(main_col, u.Quantity): + self._table_type = QTable + if not isinstance(main_col, list): + self._column_type = lambda x: x # don't change mixin type + self.mutable = isinstance(main_col, (list, u.Quantity)) + + def make_col(self, name, lst): + return self._column_type(lst, name=name) + + def make_val(self, val): + if isinstance(self.main_col, Time): + return Time(val, format='jyear') + return val + + @property + def t(self): + if not hasattr(self, '_t'): + self._t = self._table_type() + self._t['a'] = self._column_type(self.main_col) + self._t['b'] = self._column_type([4.0, 5.1, 6.2, 7.0, 1.1]) + self._t['c'] = self._column_type(['7', '8', '9', '10', '11']) + return self._t + + @pytest.mark.parametrize("composite", [False, True]) + def test_table_index(self, main_col, table_types, composite, engine): + self._setup(main_col, table_types) + t = self.t + t.add_index(('a', 'b') if composite else 'a', engine=engine) + assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4]) + + if not self.mutable: + return + + # test altering table columns + t['a'][0] = 4 + t.add_row((6, 6.0, '7')) + t['a'][3] = 10 + t.remove_row(2) + t.add_row((4, 5.0, '9')) + + assert_col_equal(t['a'], np.array([4, 2, 10, 5, 6, 4])) + assert np.allclose(t['b'], np.array([4.0, 5.1, 7.0, 1.1, 6.0, 5.0])) + assert np.all(t['c'].data == np.array(['7', '8', '10', '11', '7', '9'])) + index = t.indices[0] + l = list(index.data.items()) + + if composite: + assert np.all(l == [((2, 5.1), [1]), + ((4, 4.0), [0]), + ((4, 5.0), [5]), + ((5, 1.1), [3]), + ((6, 6.0), [4]), + ((10, 7.0), [2])]) + else: + assert np.all(l == [((2,), [1]), + ((4,), [0, 5]), + ((5,), [3]), + ((6,), [4]), + ((10,), [2])]) + t.remove_indices('a') + assert len(t.indices) == 0 + + def test_table_slicing(self, main_col, table_types, engine): + self._setup(main_col, table_types) + t = self.t + t.add_index('a', engine=engine) + assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4]) + + for slice_ in ([0, 2], np.array([0, 2])): + t2 = t[slice_] + # t2 should retain an index on column 'a' + assert len(t2.indices) == 1 + assert_col_equal(t2['a'], [1, 3]) + + # the index in t2 should reorder row numbers after slicing + assert np.all(t2.indices[0].sorted_data() == [0, 1]) + # however, this index should be a deep copy of t1's index + assert np.all(t.indices[0].sorted_data() == 
[0, 1, 2, 3, 4]) + + def test_remove_rows(self, main_col, table_types, engine): + self._setup(main_col, table_types) + if not self.mutable: + return + t = self.t + t.add_index('a', engine=engine) + + # remove individual row + t2 = t.copy() + t2.remove_rows(2) + assert_col_equal(t2['a'], [1, 2, 4, 5]) + assert np.all(t2.indices[0].sorted_data() == [0, 1, 2, 3]) + + # remove by list, ndarray, or slice + for cut in ([0, 2, 4], np.array([0, 2, 4]), slice(0, 5, 2)): + t2 = t.copy() + t2.remove_rows(cut) + assert_col_equal(t2['a'], [2, 4]) + assert np.all(t2.indices[0].sorted_data() == [0, 1]) + + with pytest.raises(ValueError): + t.remove_rows((0, 2, 4)) + + def test_col_get_slice(self, main_col, table_types, engine): + self._setup(main_col, table_types) + t = self.t + t.add_index('a', engine=engine) + + # get slice + t2 = t[1:3] # table slice + assert_col_equal(t2['a'], [2, 3]) + assert np.all(t2.indices[0].sorted_data() == [0, 1]) + + col_slice = t['a'][1:3] + assert_col_equal(col_slice, [2, 3]) + # true column slices discard indices + if isinstance(t['a'], BaseColumn): + assert len(col_slice.info.indices) == 0 + + # take slice of slice + t2 = t[::2] + assert_col_equal(t2['a'], np.array([1, 3, 5])) + t3 = t2[::-1] + assert_col_equal(t3['a'], np.array([5, 3, 1])) + assert np.all(t3.indices[0].sorted_data() == [2, 1, 0]) + t3 = t2[:2] + assert_col_equal(t3['a'], np.array([1, 3])) + assert np.all(t3.indices[0].sorted_data() == [0, 1]) + # out-of-bound slices + for t_empty in (t2[3:], t2[2:1], t3[2:]): + assert len(t_empty['a']) == 0 + assert np.all(t_empty.indices[0].sorted_data() == []) + + if self.mutable: + # get boolean mask + mask = t['a'] % 2 == 1 + t2 = t[mask] + assert_col_equal(t2['a'], [1, 3, 5]) + assert np.all(t2.indices[0].sorted_data() == [0, 1, 2]) + + def test_col_set_slice(self, main_col, table_types, engine): + self._setup(main_col, table_types) + if not self.mutable: + return + t = self.t + t.add_index('a', engine=engine) + + # set slice + t2 = t.copy() + t2['a'][1:3] = np.array([6, 7]) + assert_col_equal(t2['a'], np.array([1, 6, 7, 4, 5])) + assert np.all(t2.indices[0].sorted_data() == [0, 3, 4, 1, 2]) + + # change original table via slice reference + t2 = t.copy() + t3 = t2[1:3] + assert_col_equal(t3['a'], np.array([2, 3])) + assert np.all(t3.indices[0].sorted_data() == [0, 1]) + t3['a'][0] = 5 + assert_col_equal(t3['a'], np.array([5, 3])) + assert_col_equal(t2['a'], np.array([1, 5, 3, 4, 5])) + assert np.all(t3.indices[0].sorted_data() == [1, 0]) + assert np.all(t2.indices[0].sorted_data() == [0, 2, 3, 1, 4]) + + # set boolean mask + t2 = t.copy() + mask = t['a'] % 2 == 1 + t2['a'][mask] = 0. 
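+        # As the assertions below spell out, the index is updated in place:
+        # the column now holds [0, 2, 0, 4, 0], so sorted_data() lists the
+        # three zeroed rows (0, 2, 4) before rows 1 and 3.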
+ assert_col_equal(t2['a'], [0, 2, 0, 4, 0]) + assert np.all(t2.indices[0].sorted_data() == [0, 2, 4, 1, 3]) + + def test_multiple_slices(self, main_col, table_types, engine): + self._setup(main_col, table_types) + + if not self.mutable: + return + + t = self.t + t.add_index('a', engine=engine) + + for i in range(6, 51): + t.add_row((i, 1.0, 'A')) + + assert_col_equal(t['a'], [i for i in range(1, 51)]) + assert np.all(t.indices[0].sorted_data() == [i for i in range(50)]) + + evens = t[::2] + assert np.all(evens.indices[0].sorted_data() == [i for i in range(25)]) + reverse = evens[::-1] + index = reverse.indices[0] + assert (index.start, index.stop, index.step) == (48, -2, -2) + assert np.all(index.sorted_data() == [i for i in range(24, -1, -1)]) + + # modify slice of slice + reverse[-10:] = 0 + expected = np.array([i for i in range(1, 51)]) + expected[:20][expected[:20] % 2 == 1] = 0 + assert_col_equal(t['a'], expected) + assert_col_equal(evens['a'], expected[::2]) + assert_col_equal(reverse['a'], expected[::2][::-1]) + # first ten evens are now zero + assert np.all(t.indices[0].sorted_data() == + [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, + 1, 3, 5, 7, 9, 11, 13, 15, 17, 19] + + [i for i in range(20, 50)]) + assert np.all(evens.indices[0].sorted_data() == [i for i in range(25)]) + assert np.all(reverse.indices[0].sorted_data() == + [i for i in range(24, -1, -1)]) + + # try different step sizes of slice + t2 = t[1:20:2] + assert_col_equal(t2['a'], [2, 4, 6, 8, 10, 12, 14, 16, 18, 20]) + assert np.all(t2.indices[0].sorted_data() == [i for i in range(10)]) + t3 = t2[::3] + assert_col_equal(t3['a'], [2, 8, 14, 20]) + assert np.all(t3.indices[0].sorted_data() == [0, 1, 2, 3]) + t4 = t3[2::-1] + assert_col_equal(t4['a'], [14, 8, 2]) + assert np.all(t4.indices[0].sorted_data() == [2, 1, 0]) + + def test_sort(self, main_col, table_types, engine): + self._setup(main_col, table_types) + t = self.t[::-1] # reverse table + assert_col_equal(t['a'], [5, 4, 3, 2, 1]) + t.add_index('a', engine=engine) + assert np.all(t.indices[0].sorted_data() == [4, 3, 2, 1, 0]) + + if not self.mutable: + return + + # sort table by column a + t2 = t.copy() + t2.sort('a') + assert_col_equal(t2['a'], [1, 2, 3, 4, 5]) + assert np.all(t2.indices[0].sorted_data() == [0, 1, 2, 3, 4]) + + # sort table by primary key + t2 = t.copy() + t2.sort() + assert_col_equal(t2['a'], [1, 2, 3, 4, 5]) + assert np.all(t2.indices[0].sorted_data() == [0, 1, 2, 3, 4]) + + def test_insert_row(self, main_col, table_types, engine): + self._setup(main_col, table_types) + + if not self.mutable: + return + + t = self.t + t.add_index('a', engine=engine) + t.insert_row(2, (6, 1.0, '12')) + assert_col_equal(t['a'], [1, 2, 6, 3, 4, 5]) + assert np.all(t.indices[0].sorted_data() == [0, 1, 3, 4, 5, 2]) + t.insert_row(1, (0, 4.0, '13')) + assert_col_equal(t['a'], [1, 0, 2, 6, 3, 4, 5]) + assert np.all(t.indices[0].sorted_data() == [1, 0, 2, 4, 5, 6, 3]) + + def test_index_modes(self, main_col, table_types, engine): + self._setup(main_col, table_types) + t = self.t + t.add_index('a', engine=engine) + + # first, no special mode + assert len(t[[1, 3]].indices) == 1 + assert len(t[::-1].indices) == 1 + assert len(self._table_type(t).indices) == 1 + assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4]) + t2 = t.copy() + + # non-copy mode + with t.index_mode('discard_on_copy'): + assert len(t[[1, 3]].indices) == 0 + assert len(t[::-1].indices) == 0 + assert len(self._table_type(t).indices) == 0 + assert len(t2.copy().indices) == 1 # mode should only affect t + + 
# make sure non-copy mode is exited correctly + assert len(t[[1, 3]].indices) == 1 + + if not self.mutable: + return + + # non-modify mode + with t.index_mode('freeze'): + assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4]) + t['a'][0] = 6 + assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4]) + t.add_row((2, 1.5, '12')) + assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4]) + t.remove_rows([1, 3]) + assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4]) + assert_col_equal(t['a'], [6, 3, 5, 2]) + # mode should only affect t + assert np.all(t2.indices[0].sorted_data() == [0, 1, 2, 3, 4]) + t2['a'][0] = 6 + assert np.all(t2.indices[0].sorted_data() == [1, 2, 3, 4, 0]) + + # make sure non-modify mode is exited correctly + assert np.all(t.indices[0].sorted_data() == [3, 1, 2, 0]) + + if isinstance(t['a'], BaseColumn): + assert len(t['a'][::-1].info.indices) == 0 + with t.index_mode('copy_on_getitem'): + assert len(t['a'][[1, 2]].info.indices) == 1 + # mode should only affect t + assert len(t2['a'][[1, 2]].info.indices) == 0 + + assert len(t['a'][::-1].info.indices) == 0 + assert len(t2['a'][::-1].info.indices) == 0 + + def test_index_retrieval(self, main_col, table_types, engine): + self._setup(main_col, table_types) + t = self.t + t.add_index('a', engine=engine) + t.add_index(['a', 'c'], engine=engine) + assert len(t.indices) == 2 + assert len(t.indices['a'].columns) == 1 + assert len(t.indices['a', 'c'].columns) == 2 + + with pytest.raises(IndexError): + t.indices['b'] + + def test_col_rename(self, main_col, table_types, engine): + ''' + Checks for a previous bug in which copying a Table + with different column names raised an exception. + ''' + self._setup(main_col, table_types) + t = self.t + t.add_index('a', engine=engine) + t2 = self._table_type(self.t, names=['d', 'e', 'f']) + assert len(t2.indices) == 1 + + def test_table_loc(self, main_col, table_types, engine): + self._setup(main_col, table_types) + t = self.t + + t.add_index('a', engine=engine) + t.add_index('b', engine=engine) + + t2 = t.loc[self.make_val(3)] # single label, with primary key 'a' + assert_col_equal(t2['a'], [3]) + assert isinstance(t2, Row) + + # list search + t2 = t.loc[[self.make_val(1), self.make_val(4), self.make_val(2)]] + assert_col_equal(t2['a'], [1, 4, 2]) # same order as input list + if not isinstance(main_col, Time): + # ndarray search + t2 = t.loc[np.array([1, 4, 2])] + assert_col_equal(t2['a'], [1, 4, 2]) + assert_col_equal(t2['a'], [1, 4, 2]) + t2 = t.loc[self.make_val(3): self.make_val(5)] # range search + assert_col_equal(t2['a'], [3, 4, 5]) + t2 = t.loc['b', 5.0:7.0] + assert_col_equal(t2['b'], [5.1, 6.2, 7.0]) + # search by sorted index + t2 = t.iloc[0:2] # two smallest rows by column 'a' + assert_col_equal(t2['a'], [1, 2]) + t2 = t.iloc['b', 2:] # exclude two smallest rows in column 'b' + assert_col_equal(t2['b'], [5.1, 6.2, 7.0]) + + for t2 in (t.loc[:], t.iloc[:]): + assert_col_equal(t2['a'], [1, 2, 3, 4, 5]) + + def test_invalid_search(self, main_col, table_types, engine): + # using .loc with a value not present should raise an exception + self._setup(main_col, table_types) + t = self.t + + t.add_index('a') + with pytest.raises(KeyError): + t.loc[self.make_val(6)] + + def test_copy_index_references(self, main_col, table_types, engine): + # check against a bug in which indices were given an incorrect + # column reference when copied + self._setup(main_col, table_types) + t = self.t + + t.add_index('a') + t.add_index('b') + t2 = t.copy() + assert 
t2.indices['a'].columns[0] is t2['a'] + assert t2.indices['b'].columns[0] is t2['b'] + + def test_unique_index(self, main_col, table_types, engine): + self._setup(main_col, table_types) + t = self.t + + t.add_index('a', engine=engine, unique=True) + assert np.all(t.indices['a'].sorted_data() == [0, 1, 2, 3, 4]) + + if self.mutable: + with pytest.raises(ValueError): + t.add_row((5, 5.0, '9')) + + def test_copy_indexed_table(self, table_types): + self._setup(_col, table_types) + t = self.t + t.add_index('a') + t.add_index(['a', 'b']) + for tp in (self._table_type(t), t.copy()): + assert len(t.indices) == len(tp.indices) + for index, indexp in zip(t.indices, tp.indices): + assert np.all(index.data.data == indexp.data.data) + assert index.data.data.colnames == indexp.data.data.colnames diff --git a/astropy/table/tests/test_info.py b/astropy/table/tests/test_info.py new file mode 100644 index 0000000..793418c --- /dev/null +++ b/astropy/table/tests/test_info.py @@ -0,0 +1,246 @@ +# -*- coding: utf-8 -*- +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +# TEST_UNICODE_LITERALS + +import warnings +from collections import OrderedDict + +import numpy as np + +from ...extern.six.moves import cStringIO as StringIO +from ... import units as u +from ... import time +from ... import coordinates +from ... import table +from ...utils.data_info import data_info_factory, dtype_info_name +from ..table_helpers import simple_table + + +def test_table_info_attributes(table_types): + """ + Test the info() method of printing a summary of table column attributes + """ + a = np.array([1, 2, 3], dtype='int32') + b = np.array([1, 2, 3], dtype='float32') + c = np.array(['a', 'c', 'e'], dtype='|S1') + t = table_types.Table([a, b, c], names=['a', 'b', 'c']) + + # Minimal output for a typical table + tinfo = t.info(out=None) + subcls = ['class'] if table_types.Table.__name__ == 'MyTable' else [] + assert tinfo.colnames == ['name', 'dtype', 'shape', 'unit', 'format', + 'description', 'class', 'n_bad', 'length'] + assert np.all(tinfo['name'] == ['a', 'b', 'c']) + assert np.all(tinfo['dtype'] == ['int32', 'float32', dtype_info_name('S1')]) + if subcls: + assert np.all(tinfo['class'] == ['MyColumn'] * 3) + + # All output fields including a mixin column + t['d'] = [1, 2, 3] * u.m + t['d'].description = 'quantity' + t['a'].format = '%02d' + t['e'] = time.Time([1, 2, 3], format='mjd') + t['e'].info.description = 'time' + t['f'] = coordinates.SkyCoord([1, 2, 3], [1, 2, 3], unit='deg') + t['f'].info.description = 'skycoord' + + tinfo = t.info(out=None) + assert np.all(tinfo['name'] == 'a b c d e f'.split()) + assert np.all(tinfo['dtype'] == ['int32', 'float32', dtype_info_name('S1'), 'float64', + 'object', 'object']) + assert np.all(tinfo['unit'] == ['', '', '', 'm', '', 'deg,deg']) + assert np.all(tinfo['format'] == ['%02d', '', '', '', '', '']) + assert np.all(tinfo['description'] == ['', '', '', 'quantity', 'time', 'skycoord']) + cls = t.ColumnClass.__name__ + assert np.all(tinfo['class'] == [cls, cls, cls, cls, 'Time', 'SkyCoord']) + + # Test that repr(t.info) is same as t.info() + out = StringIO() + t.info(out=out) + assert repr(t.info) == out.getvalue() + + +def test_table_info_stats(table_types): + """ + Test the info() method of printing a summary of table column statistics + """ + a = np.array([1, 2, 1, 2], dtype='int32') + b = np.array([1, 2, 1, 2], dtype='float32') + c = np.array(['a', 'c', 'e', 'f'], dtype='|S1') + d = time.Time([1, 2, 1, 2], format='mjd') + t = table_types.Table([a, b, c, d], 
names=['a', 'b', 'c', 'd']) + + # option = 'stats' + masked = 'masked=True ' if t.masked else '' + out = StringIO() + t.info('stats', out=out) + table_header_line = '<{0} {1}length=4>'.format(t.__class__.__name__, masked) + exp = [table_header_line, + 'name mean std min max', + '---- ---- --- --- ---', + ' a 1.5 0.5 1 2', + ' b 1.5 0.5 1.0 2.0', + ' c -- -- -- --', + ' d -- -- 1.0 2.0'] + assert out.getvalue().splitlines() == exp + + # option = ['attributes', 'stats'] + tinfo = t.info(['attributes', 'stats'], out=None) + assert tinfo.colnames == ['name', 'dtype', 'shape', 'unit', 'format', 'description', + 'class', 'mean', 'std', 'min', 'max', 'n_bad', 'length'] + assert np.all(tinfo['mean'] == ['1.5', '1.5', '--', '--']) + assert np.all(tinfo['std'] == ['0.5', '0.5', '--', '--']) + assert np.all(tinfo['min'] == ['1', '1.0', '--', '1.0']) + assert np.all(tinfo['max'] == ['2', '2.0', '--', '2.0']) + + out = StringIO() + t.info('stats', out=out) + exp = [table_header_line, + 'name mean std min max', + '---- ---- --- --- ---', + ' a 1.5 0.5 1 2', + ' b 1.5 0.5 1.0 2.0', + ' c -- -- -- --', + ' d -- -- 1.0 2.0'] + assert out.getvalue().splitlines() == exp + + # option = ['attributes', custom] + custom = data_info_factory(names=['sum', 'first'], + funcs=[np.sum, lambda col: col[0]]) + out = StringIO() + tinfo = t.info(['attributes', custom], out=None) + assert tinfo.colnames == ['name', 'dtype', 'shape', 'unit', 'format', 'description', + 'class', 'sum', 'first', 'n_bad', 'length'] + assert np.all(tinfo['name'] == ['a', 'b', 'c', 'd']) + assert np.all(tinfo['dtype'] == ['int32', 'float32', dtype_info_name('S1'), 'object']) + assert np.all(tinfo['sum'] == ['6', '6.0', '--', '--']) + assert np.all(tinfo['first'] == ['1', '1.0', 'a', '1.0']) + + +def test_data_info(): + """ + Test getting info for just a column. + """ + cols = [table.Column([1.0, 2.0, np.nan], name='name', + description='description', unit='m/s'), + table.MaskedColumn([1.0, 2.0, 3.0], name='name', + description='description', unit='m/s', + mask=[False, False, True])] + for c in cols: + # Test getting the full ordered dict + cinfo = c.info(out=None) + assert cinfo == OrderedDict([('name', 'name'), + ('dtype', 'float64'), + ('shape', ''), + ('unit', 'm / s'), + ('format', ''), + ('description', 'description'), + ('class', type(c).__name__), + ('n_bad', 1), + ('length', 3)]) + + # Test the console (string) version which omits trivial values + out = StringIO() + c.info(out=out) + exp = ['name = name', + 'dtype = float64', + 'unit = m / s', + 'description = description', + 'class = {0}'.format(type(c).__name__), + 'n_bad = 1', + 'length = 3'] + assert out.getvalue().splitlines() == exp + + # repr(c.info) gives the same as c.info() + assert repr(c.info) == out.getvalue() + + # Test stats info + cinfo = c.info('stats', out=None) + assert cinfo == OrderedDict([('name', 'name'), + ('mean', '1.5'), + ('std', '0.5'), + ('min', '1.0'), + ('max', '2.0'), + ('n_bad', 1), + ('length', 3)]) + + +def test_data_info_subclass(): + class Column(table.Column): + """ + Confusingly named Column on purpose, but that is legal. 
+ """ + pass + for data in ([], [1, 2]): + c = Column(data, dtype='int64') + cinfo = c.info(out=None) + assert cinfo == OrderedDict([('dtype', 'int64'), + ('shape', ''), + ('unit', ''), + ('format', ''), + ('description', ''), + ('class', 'Column'), + ('n_bad', 0), + ('length', len(data))]) + + +def test_scalar_info(): + """ + Make sure info works with scalar values + """ + c = time.Time('2000:001') + cinfo = c.info(out=None) + assert cinfo['n_bad'] == 0 + assert 'length' not in cinfo + + +def test_empty_table(): + t = table.Table() + out = StringIO() + t.info(out=out) + exp = ['', ''] + assert out.getvalue().splitlines() == exp + + +def test_class_attribute(): + """ + Test that class info column is suppressed only for identical non-mixin + columns. + """ + vals = [[1] * u.m, [2] * u.m] + + texp = ['
    ', + 'name dtype unit', + '---- ------- ----', + 'col0 float64 m', + 'col1 float64 m'] + + qexp = ['', + 'name dtype unit class ', + '---- ------- ---- --------', + 'col0 float64 m Quantity', + 'col1 float64 m Quantity'] + + for table_cls, exp in ((table.Table, texp), + (table.QTable, qexp)): + t = table_cls(vals) + out = StringIO() + t.info(out=out) + assert out.getvalue().splitlines() == exp + + +def test_ignore_warnings(): + t = table.Table([[np.nan, np.nan]]) + with warnings.catch_warnings(record=True) as warns: + t.info('stats', out=None) + assert len(warns) == 0 + + +def test_no_deprecation_warning(): + # regression test for #5459, where numpy deprecation warnings were + # emitted unnecessarily. + t = simple_table() + with warnings.catch_warnings(record=True) as warns: + t.info() + assert len(warns) == 0 diff --git a/astropy/table/tests/test_init_table.py b/astropy/table/tests/test_init_table.py new file mode 100644 index 0000000..ac323f0 --- /dev/null +++ b/astropy/table/tests/test_init_table.py @@ -0,0 +1,492 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +# TEST_UNICODE_LITERALS + +from __future__ import print_function # For print debugging with python 2 or 3 + +from collections import OrderedDict, Mapping +from ...extern import six + +import pytest +import numpy as np + +from ...table import Column, TableColumns + +# Unfortunatly the python2 UserDict.UserDict is not a Mapping so it is not +# possible to use "from six.moves import UserDict". Instead we have to use +# IterableUserDict (which is a Mapping) here. +if six.PY2: + from UserDict import IterableUserDict as UserDict +else: + from collections import UserDict + + +class TestTableColumnsInit(): + def test_init(self): + """Test initialisation with lists, tuples, dicts of arrays + rather than Columns [regression test for #2647]""" + x1 = np.arange(10.) + x2 = np.arange(5.) + x3 = np.arange(7.) 
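+        # The same three arrays are handed to TableColumns below in several
+        # equivalent forms (list of pairs, tuple of pairs, dict and Column
+        # objects); each form should produce the same name -> column mapping.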
+ col_list = [('x1', x1), ('x2', x2), ('x3', x3)] + tc_list = TableColumns(col_list) + for col in col_list: + assert col[0] in tc_list + assert tc_list[col[0]] is col[1] + + col_tuple = (('x1', x1), ('x2', x2), ('x3', x3)) + tc_tuple = TableColumns(col_tuple) + for col in col_tuple: + assert col[0] in tc_tuple + assert tc_tuple[col[0]] is col[1] + + col_dict = dict([('x1', x1), ('x2', x2), ('x3', x3)]) + tc_dict = TableColumns(col_dict) + for col in tc_dict.keys(): + assert col in tc_dict + assert tc_dict[col] is col_dict[col] + + columns = [Column(col[1], name=col[0]) for col in col_list] + tc = TableColumns(columns) + for col in columns: + assert col.name in tc + assert tc[col.name] is col + + +# pytest.mark.usefixtures('table_type') +class BaseInitFrom(): + def _setup(self, table_type): + pass + + def test_basic_init(self, table_type): + self._setup(table_type) + t = table_type(self.data, names=('a', 'b', 'c')) + assert t.colnames == ['a', 'b', 'c'] + assert np.all(t['a'] == np.array([1, 3])) + assert np.all(t['b'] == np.array([2, 4])) + assert np.all(t['c'] == np.array([3, 5])) + assert all(t[name].name == name for name in t.colnames) + + def test_set_dtype(self, table_type): + self._setup(table_type) + t = table_type(self.data, names=('a', 'b', 'c'), dtype=('i4', 'f4', 'f8')) + assert t.colnames == ['a', 'b', 'c'] + assert np.all(t['a'] == np.array([1, 3], dtype='i4')) + assert np.all(t['b'] == np.array([2, 4], dtype='f4')) + assert np.all(t['c'] == np.array([3, 5], dtype='f8')) + assert t['a'].dtype.type == np.int32 + assert t['b'].dtype.type == np.float32 + assert t['c'].dtype.type == np.float64 + assert all(t[name].name == name for name in t.colnames) + + def test_names_dtype_mismatch(self, table_type): + self._setup(table_type) + with pytest.raises(ValueError): + table_type(self.data, names=('a',), dtype=('i4', 'f4', 'i4')) + + def test_names_cols_mismatch(self, table_type): + self._setup(table_type) + with pytest.raises(ValueError): + table_type(self.data, names=('a',), dtype=('i4')) + + +@pytest.mark.usefixtures('table_type') +class BaseInitFromListLike(BaseInitFrom): + + def test_names_cols_mismatch(self, table_type): + self._setup(table_type) + with pytest.raises(ValueError): + table_type(self.data, names=['a'], dtype=[int]) + + def test_names_copy_false(self, table_type): + self._setup(table_type) + with pytest.raises(ValueError): + table_type(self.data, names=['a'], dtype=[int], copy=False) + + +@pytest.mark.usefixtures('table_type') +class BaseInitFromDictLike(BaseInitFrom): + pass + + +@pytest.mark.usefixtures('table_type') +class TestInitFromNdarrayHomo(BaseInitFromListLike): + + def setup_method(self, method): + self.data = np.array([(1, 2, 3), + (3, 4, 5)], + dtype='i4') + + def test_default_names(self, table_type): + self._setup(table_type) + t = table_type(self.data) + assert t.colnames == ['col0', 'col1', 'col2'] + + def test_ndarray_ref(self, table_type): + """Init with ndarray and copy=False and show that this is a reference + to input ndarray""" + self._setup(table_type) + t = table_type(self.data, copy=False) + t['col1'][1] = 0 + assert t.as_array()['col1'][1] == 0 + assert t['col1'][1] == 0 + assert self.data[1][1] == 0 + + def test_partial_names_dtype(self, table_type): + self._setup(table_type) + t = table_type(self.data, names=['a', None, 'c'], dtype=[None, None, 'f8']) + assert t.colnames == ['a', 'col1', 'c'] + assert t['a'].dtype.type == np.int32 + assert t['col1'].dtype.type == np.int32 + assert t['c'].dtype.type == np.float64 + assert all(t[name].name == 
name for name in t.colnames) + + def test_partial_names_ref(self, table_type): + self._setup(table_type) + t = table_type(self.data, names=['a', None, 'c']) + assert t.colnames == ['a', 'col1', 'c'] + assert t['a'].dtype.type == np.int32 + assert t['col1'].dtype.type == np.int32 + assert t['c'].dtype.type == np.int32 + assert all(t[name].name == name for name in t.colnames) + + +@pytest.mark.usefixtures('table_type') +class TestInitFromListOfLists(BaseInitFromListLike): + + def setup_method(self, table_type): + self._setup(table_type) + self.data = [(np.int32(1), np.int32(3)), + Column(name='col1', data=[2, 4], dtype=np.int32), + np.array([3, 5], dtype=np.int32)] + + def test_default_names(self, table_type): + self._setup(table_type) + t = table_type(self.data) + assert t.colnames == ['col0', 'col1', 'col2'] + assert all(t[name].name == name for name in t.colnames) + + def test_partial_names_dtype(self, table_type): + self._setup(table_type) + t = table_type(self.data, names=['b', None, 'c'], + dtype=['f4', None, 'f8']) + assert t.colnames == ['b', 'col1', 'c'] + assert t['b'].dtype.type == np.float32 + assert t['col1'].dtype.type == np.int32 + assert t['c'].dtype.type == np.float64 + assert all(t[name].name == name for name in t.colnames) + + def test_bad_data(self, table_type): + self._setup(table_type) + with pytest.raises(ValueError): + table_type([[1, 2], + [3, 4, 5]]) + + +@pytest.mark.usefixtures('table_type') +class TestInitFromListOfDicts(BaseInitFromListLike): + + def _setup(self, table_type): + self.data = [{'a': 1, 'b': 2, 'c': 3}, + {'a': 3, 'b': 4, 'c': 5}] + + def test_names(self, table_type): + self._setup(table_type) + t = table_type(self.data) + assert all(colname in set(['a', 'b', 'c']) for colname in t.colnames) + + def test_names_ordered(self, table_type): + self._setup(table_type) + t = table_type(self.data, names=('c', 'b', 'a')) + assert t.colnames == ['c', 'b', 'a'] + + def test_bad_data(self, table_type): + self._setup(table_type) + with pytest.raises(ValueError): + table_type([{'a': 1, 'b': 2, 'c': 3}, + {'a': 2, 'b': 4}]) + + +@pytest.mark.usefixtures('table_type') +class TestInitFromColsList(BaseInitFromListLike): + + def _setup(self, table_type): + self.data = [Column([1, 3], name='x', dtype=np.int32), + np.array([2, 4], dtype=np.int32), + np.array([3, 5], dtype='i8')] + + def test_default_names(self, table_type): + self._setup(table_type) + t = table_type(self.data) + assert t.colnames == ['x', 'col1', 'col2'] + assert all(t[name].name == name for name in t.colnames) + + def test_partial_names_dtype(self, table_type): + self._setup(table_type) + t = table_type(self.data, names=['b', None, 'c'], dtype=['f4', None, 'f8']) + assert t.colnames == ['b', 'col1', 'c'] + assert t['b'].dtype.type == np.float32 + assert t['col1'].dtype.type == np.int32 + assert t['c'].dtype.type == np.float64 + assert all(t[name].name == name for name in t.colnames) + + def test_ref(self, table_type): + """Test that initializing from a list of columns can be done by reference""" + self._setup(table_type) + t = table_type(self.data, copy=False) + t['x'][0] = 100 + assert self.data[0][0] == 100 + + +@pytest.mark.usefixtures('table_type') +class TestInitFromNdarrayStruct(BaseInitFromDictLike): + + def _setup(self, table_type): + self.data = np.array([(1, 2, 3), + (3, 4, 5)], + dtype=[(str('x'), 'i8'), (str('y'), 'i4'), (str('z'), 'i8')]) + + def test_ndarray_ref(self, table_type): + """Init with ndarray and copy=False and show that table uses reference + to input ndarray""" + 
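+        # Sketch of the expectation here: with copy=False the Table should
+        # wrap the structured array's own buffer, so the column-wise and
+        # row-wise writes below must be visible in self.data as well.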
self._setup(table_type) + t = table_type(self.data, copy=False) + + t['x'][1] = 0 # Column-wise assignment + t[0]['y'] = 0 # Row-wise assignment + assert self.data['x'][1] == 0 + assert self.data['y'][0] == 0 + assert np.all(np.array(t) == self.data) + assert all(t[name].name == name for name in t.colnames) + + def test_partial_names_dtype(self, table_type): + self._setup(table_type) + t = table_type(self.data, names=['e', None, 'd'], dtype=['f4', None, 'f8']) + assert t.colnames == ['e', 'y', 'd'] + assert t['e'].dtype.type == np.float32 + assert t['y'].dtype.type == np.int32 + assert t['d'].dtype.type == np.float64 + assert all(t[name].name == name for name in t.colnames) + + def test_partial_names_ref(self, table_type): + self._setup(table_type) + t = table_type(self.data, names=['e', None, 'd'], copy=False) + assert t.colnames == ['e', 'y', 'd'] + assert t['e'].dtype.type == np.int64 + assert t['y'].dtype.type == np.int32 + assert t['d'].dtype.type == np.int64 + assert all(t[name].name == name for name in t.colnames) + + +@pytest.mark.usefixtures('table_type') +class TestInitFromDict(BaseInitFromDictLike): + + def _setup(self, table_type): + self.data = dict([('a', Column([1, 3], name='x')), + ('b', [2, 4]), + ('c', np.array([3, 5], dtype='i8'))]) + + +@pytest.mark.usefixtures('table_type') +class TestInitFromMapping(BaseInitFromDictLike): + + def _setup(self, table_type): + self.data = UserDict([('a', Column([1, 3], name='x')), + ('b', [2, 4]), + ('c', np.array([3, 5], dtype='i8'))]) + assert isinstance(self.data, Mapping) + assert not isinstance(self.data, dict) + + +@pytest.mark.usefixtures('table_type') +class TestInitFromOrderedDict(BaseInitFromDictLike): + + def _setup(self, table_type): + self.data = OrderedDict([('a', Column(name='x', data=[1, 3])), + ('b', [2, 4]), + ('c', np.array([3, 5], dtype='i8'))]) + + def test_col_order(self, table_type): + self._setup(table_type) + t = table_type(self.data) + assert t.colnames == ['a', 'b', 'c'] + + +@pytest.mark.usefixtures('table_type') +class TestInitFromRow(BaseInitFromDictLike): + + def _setup(self, table_type): + arr = np.array([(1, 2, 3), + (3, 4, 5)], + dtype=[(str('x'), 'i8'), (str('y'), 'i8'), (str('z'), 'f8')]) + self.data = table_type(arr, meta={'comments': ['comment1', 'comment2']}) + + def test_init_from_row(self, table_type): + self._setup(table_type) + t = table_type(self.data[0]) + + # Values and meta match original + assert t.meta['comments'][0] == 'comment1' + for name in t.colnames: + assert np.all(t[name] == self.data[name][0:1]) + assert all(t[name].name == name for name in t.colnames) + + # Change value in new instance and check that original is the same + t['x'][0] = 8 + t.meta['comments'][1] = 'new comment2' + assert np.all(t['x'] == np.array([8])) + assert np.all(self.data['x'] == np.array([1, 3])) + assert self.data.meta['comments'][1] == 'comment2' + + +@pytest.mark.usefixtures('table_type') +class TestInitFromTable(BaseInitFromDictLike): + + def _setup(self, table_type): + arr = np.array([(1, 2, 3), + (3, 4, 5)], + dtype=[(str('x'), 'i8'), (str('y'), 'i8'), (str('z'), 'f8')]) + self.data = table_type(arr, meta={'comments': ['comment1', 'comment2']}) + + def test_data_meta_copy(self, table_type): + self._setup(table_type) + t = table_type(self.data) + assert t.meta['comments'][0] == 'comment1' + t['x'][1] = 8 + t.meta['comments'][1] = 'new comment2' + assert self.data.meta['comments'][1] == 'comment2' + assert np.all(t['x'] == np.array([1, 8])) + assert np.all(self.data['x'] == np.array([1, 3])) + assert 
t['z'].name == 'z' + assert all(t[name].name == name for name in t.colnames) + + def test_table_ref(self, table_type): + self._setup(table_type) + t = table_type(self.data, copy=False) + t['x'][1] = 0 + assert t['x'][1] == 0 + assert self.data['x'][1] == 0 + assert np.all(t.as_array() == self.data.as_array()) + assert all(t[name].name == name for name in t.colnames) + + def test_partial_names_dtype(self, table_type): + self._setup(table_type) + t = table_type(self.data, names=['e', None, 'd'], dtype=['f4', None, 'i8']) + assert t.colnames == ['e', 'y', 'd'] + assert t['e'].dtype.type == np.float32 + assert t['y'].dtype.type == np.int64 + assert t['d'].dtype.type == np.int64 + assert all(t[name].name == name for name in t.colnames) + + def test_partial_names_ref(self, table_type): + self._setup(table_type) + t = table_type(self.data, names=['e', None, 'd'], copy=False) + assert t.colnames == ['e', 'y', 'd'] + assert t['e'].dtype.type == np.int64 + assert t['y'].dtype.type == np.int64 + assert t['d'].dtype.type == np.float64 + assert all(t[name].name == name for name in t.colnames) + + def test_init_from_columns(self, table_type): + self._setup(table_type) + t = table_type(self.data) + t2 = table_type(t.columns['z', 'x', 'y']) + assert t2.colnames == ['z', 'x', 'y'] + assert t2.dtype.names == ('z', 'x', 'y') + + def test_init_from_columns_slice(self, table_type): + self._setup(table_type) + t = table_type(self.data) + t2 = table_type(t.columns[0:2]) + assert t2.colnames == ['x', 'y'] + assert t2.dtype.names == ('x', 'y') + + def test_init_from_columns_mix(self, table_type): + self._setup(table_type) + t = table_type(self.data) + t2 = table_type([t.columns[0], t.columns['z']]) + assert t2.colnames == ['x', 'z'] + assert t2.dtype.names == ('x', 'z') + + +@pytest.mark.usefixtures('table_type') +class TestInitFromNone(): + # Note table_table.TestEmptyData tests initializing a completely empty + # table and adding data. 
+ + def test_data_none_with_cols(self, table_type): + """ + Test different ways of initing an empty table + """ + np_t = np.empty(0, dtype=[(str('a'), 'f4', (2,)), + (str('b'), 'i4')]) + for kwargs in ({'names': ('a', 'b')}, + {'names': ('a', 'b'), 'dtype': (('f4', (2,)), 'i4')}, + {'dtype': [(str('a'), 'f4', (2,)), (str('b'), 'i4')]}, + {'dtype': np_t.dtype}): + t = table_type(**kwargs) + assert t.colnames == ['a', 'b'] + assert len(t['a']) == 0 + assert len(t['b']) == 0 + if 'dtype' in kwargs: + assert t['a'].dtype.type == np.float32 + assert t['b'].dtype.type == np.int32 + assert t['a'].shape[1:] == (2,) + + +@pytest.mark.usefixtures('table_types') +class TestInitFromRows(): + + def test_init_with_rows(self, table_type): + for rows in ([[1, 'a'], [2, 'b']], + [(1, 'a'), (2, 'b')], + ((1, 'a'), (2, 'b'))): + t = table_type(rows=rows, names=('a', 'b')) + assert np.all(t['a'] == [1, 2]) + assert np.all(t['b'] == ['a', 'b']) + assert t.colnames == ['a', 'b'] + assert t['a'].dtype.kind == 'i' + assert t['b'].dtype.kind in ('S', 'U') + # Regression test for + # https://github.com/astropy/astropy/issues/3052 + assert t['b'].dtype.str.endswith('1') + + rows = np.arange(6).reshape(2, 3) + t = table_type(rows=rows, names=('a', 'b', 'c'), dtype=['f8', 'f4', 'i8']) + assert np.all(t['a'] == [0, 3]) + assert np.all(t['b'] == [1, 4]) + assert np.all(t['c'] == [2, 5]) + assert t.colnames == ['a', 'b', 'c'] + assert t['a'].dtype.str.endswith('f8') + assert t['b'].dtype.str.endswith('f4') + assert t['c'].dtype.str.endswith('i8') + + def test_init_with_rows_and_data(self, table_type): + with pytest.raises(ValueError) as err: + table_type(data=[[1]], rows=[[1]]) + assert "Cannot supply both `data` and `rows` values" in str(err) + + +@pytest.mark.usefixtures('table_type') +def test_init_and_ref_from_multidim_ndarray(table_type): + """ + Test that initializing from an ndarray structured array with + a multi-dim column works for both copy=False and True and that + the referencing is as expected. 
+ """ + for copy in (False, True): + nd = np.array([(1, [10, 20]), + (3, [30, 40])], + dtype=[(str('a'), 'i8'), (str('b'), 'i8', (2,))]) + t = table_type(nd, copy=copy) + assert t.colnames == ['a', 'b'] + assert t['a'].shape == (2,) + assert t['b'].shape == (2, 2) + t['a'][0] = -200 + t['b'][1][1] = -100 + if copy: + assert nd[str('a')][0] == 1 + assert nd[str('b')][1][1] == 40 + else: + assert nd[str('a')][0] == -200 + assert nd[str('b')][1][1] == -100 diff --git a/astropy/table/tests/test_item_access.py b/astropy/table/tests/test_item_access.py new file mode 100644 index 0000000..f91c5e5 --- /dev/null +++ b/astropy/table/tests/test_item_access.py @@ -0,0 +1,263 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +# TEST_UNICODE_LITERALS + +""" Verify item access API in: +https://github.com/astropy/astropy/wiki/Table-item-access-definition +""" + +import pytest +import numpy as np + + +@pytest.mark.usefixtures('table_data') +class BaseTestItems(): + pass + + +@pytest.mark.usefixtures('table_data') +class TestTableColumnsItems(BaseTestItems): + + def test_by_name(self, table_data): + """Access TableColumns by name and show that item access returns + a Column that refers to underlying table data""" + self.t = table_data.Table(table_data.COLS) + self.tc = self.t.columns + + assert self.tc['a'].name == 'a' + assert self.tc['a'][1] == 2 + assert self.tc['a'].description == 'da' + assert self.tc['a'].format == 'fa' + assert self.tc['a'].meta == {'ma': 1} + assert self.tc['a'].unit == 'ua' + assert self.tc['a'].attrs_equal(table_data.COLS[0]) + assert isinstance(self.tc['a'], table_data.Column) + + self.tc['b'][1] = 0 + assert self.t['b'][1] == 0 + + def test_by_position(self, table_data): + """Access TableColumns by position and show that item access returns + a Column that refers to underlying table data""" + self.t = table_data.Table(table_data.COLS) + self.tc = self.t.columns + + assert self.tc[1].name == 'b' + assert np.all(self.tc[1].data == table_data.COLS[1].data) + assert self.tc[1].description == 'db' + assert self.tc[1].format == 'fb' + assert self.tc[1].meta == {'mb': 1} + assert self.tc[1].unit == 'ub' + assert self.tc[1].attrs_equal(table_data.COLS[1]) + assert isinstance(self.tc[1], table_data.Column) + + assert self.tc[2].unit == 'ub' + + self.tc[1][1] = 0 + assert self.t['b'][1] == 0 + + def test_mult_columns(self, table_data): + """Access TableColumns with "fancy indexing" and showed that returned + TableColumns object still references original data""" + self.t = table_data.Table(table_data.COLS) + self.tc = self.t.columns + + tc2 = self.tc['b', 'c'] + assert tc2[1].name == 'c' + assert tc2[1][1] == 8 + assert tc2[0].name == 'b' + assert tc2[0][1] == 5 + + tc2['c'][1] = 0 + assert self.tc['c'][1] == 0 + assert self.t['c'][1] == 0 + + def test_column_slice(self, table_data): + """Access TableColumns with slice and showed that returned + TableColumns object still references original data""" + self.t = table_data.Table(table_data.COLS) + self.tc = self.t.columns + + tc2 = self.tc[1:3] + assert tc2[1].name == 'c' + assert tc2[1][1] == 8 + assert tc2[0].name == 'b' + assert tc2[0][1] == 5 + + tc2['c'][1] = 0 + assert self.tc['c'][1] == 0 + assert self.t['c'][1] == 0 + + +@pytest.mark.usefixtures('table_data') +class TestTableItems(BaseTestItems): + + @pytest.mark.parametrize("idx", [1, np.int64(1), np.array(1)]) + def test_column(self, table_data, idx): + """Column access returns REFERENCE to data""" + self.t = table_data.Table(table_data.COLS) + self.tc = 
self.t.columns + + a = self.t['a'] + assert a[idx] == 2 + a[idx] = 0 + assert self.t['a'][idx] == 0 + + @pytest.mark.parametrize("idx", [1, np.int64(1), np.array(1)]) + def test_row(self, table_data, idx): + """Row access returns REFERENCE to data""" + self.t = table_data.Table(table_data.COLS) + self.tc = self.t.columns + + row = self.t[idx] + assert row['a'] == 2 + assert row[idx] == 5 + assert row.columns['a'].attrs_equal(table_data.COLS[0]) + assert row.columns['b'].attrs_equal(table_data.COLS[1]) + assert row.columns['c'].attrs_equal(table_data.COLS[2]) + + # Check that setting by col index sets the table and row value + row[idx] = 0 + assert row[idx] == 0 + assert row['b'] == 0 + assert self.t['b'][idx] == 0 + assert self.t[idx]['b'] == 0 + + # Check that setting by col name sets the table and row value + row['a'] = 0 + assert row[0] == 0 + assert row['a'] == 0 + assert self.t['a'][1] == 0 + assert self.t[1]['a'] == 0 + + def test_empty_iterable_item(self, table_data): + """ + Table item access with [], (), or np.array([]) returns the same table + with no rows. + """ + self.t = table_data.Table(table_data.COLS) + for item in [], (), np.array([]): + t2 = self.t[item] + assert not t2 + assert len(t2) == 0 + assert t2['a'].attrs_equal(table_data.COLS[0]) + assert t2['b'].attrs_equal(table_data.COLS[1]) + assert t2['c'].attrs_equal(table_data.COLS[2]) + + def test_table_slice(self, table_data): + """Table slice returns REFERENCE to data""" + self.t = table_data.Table(table_data.COLS) + self.tc = self.t.columns + + t2 = self.t[1:3] + assert np.all(t2['a'] == table_data.DATA['a'][1:3]) + assert t2['a'].attrs_equal(table_data.COLS[0]) + assert t2['b'].attrs_equal(table_data.COLS[1]) + assert t2['c'].attrs_equal(table_data.COLS[2]) + t2['a'][0] = 0 + assert np.all(self.t['a'] == np.array([1, 0, 3])) + assert t2.masked == self.t.masked + assert t2._column_class == self.t._column_class + assert isinstance(t2, table_data.Table) + + def test_fancy_index_slice(self, table_data): + """Table fancy slice returns COPY of data""" + self.t = table_data.Table(table_data.COLS) + self.tc = self.t.columns + + slice = np.array([0, 2]) + t2 = self.t[slice] + assert np.all(t2['a'] == table_data.DATA['a'][slice]) + assert t2['a'].attrs_equal(table_data.COLS[0]) + assert t2['b'].attrs_equal(table_data.COLS[1]) + assert t2['c'].attrs_equal(table_data.COLS[2]) + t2['a'][0] = 0 + + assert np.all(self.t.as_array() == table_data.DATA) + assert np.any(t2['a'] != table_data.DATA['a'][slice]) + assert t2.masked == self.t.masked + assert t2._column_class == self.t._column_class + assert isinstance(t2, table_data.Table) + + def test_list_index_slice(self, table_data): + """Table list index slice returns COPY of data""" + self.t = table_data.Table(table_data.COLS) + self.tc = self.t.columns + + slice = [0, 2] + t2 = self.t[slice] + assert np.all(t2['a'] == table_data.DATA['a'][slice]) + assert t2['a'].attrs_equal(table_data.COLS[0]) + assert t2['b'].attrs_equal(table_data.COLS[1]) + assert t2['c'].attrs_equal(table_data.COLS[2]) + t2['a'][0] = 0 + + assert np.all(self.t.as_array() == table_data.DATA) + assert np.any(t2['a'] != table_data.DATA['a'][slice]) + assert t2.masked == self.t.masked + assert t2._column_class == self.t._column_class + assert isinstance(t2, table_data.Table) + + def test_select_columns(self, table_data): + """Select columns returns COPY of data and all column + attributes""" + self.t = table_data.Table(table_data.COLS) + self.tc = self.t.columns + + # try both lists and tuples + for columns in 
(('a', 'c'), ['a', 'c']): + t2 = self.t[columns] + assert np.all(t2['a'] == table_data.DATA['a']) + assert np.all(t2['c'] == table_data.DATA['c']) + assert t2['a'].attrs_equal(table_data.COLS[0]) + assert t2['c'].attrs_equal(table_data.COLS[2]) + t2['a'][0] = 0 + assert np.all(self.t.as_array() == table_data.DATA) + assert np.any(t2['a'] != table_data.DATA['a']) + assert t2.masked == self.t.masked + assert t2._column_class == self.t._column_class + + def test_select_columns_fail(self, table_data): + """Selecting a column that doesn't exist fails""" + self.t = table_data.Table(table_data.COLS) + + with pytest.raises(ValueError) as err: + self.t[['xxxx']] + assert 'Slice name(s) xxxx not valid column name(s)' in str(err) + + with pytest.raises(ValueError) as err: + self.t[['xxxx', 'yyyy']] + assert 'Slice name(s) xxxx, yyyy not valid column name(s)' in str(err) + + def test_np_where(self, table_data): + """Select rows using output of np.where""" + t = table_data.Table(table_data.COLS) + # Select last two rows + rows = np.where(t['a'] > 1.5) + t2 = t[rows] + assert np.all(t2['a'] == [2, 3]) + assert np.all(t2['b'] == [5, 6]) + assert isinstance(t2, table_data.Table) + + # Select no rows + rows = np.where(t['a'] > 100) + t2 = t[rows] + assert len(t2) == 0 + assert isinstance(t2, table_data.Table) + + def test_np_integers(self, table_data): + """ + Select rows using numpy integers. This is a regression test for a + py 3.3 failure mode + """ + t = table_data.Table(table_data.COLS) + idxs = np.random.randint(len(t), size=2) + item = t[idxs[1]] + + def test_select_bad_column(self, table_data): + """Select column name that does not exist""" + self.t = table_data.Table(table_data.COLS) + self.tc = self.t.columns + + with pytest.raises(ValueError): + self.t['a', 1] diff --git a/astropy/table/tests/test_jsviewer.py b/astropy/table/tests/test_jsviewer.py new file mode 100644 index 0000000..51738e1 --- /dev/null +++ b/astropy/table/tests/test_jsviewer.py @@ -0,0 +1,178 @@ +from os.path import abspath, dirname, join +import textwrap + +import pytest + +from ..table import Table +from ... import extern +from ...extern.six.moves import zip + +try: + import IPython # pylint: disable=W0611 +except ImportError: + HAS_IPYTHON = False +else: + HAS_IPYTHON = True + +EXTERN_DIR = abspath(dirname(extern.__file__)) + +REFERENCE = """ + + + + + + + + + + + +
+   <table class="%(table_class)s" id="%(table_id)s">
+    <thead>
+     <tr>
+      <th>a</th>
+      <th>b</th>
+     </tr>
+    </thead>
+%(lines)s
+   </table>
+  </body>
+ </html>
    + + +""" + +TPL = (' \n' + ' {0}\n' + ' {1}\n' + ' ') + + +def format_lines(col1, col2): + return '\n'.join(TPL.format(a, b) for a, b in zip(col1, col2)) + + +def test_write_jsviewer_default(tmpdir): + t = Table() + t['a'] = [1, 2, 3, 4, 5] + t['b'] = ['a', 'b', 'c', 'd', 'e'] + t['a'].unit = 'm' + + tmpfile = tmpdir.join('test.html').strpath + + t.write(tmpfile, format='jsviewer') + ref = REFERENCE % dict( + lines=format_lines(t['a'], t['b']), + table_class='display compact', + table_id='table%s' % id(t), + length='50', + display_length='10, 25, 50, 100, 500, 1000', + datatables_css_url='https://cdn.datatables.net/1.10.12/css/jquery.dataTables.css', + datatables_js_url='https://cdn.datatables.net/1.10.12/js/jquery.dataTables.min.js', + jquery_url='https://code.jquery.com/jquery-3.1.1.min.js' + ) + with open(tmpfile) as f: + assert f.read().strip() == ref.strip() + + +def test_write_jsviewer_options(tmpdir): + t = Table() + t['a'] = [1, 2, 3, 4, 5] + t['b'] = ['a', 'b', 'c', 'd', 'e'] + t['a'].unit = 'm' + + tmpfile = tmpdir.join('test.html').strpath + + t.write(tmpfile, format='jsviewer', table_id='test', max_lines=3, + jskwargs={'display_length': 5}, table_class='display hover') + ref = REFERENCE % dict( + lines=format_lines(t['a'][:3], t['b'][:3]), + table_class='display hover', + table_id='test', + length='5', + display_length='5, 10, 25, 50, 100, 500, 1000', + datatables_css_url='https://cdn.datatables.net/1.10.12/css/jquery.dataTables.css', + datatables_js_url='https://cdn.datatables.net/1.10.12/js/jquery.dataTables.min.js', + jquery_url='https://code.jquery.com/jquery-3.1.1.min.js' + ) + with open(tmpfile) as f: + assert f.read().strip() == ref.strip() + + +def test_write_jsviewer_local(tmpdir): + t = Table() + t['a'] = [1, 2, 3, 4, 5] + t['b'] = ['a', 'b', 'c', 'd', 'e'] + t['a'].unit = 'm' + + tmpfile = tmpdir.join('test.html').strpath + + t.write(tmpfile, format='jsviewer', table_id='test', + jskwargs={'use_local_files': True}) + ref = REFERENCE % dict( + lines=format_lines(t['a'], t['b']), + table_class='display compact', + table_id='test', + length='50', + display_length='10, 25, 50, 100, 500, 1000', + datatables_css_url='file://' + join(EXTERN_DIR, 'css', 'jquery.dataTables.css'), + datatables_js_url='file://' + join(EXTERN_DIR, 'js', 'jquery.dataTables.min.js'), + jquery_url='file://' + join(EXTERN_DIR, 'js', 'jquery-3.1.1.min.js') + ) + with open(tmpfile) as f: + assert f.read().strip() == ref.strip() + + +@pytest.mark.skipif('not HAS_IPYTHON') +def test_show_in_notebook(): + t = Table() + t['a'] = [1, 2, 3, 4, 5] + t['b'] = ['b', 'c', 'a', 'd', 'e'] + + htmlstr_windx = t.show_in_notebook().data # should default to 'idx' + htmlstr_windx_named = t.show_in_notebook(show_row_index='realidx').data + htmlstr_woindx = t.show_in_notebook(show_row_index=False).data + + assert (textwrap.dedent(""" + idxab + 01b + 12c + 23a + 34d + 45e + """).strip() in htmlstr_windx) + + assert 'realidxab' in htmlstr_windx_named + + assert 'ab' in htmlstr_woindx diff --git a/astropy/table/tests/test_masked.py b/astropy/table/tests/test_masked.py new file mode 100644 index 0000000..c65307e --- /dev/null +++ b/astropy/table/tests/test_masked.py @@ -0,0 +1,417 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +# TEST_UNICODE_LITERALS + +"""Test behavior related to masked tables""" + +import pytest +import numpy as np +import numpy.ma as ma + +from ...table import Column, MaskedColumn, Table + + +class SetupData(object): + def setup_method(self, method): + self.a = 
MaskedColumn(name='a', data=[1, 2, 3], fill_value=1) + self.b = MaskedColumn(name='b', data=[4, 5, 6], mask=True) + self.c = MaskedColumn(name='c', data=[7, 8, 9], mask=False) + self.d_mask = np.array([False, True, False]) + self.d = MaskedColumn(name='d', data=[7, 8, 7], mask=self.d_mask) + self.t = Table([self.a, self.b], masked=True) + self.ca = Column(name='ca', data=[1, 2, 3]) + + +class TestPprint(SetupData): + def test_pformat(self): + assert self.t.pformat() == [' a b ', '--- ---', ' 1 --', ' 2 --', ' 3 --'] + + +class TestFilled(object): + """Test the filled method in MaskedColumn and Table""" + + def setup_method(self, method): + mask = [True, False, False] + self.meta = {'a': 1, 'b': [2, 3]} + a = self.a = MaskedColumn(name='a', data=[1, 2, 3], fill_value=10, mask=mask, meta={'a': 1}) + b = self.b = MaskedColumn(name='b', data=[4.0, 5.0, 6.0], fill_value=10.0, mask=mask) + c = self.c = MaskedColumn(name='c', data=['7', '8', '9'], fill_value='1', mask=mask) + + def test_filled_column(self): + f = self.a.filled() + assert np.all(f == [10, 2, 3]) + assert isinstance(f, Column) + assert not isinstance(f, MaskedColumn) + + # Confirm copy, not ref + assert f.meta['a'] == 1 + f.meta['a'] = 2 + f[1] = 100 + assert self.a[1] == 2 + assert self.a.meta['a'] == 1 + + # Fill with arg fill_value not column fill_value + f = self.a.filled(20) + assert np.all(f == [20, 2, 3]) + + f = self.b.filled() + assert np.all(f == [10.0, 5.0, 6.0]) + assert isinstance(f, Column) + + f = self.c.filled() + assert np.all(f == ['1', '8', '9']) + assert isinstance(f, Column) + + def test_filled_masked_table(self, tableclass): + t = tableclass([self.a, self.b, self.c], meta=self.meta) + + f = t.filled() + assert isinstance(f, Table) + assert f.masked is False + assert np.all(f['a'] == [10, 2, 3]) + assert np.allclose(f['b'], [10.0, 5.0, 6.0]) + assert np.all(f['c'] == ['1', '8', '9']) + + # Confirm copy, not ref + assert f.meta['b'] == [2, 3] + f.meta['b'][0] = 20 + assert t.meta['b'] == [2, 3] + f['a'][2] = 100 + assert t['a'][2] == 3 + + def test_filled_unmasked_table(self, tableclass): + t = tableclass([(1, 2), ('3', '4')], names=('a', 'b'), meta=self.meta) + f = t.filled() + assert isinstance(f, Table) + assert f.masked is False + assert np.all(f['a'] == t['a']) + assert np.all(f['b'] == t['b']) + + # Confirm copy, not ref + assert f.meta['b'] == [2, 3] + f.meta['b'][0] = 20 + assert t.meta['b'] == [2, 3] + f['a'][1] = 100 + assert t['a'][1] == 2 + + +class TestFillValue(SetupData): + """Test setting and getting fill value in MaskedColumn and Table""" + + def test_init_set_fill_value(self): + """Check that setting fill_value in the MaskedColumn init works""" + assert self.a.fill_value == 1 + c = MaskedColumn(name='c', data=['xxxx', 'yyyy'], fill_value='none') + assert c.fill_value == 'none' + + def test_set_get_fill_value_for_bare_column(self): + """Check set and get of fill value works for bare Column""" + self.d.fill_value = -999 + assert self.d.fill_value == -999 + assert np.all(self.d.filled() == [7, -999, 7]) + + def test_set_get_fill_value_for_str_column(self): + c = MaskedColumn(name='c', data=['xxxx', 'yyyy'], mask=[True, False]) + # assert np.all(c.filled() == ['N/A', 'yyyy']) + c.fill_value = 'ABCDEF' + assert c.fill_value == 'ABCD' # string truncated to dtype length + assert np.all(c.filled() == ['ABCD', 'yyyy']) + assert np.all(c.filled('XY') == ['XY', 'yyyy']) + + def test_table_column_mask_not_ref(self): + """Table column mask is not ref of original column mask""" + self.b.fill_value = -999 + 
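+ # The table stores its own copy of column 'b' (Table's default is
+ # copy=True), so changing fill_value on the original column should
+ # leave the column held by the table untouched, as checked below.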
assert self.t['b'].fill_value != -999 + + def test_set_get_fill_value_for_table_column(self): + """Check set and get of fill value works for Column in a Table""" + self.t['b'].fill_value = 1 + assert self.t['b'].fill_value == 1 + assert np.all(self.t['b'].filled() == [1, 1, 1]) + + def test_data_attribute_fill_and_mask(self): + """Check that .data attribute preserves fill_value and mask""" + self.t['b'].fill_value = 1 + self.t['b'].mask = [True, False, True] + assert self.t['b'].data.fill_value == 1 + assert np.all(self.t['b'].data.mask == [True, False, True]) + + +class TestMaskedColumnInit(SetupData): + """Initialization of a masked column""" + + def test_set_mask_and_not_ref(self): + """Check that mask gets set properly and that it is a copy, not ref""" + assert np.all(~self.a.mask) + assert np.all(self.b.mask) + assert np.all(~self.c.mask) + assert np.all(self.d.mask == self.d_mask) + self.d.mask[0] = True + assert not np.all(self.d.mask == self.d_mask) + + def test_set_mask_from_list(self): + """Set mask from a list""" + mask_list = [False, True, False] + a = MaskedColumn(name='a', data=[1, 2, 3], mask=mask_list) + assert np.all(a.mask == mask_list) + + def test_override_existing_mask(self): + """Override existing mask values""" + mask_list = [False, True, False] + b = MaskedColumn(name='b', data=self.b, mask=mask_list) + assert np.all(b.mask == mask_list) + + def test_incomplete_mask_spec(self): + """Incomplete mask specification raises MaskError""" + mask_list = [False, True] + with pytest.raises(ma.MaskError): + MaskedColumn(name='b', length=4, mask=mask_list) + + +class TestTableInit(SetupData): + """Initializing a table""" + + def test_mask_true_if_any_input_masked(self): + """Masking is True if any input is masked""" + t = Table([self.ca, self.a]) + assert t.masked is True + t = Table([self.ca]) + assert t.masked is False + t = Table([self.ca, ma.array([1, 2, 3])]) + assert t.masked is True + + def test_mask_false_if_no_input_masked(self): + """Masking not true if not (requested or input requires mask)""" + t0 = Table([[3, 4]], masked=False) + t1 = Table(t0, masked=True) + t2 = Table(t1, masked=False) + assert not t0.masked + assert t1.masked + assert not t2.masked + + def test_mask_property(self): + t = self.t + # Access table mask (boolean structured array) by column name + assert np.all(t.mask['a'] == np.array([False, False, False])) + assert np.all(t.mask['b'] == np.array([True, True, True])) + # Check that setting mask from table mask has the desired effect on column + t.mask['b'] = np.array([False, True, False]) + assert np.all(t['b'].mask == np.array([False, True, False])) + # Non-masked table returns None for mask attribute + t2 = Table([self.ca], masked=False) + assert t2.mask is None + # Set mask property globally and verify local correctness + for mask in (True, False): + t.mask = mask + for name in ('a', 'b'): + assert np.all(t[name].mask == mask) + + +class TestAddColumn(object): + + def test_add_masked_column_to_masked_table(self): + t = Table(masked=True) + assert t.masked + t.add_column(MaskedColumn(name='a', data=[1, 2, 3], mask=[0, 1, 0])) + assert t.masked + t.add_column(MaskedColumn(name='b', data=[4, 5, 6], mask=[1, 0, 1])) + assert t.masked + assert np.all(t['a'] == np.array([1, 2, 3])) + assert np.all(t['a'].mask == np.array([0, 1, 0], bool)) + assert np.all(t['b'] == np.array([4, 5, 6])) + assert np.all(t['b'].mask == np.array([1, 0, 1], bool)) + + def test_add_masked_column_to_non_masked_table(self): + t = Table(masked=False) + assert not t.masked + 
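+ # Adding a plain Column must keep the table unmasked; only the
+ # MaskedColumn added next is expected to convert it to a masked table.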
t.add_column(Column(name='a', data=[1, 2, 3])) + assert not t.masked + t.add_column(MaskedColumn(name='b', data=[4, 5, 6], mask=[1, 0, 1])) + assert t.masked + assert np.all(t['a'] == np.array([1, 2, 3])) + assert np.all(t['a'].mask == np.array([0, 0, 0], bool)) + assert np.all(t['b'] == np.array([4, 5, 6])) + assert np.all(t['b'].mask == np.array([1, 0, 1], bool)) + + def test_add_non_masked_column_to_masked_table(self): + t = Table(masked=True) + assert t.masked + t.add_column(Column(name='a', data=[1, 2, 3])) + assert t.masked + t.add_column(MaskedColumn(name='b', data=[4, 5, 6], mask=[1, 0, 1])) + assert t.masked + assert np.all(t['a'] == np.array([1, 2, 3])) + assert np.all(t['a'].mask == np.array([0, 0, 0], bool)) + assert np.all(t['b'] == np.array([4, 5, 6])) + assert np.all(t['b'].mask == np.array([1, 0, 1], bool)) + + def test_convert_to_masked_table_only_if_necessary(self): + # Do not convert to masked table, if new column has no masked value. + # See #1185 for details. + t = Table(masked=False) + assert not t.masked + t.add_column(Column(name='a', data=[1, 2, 3])) + assert not t.masked + t.add_column(MaskedColumn(name='b', data=[4, 5, 6], mask=[0, 0, 0])) + assert not t.masked + assert np.all(t['a'] == np.array([1, 2, 3])) + assert np.all(t['b'] == np.array([4, 5, 6])) + + +class TestRenameColumn(object): + + def test_rename_masked_column(self): + t = Table(masked=True) + t.add_column(MaskedColumn(name='a', data=[1, 2, 3], mask=[0, 1, 0])) + t['a'].fill_value = 42 + t.rename_column('a', 'b') + assert t.masked + assert np.all(t['b'] == np.array([1, 2, 3])) + assert np.all(t['b'].mask == np.array([0, 1, 0], bool)) + assert t['b'].fill_value == 42 + assert t.colnames == ['b'] + + +class TestRemoveColumn(object): + + def test_remove_masked_column(self): + t = Table(masked=True) + t.add_column(MaskedColumn(name='a', data=[1, 2, 3], mask=[0, 1, 0])) + t['a'].fill_value = 42 + t.add_column(MaskedColumn(name='b', data=[4, 5, 6], mask=[1, 0, 1])) + t.remove_column('b') + assert t.masked + assert np.all(t['a'] == np.array([1, 2, 3])) + assert np.all(t['a'].mask == np.array([0, 1, 0], bool)) + assert t['a'].fill_value == 42 + assert t.colnames == ['a'] + + +class TestAddRow(object): + + def test_add_masked_row_to_masked_table_iterable(self): + t = Table(masked=True) + t.add_column(MaskedColumn(name='a', data=[1], mask=[0])) + t.add_column(MaskedColumn(name='b', data=[4], mask=[1])) + t.add_row([2, 5], mask=[1, 0]) + t.add_row([3, 6], mask=[0, 1]) + assert t.masked + assert np.all(np.array(t['a']) == np.array([1, 2, 3])) + assert np.all(t['a'].mask == np.array([0, 1, 0], bool)) + assert np.all(np.array(t['b']) == np.array([4, 5, 6])) + assert np.all(t['b'].mask == np.array([1, 0, 1], bool)) + + def test_add_masked_row_to_masked_table_mapping1(self): + t = Table(masked=True) + t.add_column(MaskedColumn(name='a', data=[1], mask=[0])) + t.add_column(MaskedColumn(name='b', data=[4], mask=[1])) + t.add_row({'b': 5, 'a': 2}, mask={'a': 1, 'b': 0}) + t.add_row({'a': 3, 'b': 6}, mask={'b': 1, 'a': 0}) + assert t.masked + assert np.all(np.array(t['a']) == np.array([1, 2, 3])) + assert np.all(t['a'].mask == np.array([0, 1, 0], bool)) + assert np.all(np.array(t['b']) == np.array([4, 5, 6])) + assert np.all(t['b'].mask == np.array([1, 0, 1], bool)) + + def test_add_masked_row_to_masked_table_mapping2(self): + # When adding values to a masked table, if the mask is specified as a + # dict, then values not specified will have mask values set to True + t = Table(masked=True) + 
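+ # Start from a one-row masked table; each add_row() call below supplies
+ # both the new values and the per-column mask.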
t.add_column(MaskedColumn(name='a', data=[1], mask=[0])) + t.add_column(MaskedColumn(name='b', data=[4], mask=[1])) + t.add_row({'b': 5}, mask={'b': 0}) + t.add_row({'a': 3}, mask={'a': 0}) + assert t.masked + assert t['a'][0] == 1 and t['a'][2] == 3 + assert np.all(t['a'].mask == np.array([0, 1, 0], bool)) + assert t['b'][1] == 5 + assert np.all(t['b'].mask == np.array([1, 0, 1], bool)) + + def test_add_masked_row_to_masked_table_mapping3(self): + # When adding values to a masked table, if mask is not passed to + # add_row, then the mask should be set to False if values are present + # and True if not. + t = Table(masked=True) + t.add_column(MaskedColumn(name='a', data=[1], mask=[0])) + t.add_column(MaskedColumn(name='b', data=[4], mask=[1])) + t.add_row({'b': 5}) + t.add_row({'a': 3}) + assert t.masked + assert t['a'][0] == 1 and t['a'][2] == 3 + assert np.all(t['a'].mask == np.array([0, 1, 0], bool)) + assert t['b'][1] == 5 + assert np.all(t['b'].mask == np.array([1, 0, 1], bool)) + + def test_add_masked_row_to_masked_table_mapping4(self): + # When adding values to a masked table, if the mask is specified as a + # dict, then keys in values should match keys in mask + t = Table(masked=True) + t.add_column(MaskedColumn(name='a', data=[1], mask=[0])) + t.add_column(MaskedColumn(name='b', data=[4], mask=[1])) + with pytest.raises(ValueError) as exc: + t.add_row({'b': 5}, mask={'a': True}) + assert exc.value.args[0] == 'keys in mask should match keys in vals' + + def test_add_masked_row_to_masked_table_mismatch(self): + t = Table(masked=True) + t.add_column(MaskedColumn(name='a', data=[1], mask=[0])) + t.add_column(MaskedColumn(name='b', data=[4], mask=[1])) + with pytest.raises(TypeError) as exc: + t.add_row([2, 5], mask={'a': 1, 'b': 0}) + assert exc.value.args[0] == "Mismatch between type of vals and mask" + with pytest.raises(TypeError) as exc: + t.add_row({'b': 5, 'a': 2}, mask=[1, 0]) + assert exc.value.args[0] == "Mismatch between type of vals and mask" + + def test_add_masked_row_to_non_masked_table_iterable(self): + t = Table(masked=False) + t.add_column(Column(name='a', data=[1])) + t.add_column(Column(name='b', data=[4])) + assert not t.masked + t.add_row([2, 5]) + assert not t.masked + t.add_row([3, 6], mask=[0, 1]) + assert t.masked + assert np.all(np.array(t['a']) == np.array([1, 2, 3])) + assert np.all(t['a'].mask == np.array([0, 0, 0], bool)) + assert np.all(np.array(t['b']) == np.array([4, 5, 6])) + assert np.all(t['b'].mask == np.array([0, 0, 1], bool)) + + +def test_setting_from_masked_column(): + """Test issue in #2997""" + mask_b = np.array([True, True, False, False]) + for select in (mask_b, slice(0, 2)): + t = Table(masked=True) + t['a'] = Column([1, 2, 3, 4]) + t['b'] = MaskedColumn([11, 22, 33, 44], mask=mask_b) + t['c'] = MaskedColumn([111, 222, 333, 444], mask=[True, False, True, False]) + + t['b'][select] = t['c'][select] + assert t['b'][1] == t[1]['b'] + assert t['b'][0] is np.ma.masked # Original state since t['c'][0] is masked + assert t['b'][1] == 222 # New from t['c'] since t['c'][1] is unmasked + assert t['b'][2] == 33 + assert t['b'][3] == 44 + assert np.all(t['b'].mask == t.mask['b']) # Avoid t.mask in general, this is for testing + + mask_before_add = t.mask.copy() + t['d'] = np.arange(len(t)) + assert np.all(t.mask['b'] == mask_before_add['b']) + + +def test_coercing_fill_value_type(): + """ + Test that masked column fill_value is coerced into the correct column type. 
+ """ + # This is the original example posted on the astropy@scipy mailing list + t = Table({'a': ['1']}, masked=True) + t['a'].set_fill_value('0') + t2 = Table(t, names=['a'], dtype=[np.int32]) + assert isinstance(t2['a'].fill_value, np.int32) + + # Unit test the same thing. + c = MaskedColumn(['1']) + c.set_fill_value('0') + c2 = MaskedColumn(c, dtype=np.int32) + assert isinstance(c2.fill_value, np.int32) diff --git a/astropy/table/tests/test_mixin.py b/astropy/table/tests/test_mixin.py new file mode 100644 index 0000000..7ee5d4a --- /dev/null +++ b/astropy/table/tests/test_mixin.py @@ -0,0 +1,618 @@ +# -*- coding: utf-8 -*- +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +try: + import h5py # pylint: disable=W0611 +except ImportError: + HAS_H5PY = False +else: + HAS_H5PY = True + +try: + import yaml # pylint: disable=W0611 + HAS_YAML = True +except ImportError: + HAS_YAML = False + +import copy + +import pytest +import numpy as np + +from ...extern import six +from ...extern.six.moves import cPickle as pickle, cStringIO as StringIO +from ...table import Table, QTable, join, hstack, vstack, Column, NdarrayMixin +from ... import time +from ... import coordinates +from ... import units as u +from ..column import BaseColumn +from .. import table_helpers +from .conftest import MIXIN_COLS + + +def test_attributes(mixin_cols): + """ + Required attributes for a column can be set. + """ + m = mixin_cols['m'] + m.info.name = 'a' + assert m.info.name == 'a' + + m.info.description = 'a' + assert m.info.description == 'a' + + # Cannot set unit for these classes + if isinstance(m, (u.Quantity, coordinates.SkyCoord, time.Time)): + with pytest.raises(AttributeError): + m.info.unit = u.m + else: + m.info.unit = u.m + assert m.info.unit is u.m + + m.info.format = 'a' + assert m.info.format == 'a' + + m.info.meta = {'a': 1} + assert m.info.meta == {'a': 1} + + with pytest.raises(AttributeError): + m.info.bad_attr = 1 + + with pytest.raises(AttributeError): + m.info.bad_attr + + +def check_mixin_type(table, table_col, in_col): + # We check for QuantityInfo rather than just isinstance(col, u.Quantity) + # since we want to treat EarthLocation as a mixin, even though it is + # a Quantity subclass. + if ((isinstance(in_col.info, u.QuantityInfo) and type(table) is not QTable) + or isinstance(in_col, Column)): + assert type(table_col) is table.ColumnClass + else: + assert type(table_col) is type(in_col) + + # Make sure in_col got copied and creating table did not touch it + assert in_col.info.name is None + + +def test_make_table(table_types, mixin_cols): + """ + Make a table with the columns in mixin_cols, which is an ordered dict of + three cols: 'a' and 'b' are table_types.Column type, and 'm' is a mixin. + """ + t = table_types.Table(mixin_cols) + check_mixin_type(t, t['m'], mixin_cols['m']) + + cols = list(mixin_cols.values()) + t = table_types.Table(cols, names=('i', 'a', 'b', 'm')) + check_mixin_type(t, t['m'], mixin_cols['m']) + + t = table_types.Table(cols) + check_mixin_type(t, t['col3'], mixin_cols['m']) + + +def test_io_ascii_write(): + """ + Test that table with mixin column can be written by io.ascii for + every pure Python writer. No validation of the output is done, + this just confirms no exceptions. 
+ """ + from ...io.ascii.connect import _get_connectors_table + t = QTable(MIXIN_COLS) + for fmt in _get_connectors_table(): + if fmt['Format'] == 'ascii.ecsv' and not HAS_YAML: + continue + if fmt['Write'] and '.fast_' not in fmt['Format']: + out = StringIO() + t.write(out, format=fmt['Format']) + + +def test_io_quantity_write(tmpdir): + """ + Test that table with Quantity mixin column can be written by io.fits, + io.votable but not by io.misc.hdf5. Validation of the output is done. + Test that io.fits writes a table containing Quantity mixin columns that can + be round-tripped (metadata unit). + """ + t = QTable() + t['a'] = u.Quantity([1, 2, 4], unit='Angstrom') + + filename = tmpdir.join("table-tmp").strpath + open(filename, 'w').close() + + # Show that FITS and VOTable formats succeed + for fmt in ('fits', 'votable'): + t.write(filename, format=fmt, overwrite=True) + qt = QTable.read(filename, format=fmt) + assert isinstance(qt['a'], u.Quantity) + assert qt['a'].unit == 'Angstrom' + + # Show that HDF5 format fails + if HAS_H5PY: + with pytest.raises(ValueError) as err: + t.write(filename, format='hdf5', overwrite=True) + assert 'cannot write table with mixin column(s)' in str(err.value) + + +def test_io_write_fail(mixin_cols): + """ + Test that table with mixin column (excluding Quantity) cannot be written by io.votable, + io.fits, and io.misc.hdf5. + """ + t = QTable(mixin_cols) + # Only do this test if there are unsupported column types (i.e. anything besides + # BaseColumn or Quantity subclasses. + unsupported_cols = t.columns.not_isinstance((BaseColumn, u.Quantity)) + if not unsupported_cols: + pytest.skip("no unsupported column types") + for fmt in ('fits', 'votable', 'hdf5'): + if fmt == 'hdf5' and not HAS_H5PY: + continue + out = StringIO() + with pytest.raises(ValueError) as err: + t.write(out, format=fmt) + assert 'cannot write table with mixin column(s)' in str(err.value) + + +def test_join(table_types): + """ + Join tables with mixin cols. Use column "i" as proxy for what the + result should be for each mixin. + """ + t1 = table_types.Table() + t1['a'] = table_types.Column(['a', 'b', 'b', 'c']) + t1['i'] = table_types.Column([0, 1, 2, 3]) + for name, col in MIXIN_COLS.items(): + t1[name] = col + + t2 = table_types.Table(t1) + t2['a'] = ['b', 'c', 'a', 'd'] + + for name, col in MIXIN_COLS.items(): + t1[name].info.description = name + t2[name].info.description = name + '2' + + for join_type in ('inner', 'left'): + t12 = join(t1, t2, keys='a', join_type=join_type) + idx1 = t12['i_1'] + idx2 = t12['i_2'] + for name, col in MIXIN_COLS.items(): + name1 = name + '_1' + name2 = name + '_2' + assert_table_name_col_equal(t12, name1, col[idx1]) + assert_table_name_col_equal(t12, name2, col[idx2]) + assert t12[name1].info.description == name + assert t12[name2].info.description == name + '2' + + for join_type in ('outer', 'right'): + with pytest.raises(NotImplementedError) as exc: + t12 = join(t1, t2, keys='a', join_type=join_type) + assert 'join requires masking column' in str(exc.value) + + with pytest.raises(ValueError) as exc: + t12 = join(t1, t2, keys=['a', 'skycoord']) + assert 'not allowed as a key column' in str(exc.value) + + # Join does work for a mixin which is a subclass of np.ndarray + t12 = join(t1, t2, keys=['quantity']) + assert np.all(t12['a_1'] == t1['a']) + + +def test_hstack(table_types): + """ + Hstack tables with mixin cols. Use column "i" as proxy for what the + result should be for each mixin. 
+ """ + t1 = table_types.Table() + t1['i'] = table_types.Column([0, 1, 2, 3]) + for name, col in MIXIN_COLS.items(): + t1[name] = col + t1[name].info.description = name + t1[name].info.meta = {'a': 1} + + for join_type in ('inner', 'outer'): + for chop in (True, False): + t2 = table_types.Table(t1) + if chop: + t2 = t2[:-1] + if join_type == 'outer': + with pytest.raises(NotImplementedError) as exc: + t12 = hstack([t1, t2], join_type=join_type) + assert 'hstack requires masking column' in str(exc.value) + continue + + t12 = hstack([t1, t2], join_type=join_type) + idx1 = t12['i_1'] + idx2 = t12['i_2'] + for name, col in MIXIN_COLS.items(): + name1 = name + '_1' + name2 = name + '_2' + assert_table_name_col_equal(t12, name1, col[idx1]) + assert_table_name_col_equal(t12, name2, col[idx2]) + for attr in ('description', 'meta'): + assert getattr(t1[name].info, attr) == getattr(t12[name1].info, attr) + assert getattr(t2[name].info, attr) == getattr(t12[name2].info, attr) + + +def assert_table_name_col_equal(t, name, col): + """ + Assert all(t[name] == col), with special handling for known mixin cols. + """ + if isinstance(col, coordinates.SkyCoord): + assert np.all(t[name].ra == col.ra) + assert np.all(t[name].dec == col.dec) + elif isinstance(col, u.Quantity): + if type(t) is QTable: + assert np.all(t[name] == col) + elif isinstance(col, table_helpers.ArrayWrapper): + assert np.all(t[name].data == col.data) + else: + assert np.all(t[name] == col) + + +def test_get_items(mixin_cols): + """ + Test that slicing / indexing table gives right values and col attrs inherit + """ + attrs = ('name', 'unit', 'dtype', 'format', 'description', 'meta') + m = mixin_cols['m'] + m.info.name = 'm' + m.info.format = '{0}' + m.info.description = 'd' + m.info.meta = {'a': 1} + t = QTable([m]) + for item in ([1, 3], np.array([0, 2]), slice(1, 3)): + t2 = t[item] + m2 = m[item] + assert_table_name_col_equal(t2, 'm', m[item]) + for attr in attrs: + assert getattr(t2['m'].info, attr) == getattr(m.info, attr) + assert getattr(m2.info, attr) == getattr(m.info, attr) + + +def test_info_preserved_pickle_copy_init(mixin_cols): + """ + Test copy, pickle, and init from class roundtrip preserve info. This + tests not only the mixin classes but a regular column as well. 
+ """ + def pickle_roundtrip(c): + return pickle.loads(pickle.dumps(c)) + + def init_from_class(c): + return c.__class__(c) + + attrs = ('name', 'unit', 'dtype', 'format', 'description', 'meta') + for colname in ('i', 'm'): + m = mixin_cols[colname] + m.info.name = colname + m.info.format = '{0}' + m.info.description = 'd' + m.info.meta = {'a': 1} + for func in (copy.copy, copy.deepcopy, pickle_roundtrip, init_from_class): + m2 = func(m) + for attr in attrs: + assert getattr(m2.info, attr) == getattr(m.info, attr) + + +def test_add_column(mixin_cols): + """ + Test that adding a column preserves values and attributes + """ + attrs = ('name', 'unit', 'dtype', 'format', 'description', 'meta') + m = mixin_cols['m'] + assert m.info.name is None + + # Make sure adding column in various ways doesn't touch + t = QTable([m], names=['a']) + assert m.info.name is None + + t['new'] = m + assert m.info.name is None + + m.info.name = 'm' + m.info.format = '{0}' + m.info.description = 'd' + m.info.meta = {'a': 1} + t = QTable([m]) + + # Add columns m2, m3, m4 by two different methods and test expected equality + t['m2'] = m + m.info.name = 'm3' + t.add_columns([m], copy=True) + m.info.name = 'm4' + t.add_columns([m], copy=False) + for name in ('m2', 'm3', 'm4'): + assert_table_name_col_equal(t, name, m) + for attr in attrs: + if attr != 'name': + assert getattr(t['m'].info, attr) == getattr(t[name].info, attr) + # Also check that one can set using a scalar. + s = m[0] + if type(s) is type(m): + # We're not going to worry about testing classes for which scalars + # are a different class than the real array (and thus loose info, etc.) + t['s'] = m[0] + assert_table_name_col_equal(t, 's', m[0]) + for attr in attrs: + if attr != 'name': + assert getattr(t['m'].info, attr) == getattr(t['s'].info, attr) + + # While we're add it, also check a length-1 table. + t = QTable([m[1:2]], names=['m']) + if type(s) is type(m): + t['s'] = m[0] + assert_table_name_col_equal(t, 's', m[0]) + for attr in attrs: + if attr != 'name': + assert getattr(t['m'].info, attr) == getattr(t['s'].info, attr) + + +def test_vstack(): + """ + Vstack tables with mixin cols. + """ + t1 = QTable(MIXIN_COLS) + t2 = QTable(MIXIN_COLS) + with pytest.raises(NotImplementedError): + vstack([t1, t2]) + + +def test_insert_row(mixin_cols): + """ + Test inserting a row, which only works for BaseColumn and Quantity + """ + t = QTable(mixin_cols) + t['m'].info.description = 'd' + if isinstance(t['m'], (u.Quantity, Column)): + t.insert_row(1, t[-1]) + assert t[1] == t[-1] + assert t['m'].info.description == 'd' + else: + with pytest.raises(ValueError) as exc: + t.insert_row(1, t[-1]) + assert "Unable to insert row" in str(exc.value) + + +def test_insert_row_bad_unit(): + """ + Insert a row into a QTable with the wrong unit + """ + t = QTable([[1] * u.m]) + with pytest.raises(ValueError) as exc: + t.insert_row(0, (2 * u.m / u.s,)) + assert "'m / s' (speed) and 'm' (length) are not convertible" in str(exc.value) + + +def test_convert_np_array(mixin_cols): + """ + Test that converting to numpy array creates an object dtype and that + each instance in the array has the expected type. + """ + t = QTable(mixin_cols) + ta = t.as_array() + m = mixin_cols['m'] + dtype_kind = m.dtype.kind if hasattr(m, 'dtype') else 'O' + assert ta['m'].dtype.kind == dtype_kind + + +def test_assignment_and_copy(): + """ + Test that assignment of an int, slice, and fancy index works. + Along the way test that copying table works. 
+ """ + for name in ('quantity', 'arraywrap'): + m = MIXIN_COLS[name] + t0 = QTable([m], names=['m']) + for i0, i1 in ((1, 2), + (slice(0, 2), slice(1, 3)), + (np.array([1, 2]), np.array([2, 3]))): + t = t0.copy() + t['m'][i0] = m[i1] + if name == 'arraywrap': + assert np.all(t['m'].data[i0] == m.data[i1]) + assert np.all(t0['m'].data[i0] == m.data[i0]) + assert np.all(t0['m'].data[i0] != t['m'].data[i0]) + else: + assert np.all(t['m'][i0] == m[i1]) + assert np.all(t0['m'][i0] == m[i0]) + assert np.all(t0['m'][i0] != t['m'][i0]) + + +def test_grouping(): + """ + Test grouping with mixin columns. Raises not yet implemented error. + """ + t = QTable(MIXIN_COLS) + t['index'] = ['a', 'b', 'b', 'c'] + with pytest.raises(NotImplementedError): + t.group_by('index') + + +def test_conversion_qtable_table(): + """ + Test that a table round trips from QTable => Table => QTable + """ + qt = QTable(MIXIN_COLS) + names = qt.colnames + for name in names: + qt[name].info.description = name + + t = Table(qt) + for name in names: + assert t[name].info.description == name + if name == 'quantity': + assert np.all(t['quantity'] == qt['quantity'].value) + assert np.all(t['quantity'].unit is qt['quantity'].unit) + assert isinstance(t['quantity'], t.ColumnClass) + else: + assert_table_name_col_equal(t, name, qt[name]) + + qt2 = QTable(qt) + for name in names: + assert qt2[name].info.description == name + assert_table_name_col_equal(qt2, name, qt[name]) + + +def test_setitem_as_column_name(): + """ + Test for mixin-related regression described in #3321. + """ + t = Table() + t['a'] = ['x', 'y'] + t['b'] = 'b' # Previously was failing with KeyError + assert np.all(t['a'] == ['x', 'y']) + assert np.all(t['b'] == ['b', 'b']) + + +def test_quantity_representation(): + """ + Test that table representation of quantities does not have unit + """ + t = QTable([[1, 2] * u.m]) + assert t.pformat() == ['col0', + ' m ', + '----', + ' 1.0', + ' 2.0'] + + +def test_skycoord_representation(): + """ + Test that skycoord representation works, both in the way that the + values are output and in changing the frame representation. + """ + # With no unit we get "None" in the unit row + c = coordinates.SkyCoord([0], [1], [0], representation='cartesian') + t = Table([c]) + assert t.pformat() == [' col0 ', + 'None,None,None', + '--------------', + ' 0.0,1.0,0.0'] + + # Test that info works with a dynamically changed representation + c = coordinates.SkyCoord([0], [1], [0], unit='m', representation='cartesian') + t = Table([c]) + assert t.pformat() == [' col0 ', + ' m,m,m ', + '-----------', + '0.0,1.0,0.0'] + + t['col0'].representation = 'unitspherical' + assert t.pformat() == [' col0 ', + 'deg,deg ', + '--------', + '90.0,0.0'] + + t['col0'].representation = 'cylindrical' + assert t.pformat() == [' col0 ', + ' m,deg,m ', + '------------', + '1.0,90.0,0.0'] + + +def test_ndarray_mixin(): + """ + Test directly adding a plain structured array into a table instead of the + view as an NdarrayMixin. Once added as an NdarrayMixin then all the previous + tests apply. + """ + a = np.array([(1, 'a'), (2, 'b'), (3, 'c'), (4, 'd')], + dtype=''.format(id=id(t)), + 'col0 [2]col1 [2]col2 [2]', + '1 .. 23 .. 45 .. 6', + '10 .. 2030 .. 4050 .. 60', + ''] + nbclass = table.conf.default_notebook_table_class + assert t._repr_html_().splitlines() == [ + '<{0} masked={1} length=2>'.format(table_type.__name__, t.masked), + ''.format(id=id(t), nbclass=nbclass), + '', + '', + '', + '', + '
<thead><tr><th>col0 [2]</th><th>col1 [2]</th><th>col2 [2]</th></tr></thead>',
+ '<thead><tr><th>int64</th><th>int64</th><th>int64</th></tr></thead>',
+ '<tr><td>1 .. 2</td><td>3 .. 4</td><td>5 .. 6</td></tr>',
+ '<tr><td>10 .. 20</td><td>30 .. 40</td><td>50 .. 60</td></tr>',
+ '</table>
    '] + + t = table_type([arr]) + lines = t.pformat() + assert lines == ['col0 [2,2]', + '----------', + ' 1 .. 20', + ' 3 .. 40', + ' 5 .. 60'] + + def test_fake_multidim(self, table_type): + """Test printing with 'fake' multidimensional column""" + arr = [np.array([[(1,)], + [(10,)]], dtype=np.int64), + np.array([[(3,)], + [(30,)]], dtype=np.int64), + np.array([[(5,)], + [(50,)]], dtype=np.int64)] + t = table_type(arr) + lines = t.pformat() + assert lines == ['col0 [1,1] col1 [1,1] col2 [1,1]', + '---------- ---------- ----------', + ' 1 3 5', + ' 10 30 50'] + + lines = t.pformat(html=True) + assert lines == [''.format(id=id(t)), + '', + '', + '', + '
<thead><tr><th>col0 [1,1]</th><th>col1 [1,1]</th><th>col2 [1,1]</th></tr></thead>',
+ '<tr><td>1</td><td>3</td><td>5</td></tr>',
+ '<tr><td>10</td><td>30</td><td>50</td></tr>',
+ '</table>
    '] + nbclass = table.conf.default_notebook_table_class + assert t._repr_html_().splitlines() == [ + '<{0} masked={1} length=2>'.format(table_type.__name__, t.masked), + ''.format(id=id(t), nbclass=nbclass), + '', + '', + '', u'', + '
<thead><tr><th>col0 [1,1]</th><th>col1 [1,1]</th><th>col2 [1,1]</th></tr></thead>',
+ '<thead><tr><th>int64</th><th>int64</th><th>int64</th></tr></thead>',
+ '<tr><td>1</td><td>3</td><td>5</td></tr>',
+ '<tr><td>10</td><td>30</td><td>50</td></tr>',
+ '</table>
    '] + + t = table_type([arr]) + lines = t.pformat() + assert lines == ['col0 [2,1,1]', + '------------', + ' 1 .. 10', + ' 3 .. 30', + ' 5 .. 50'] + + +def test_html_escaping(): + t = table.Table([(str(''), 2, 3)]) + nbclass = table.conf.default_notebook_table_class + assert t._repr_html_().splitlines() == [ + '<Table length=3>', + ''.format(id=id(t), nbclass=nbclass), + '', + '', + '', + '', + '', + '
<thead><tr><th>col0</th></tr></thead>',
+ '<thead><tr><th>str33</th></tr></thead>',
+ '<tr><td>&lt;script&gt;alert(&quot;gotcha&quot;);&lt;/script&gt;</td></tr>',
+ '<tr><td>2</td></tr>',
+ '<tr><td>3</td></tr>',
+ '</table>
    '] + + +@pytest.mark.usefixtures('table_type') +class TestPprint(): + + def _setup(self, table_type): + self.tb = table_type(BIG_WIDE_ARR) + self.tb['col0'].format = 'e' + self.tb['col1'].format = '.6f' + + self.tb['col0'].unit = 'km**2' + self.tb['col19'].unit = 'kg s m**-2' + self.ts = table_type(SMALL_ARR) + + def test_empty_table(self, table_type): + t = table_type() + lines = t.pformat() + assert lines == [''] + c = repr(t) + assert c.splitlines() == ['<{0} masked={1} length=0>'.format(table_type.__name__, t.masked), + ''] + + def test_format0(self, table_type): + """Try getting screen size but fail to defaults because testing doesn't + have access to screen (fcntl.ioctl fails). + """ + self._setup(table_type) + arr = np.arange(4000, dtype=np.float64).reshape(100, 40) + lines = table_type(arr).pformat() + nlines, width = console.terminal_size() + assert len(lines) == nlines + for line in lines[:-1]: # skip last "Length = .. rows" line + assert (len(line) > width - 10 and + len(line) <= width) + + def test_format1(self, table_type): + """Basic test of formatting, unit header row included""" + self._setup(table_type) + lines = self.tb.pformat(max_lines=8, max_width=40) + assert lines == [' col0 col1 ... col19 ', + ' km2 ... kg s / m2', + '------------ ----------- ... ---------', + '0.000000e+00 1.000000 ... 19.0', + ' ... ... ... ...', + '1.960000e+03 1961.000000 ... 1979.0', + '1.980000e+03 1981.000000 ... 1999.0', + 'Length = 100 rows'] + + def test_format2(self, table_type): + """Basic test of formatting, unit header row excluded""" + self._setup(table_type) + lines = self.tb.pformat(max_lines=8, max_width=40, show_unit=False) + assert lines == [' col0 col1 ... col19 ', + '------------ ----------- ... ------', + '0.000000e+00 1.000000 ... 19.0', + '2.000000e+01 21.000000 ... 39.0', + ' ... ... ... ...', + '1.960000e+03 1961.000000 ... 1979.0', + '1.980000e+03 1981.000000 ... 1999.0', + 'Length = 100 rows'] + + def test_format3(self, table_type): + """Include the unit header row""" + self._setup(table_type) + lines = self.tb.pformat(max_lines=8, max_width=40, show_unit=True) + + assert lines == [' col0 col1 ... col19 ', + ' km2 ... kg s / m2', + '------------ ----------- ... ---------', + '0.000000e+00 1.000000 ... 19.0', + ' ... ... ... ...', + '1.960000e+03 1961.000000 ... 1979.0', + '1.980000e+03 1981.000000 ... 1999.0', + 'Length = 100 rows'] + + def test_format4(self, table_type): + """Do not include the name header row""" + self._setup(table_type) + lines = self.tb.pformat(max_lines=8, max_width=40, show_name=False) + assert lines == [' km2 ... kg s / m2', + '------------ ----------- ... ---------', + '0.000000e+00 1.000000 ... 19.0', + '2.000000e+01 21.000000 ... 39.0', + ' ... ... ... ...', + '1.960000e+03 1961.000000 ... 1979.0', + '1.980000e+03 1981.000000 ... 
1999.0', + 'Length = 100 rows'] + + def test_noclip(self, table_type): + """Basic table print""" + self._setup(table_type) + lines = self.ts.pformat(max_lines=-1, max_width=-1) + assert lines == ['col0 col1 col2', + '---- ---- ----', + ' 0 1 2', + ' 3 4 5', + ' 6 7 8', + ' 9 10 11', + ' 12 13 14', + ' 15 16 17'] + + def test_clip1(self, table_type): + """max lines below hard limit of 8 + """ + self._setup(table_type) + lines = self.ts.pformat(max_lines=3, max_width=-1) + assert lines == ['col0 col1 col2', + '---- ---- ----', + ' 0 1 2', + ' 3 4 5', + ' 6 7 8', + ' 9 10 11', + ' 12 13 14', + ' 15 16 17'] + + def test_clip2(self, table_type): + """max lines below hard limit of 8 and output longer than 8 + """ + self._setup(table_type) + lines = self.ts.pformat(max_lines=3, max_width=-1, show_unit=True, show_dtype=True) + assert lines == [' col0 col1 col2', + ' ', + 'int64 int64 int64', + '----- ----- -----', + ' 0 1 2', + ' ... ... ...', + ' 15 16 17', + 'Length = 6 rows'] + + def test_clip3(self, table_type): + """Max lines below hard limit of 8 and max width below hard limit + of 10 + """ + self._setup(table_type) + lines = self.ts.pformat(max_lines=3, max_width=1, show_unit=True) + assert lines == ['col0 ...', + ' ...', + '---- ...', + ' 0 ...', + ' ... ...', + ' 12 ...', + ' 15 ...', + 'Length = 6 rows'] + + def test_clip4(self, table_type): + """Test a range of max_lines""" + self._setup(table_type) + for max_lines in (0, 1, 4, 5, 6, 7, 8, 100, 101, 102, 103, 104, 130): + lines = self.tb.pformat(max_lines=max_lines, show_unit=False) + assert len(lines) == max(8, min(102, max_lines)) + + +@pytest.mark.usefixtures('table_type') +class TestFormat(): + + def test_column_format(self, table_type): + t = table_type([[1, 2], [3, 4]], names=('a', 'b')) + # default (format=None) + assert str(t['a']) == ' a \n---\n 1\n 2' + + # just a plain format string + t['a'].format = '5.2f' + assert str(t['a']) == ' a \n-----\n 1.00\n 2.00' + + # Old-style that is almost new-style + t['a'].format = '{ %4.2f }' + assert str(t['a']) == ' a \n--------\n{ 1.00 }\n{ 2.00 }' + + # New-style that is almost old-style + t['a'].format = '%{0:}' + assert str(t['a']) == ' a \n---\n %1\n %2' + + # New-style with extra spaces + t['a'].format = ' {0:05d} ' + assert str(t['a']) == ' a \n-------\n 00001 \n 00002 ' + + # New-style has precedence + t['a'].format = '%4.2f {0:}' + assert str(t['a']) == ' a \n-------\n%4.2f 1\n%4.2f 2' + + # Invalid format spec + t['a'].format = 'fail' + with pytest.raises(ValueError): + str(t['a']) + + def test_column_format_with_threshold(self, table_type): + from ... import conf + with conf.set_temp('max_lines', 8): + t = table_type([np.arange(20)], names=['a']) + t['a'].format = '%{0:}' + assert str(t['a']).splitlines() == [' a ', + '---', + ' %0', + ' %1', + '...', + '%18', + '%19', + 'Length = 20 rows'] + t['a'].format = '{ %4.2f }' + assert str(t['a']).splitlines() == [' a ', + '---------', + ' { 0.00 }', + ' { 1.00 }', + ' ...', + '{ 18.00 }', + '{ 19.00 }', + 'Length = 20 rows'] + + def test_column_format_func(self, table_type): + # run most of functions twice + # 1) astropy.table.pprint._format_funcs gets populated + # 2) astropy.table.pprint._format_funcs gets used + + t = table_type([[1., 2.], [3, 4]], names=('a', 'b')) + + # mathematical function + t['a'].format = lambda x: str(x * 3.) 
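+ # The same assertion runs twice on purpose: the first str() call
+ # populates astropy.table.pprint._format_funcs, the second uses the
+ # cached entry (see the comment at the top of this test).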
+ assert str(t['a']) == ' a \n---\n3.0\n6.0' + assert str(t['a']) == ' a \n---\n3.0\n6.0' + + def test_column_format_callable(self, table_type): + # run most of functions twice + # 1) astropy.table.pprint._format_funcs gets populated + # 2) astropy.table.pprint._format_funcs gets used + + t = table_type([[1., 2.], [3, 4]], names=('a', 'b')) + + # mathematical function + class format(object): + def __call__(self, x): + return str(x * 3.) + t['a'].format = format() + assert str(t['a']) == ' a \n---\n3.0\n6.0' + assert str(t['a']) == ' a \n---\n3.0\n6.0' + + def test_column_format_func_wrong_number_args(self, table_type): + t = table_type([[1., 2.], [3, 4]], names=('a', 'b')) + + # function that expects wrong number of arguments + def func(a, b): + pass + + t['a'].format = func + with pytest.raises(ValueError): + str(t['a']) + + def test_column_format_func_multiD(self, table_type): + arr = [np.array([[1, 2], + [10, 20]])] + t = table_type(arr, names=['a']) + + # mathematical function + t['a'].format = lambda x: str(x * 3.) + outstr = ' a [2] \n------------\n 3.0 .. 6.0\n30.0 .. 60.0' + assert str(t['a']) == outstr + assert str(t['a']) == outstr + + def test_column_format_func_not_str(self, table_type): + t = table_type([[1., 2.], [3, 4]], names=('a', 'b')) + + # mathematical function + t['a'].format = lambda x: x * 3 + with pytest.raises(ValueError): + str(t['a']) + + def test_column_alignment(self, table_type): + t = table_type([[1], [2], [3], [4]], + names=('long title a', 'long title b', + 'long title c', 'long title d')) + t['long title a'].format = '<' + t['long title b'].format = '^' + t['long title c'].format = '>' + t['long title d'].format = '0=' + assert str(t['long title a']) == 'long title a\n------------\n1 ' + assert str(t['long title b']) == 'long title b\n------------\n 2 ' + assert str(t['long title c']) == 'long title c\n------------\n 3' + assert str(t['long title d']) == 'long title d\n------------\n000000000004' + + +class TestFormatWithMaskedElements(): + + def test_column_format(self): + t = Table([[1, 2, 3], [3, 4, 5]], names=('a', 'b'), masked=True) + t['a'].mask = [True, False, True] + # default (format=None) + assert str(t['a']) == ' a \n---\n --\n 2\n --' + + # just a plain format string + t['a'].format = '5.2f' + assert str(t['a']) == ' a \n-----\n --\n 2.00\n --' + + # Old-style that is almost new-style + t['a'].format = '{ %4.2f }' + assert str(t['a']) == ' a \n--------\n --\n{ 2.00 }\n --' + + # New-style that is almost old-style + t['a'].format = '%{0:}' + assert str(t['a']) == ' a \n---\n --\n %2\n --' + + # New-style with extra spaces + t['a'].format = ' {0:05d} ' + assert str(t['a']) == ' a \n-------\n --\n 00002 \n --' + + # New-style has precedence + t['a'].format = '%4.2f {0:}' + assert str(t['a']) == ' a \n-------\n --\n%4.2f 2\n --' + + def test_column_format_with_threshold(self, table_type): + from ... 
import conf + with conf.set_temp('max_lines', 8): + t = table_type([np.arange(20)], names=['a']) + t['a'].format = '%{0:}' + t['a'].mask[0] = True + t['a'].mask[-1] = True + assert str(t['a']).splitlines() == [' a ', + '---', + ' --', + ' %1', + '...', + '%18', + ' --', + 'Length = 20 rows'] + t['a'].format = '{ %4.2f }' + assert str(t['a']).splitlines() == [' a ', + '---------', + ' --', + ' { 1.00 }', + ' ...', + '{ 18.00 }', + ' --', + 'Length = 20 rows'] + + def test_column_format_func(self): + # run most of functions twice + # 1) astropy.table.pprint._format_funcs gets populated + # 2) astropy.table.pprint._format_funcs gets used + + t = Table([[1., 2., 3.], [3, 4, 5]], names=('a', 'b'), masked=True) + t['a'].mask = [True, False, True] + # mathematical function + t['a'].format = lambda x: str(x * 3.) + assert str(t['a']) == ' a \n---\n --\n6.0\n --' + assert str(t['a']) == ' a \n---\n --\n6.0\n --' + + def test_column_format_func_with_special_masked(self): + # run most of functions twice + # 1) astropy.table.pprint._format_funcs gets populated + # 2) astropy.table.pprint._format_funcs gets used + + t = Table([[1., 2., 3.], [3, 4, 5]], names=('a', 'b'), masked=True) + t['a'].mask = [True, False, True] + # mathematical function + + def format_func(x): + if x is np.ma.masked: + return '!!' + else: + return str(x * 3.) + t['a'].format = format_func + assert str(t['a']) == ' a \n---\n !!\n6.0\n !!' + assert str(t['a']) == ' a \n---\n !!\n6.0\n !!' + + def test_column_format_callable(self): + # run most of functions twice + # 1) astropy.table.pprint._format_funcs gets populated + # 2) astropy.table.pprint._format_funcs gets used + + t = Table([[1., 2., 3.], [3, 4, 5]], names=('a', 'b'), masked=True) + t['a'].mask = [True, False, True] + + # mathematical function + class format(object): + def __call__(self, x): + return str(x * 3.) + t['a'].format = format() + assert str(t['a']) == ' a \n---\n --\n6.0\n --' + assert str(t['a']) == ' a \n---\n --\n6.0\n --' + + def test_column_format_func_wrong_number_args(self): + t = Table([[1., 2.], [3, 4]], names=('a', 'b'), masked=True) + t['a'].mask = [True, False] + + # function that expects wrong number of arguments + def func(a, b): + pass + + t['a'].format = func + with pytest.raises(ValueError): + str(t['a']) + + # but if all are masked, it never gets called + t['a'].mask = [True, True] + assert str(t['a']) == ' a \n---\n --\n --' + + def test_column_format_func_multiD(self): + arr = [np.array([[1, 2], + [10, 20]])] + t = Table(arr, names=['a'], masked=True) + t['a'].mask[0, 1] = True + t['a'].mask[1, 1] = True + # mathematical function + t['a'].format = lambda x: str(x * 3.) + outstr = ' a [2] \n----------\n 3.0 .. --\n30.0 .. --' + assert str(t['a']) == outstr + assert str(t['a']) == outstr + + +def test_pprint_npfloat32(): + """ + Test for #148, that np.float32 cannot by itself be formatted as float, + but has to be converted to a python float. + """ + dat = np.array([1., 2.], dtype=np.float32) + t = Table([dat], names=['a']) + t['a'].format = '5.2f' + assert str(t['a']) == ' a \n-----\n 1.00\n 2.00' + + +def test_pprint_py3_bytes(): + """ + Test for #1346 and #4944. Make sure a bytestring (dtype=S) in Python 3 + is printed correctly (without the "b" prefix like b'string'). + Also make sure special characters are printed in Python 2. 
+ """ + val = str('val') if PY2 else bytes('val', encoding='utf-8') + blah = u'bläh'.encode('utf-8') + dat = np.array([val, blah], dtype=[(str('col'), 'S10')]) + t = table.Table(dat) + assert t['col'].pformat() == ['col ', '----', ' val', u'bläh'] + + +def test_pprint_nameless_col(): + """Regression test for #2213, making sure a nameless column can be printed + using None as the name. + """ + col = table.Column([1., 2.]) + assert str(col).startswith('None') + + +def test_html(): + """Test HTML printing""" + dat = np.array([1., 2.], dtype=np.float32) + t = Table([dat], names=['a']) + + lines = t.pformat(html=True) + assert lines == [''.format(id=id(t)), + u'', + u'', + u'', + u'
+
+    lines = t.pformat(html=True, tableclass='table-striped')
+    assert lines == [
+        '<table id="table{id}" class="table-striped">'.format(id=id(t)),
+        u'<thead><tr><th>a</th></tr></thead>',
+        u'<tr><td>1.0</td></tr>',
+        u'<tr><td>2.0</td></tr>',
+        u'</table>']
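+
+    # A single tableclass string becomes the class attribute of the <table>
+    # tag; a list of classes (next assertion) is joined with spaces.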
+    lines = t.pformat(html=True, tableclass=['table', 'table-striped'])
+    assert lines == [
+        '<table id="table{id}" class="table table-striped">'.format(id=id(t)),
+        u'<thead><tr><th>a</th></tr></thead>',
+        u'<tr><td>1.0</td></tr>',
+        u'<tr><td>2.0</td></tr>',
+        u'</table>']
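+
+
+# Editor's sketch (hedged; not part of the original suite): the lines from
+# pformat(html=True) can be joined and written out for a quick visual check
+# of the tableclass handling verified in test_html above. The helper name
+# and output path are illustrative only.
+def _demo_write_html(path='/tmp/demo_table.html'):
+    t = Table([np.array([1., 2.], dtype=np.float32)], names=['a'])
+    html = '\n'.join(t.pformat(html=True, tableclass='table-striped'))
+    with open(path, 'w') as fh:
+        fh.write(html)
+    return html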
+
+
+def test_align():
+    t = simple_table(2, kinds='iS')
+    assert t.pformat() == [' a   b ',
+                           '--- ---',
+                           '  1   b',
+                           '  2   c']
+    # Use column format attribute
+    t['a'].format = '<'
+    assert t.pformat() == [' a   b ',
+                           '--- ---',
+                           '1     b',
+                           '2     c']
+
+    # Now override column format attribute with various combinations of align
+    tpf = [' a   b ',
+           '--- ---',
+           ' 1   b ',
+           ' 2   c ']
+    for align in ('^', ['^', '^'], ('^', '^')):
+        assert tpf == t.pformat(align=align)
+
+    assert t.pformat(align='<') == [' a   b ',
+                                    '--- ---',
+                                    '1   b  ',
+                                    '2   c  ']
+    assert t.pformat(align='0=') == [' a   b ',
+                                     '--- ---',
+                                     '001 00b',
+                                     '002 00c']
+
+    assert t.pformat(align=['<', '^']) == [' a   b ',
+                                           '--- ---',
+                                           '1    b ',
+                                           '2    c ']
+
+    # Now use fill characters. Stress the system using a fill
+    # character that is the same as an align character.
+    t = simple_table(2, kinds='iS')
+
+    assert t.pformat(align='^^') == [' a   b ',
+                                     '--- ---',
+                                     '^1^ ^b^',
+                                     '^2^ ^c^']
+
+    assert t.pformat(align='^>') == [' a   b ',
+                                     '--- ---',
+                                     '^^1 ^^b',
+                                     '^^2 ^^c']
+
+    assert t.pformat(align='^<') == [' a   b ',
+                                     '--- ---',
+                                     '1^^ b^^',
+                                     '2^^ c^^']
+
+    # Complicated interaction (same as narrative docs example)
+    t1 = Table([[1.0, 2.0], [1, 2]], names=['column1', 'column2'])
+    t1['column1'].format = '#^.2f'
+
+    assert t1.pformat() == ['column1 column2',
+                            '------- -------',
+                            '##1.00#       1',
+                            '##2.00#       2']
+
+    assert t1.pformat(align='!<') == ['column1 column2',
+                                      '------- -------',
+                                      '1.00!!! 1!!!!!!',
+                                      '2.00!!! 2!!!!!!']
+
+    assert t1.pformat(align=[None, '!<']) == ['column1 column2',
+                                              '------- -------',
+                                              '##1.00# 1!!!!!!',
+                                              '##2.00# 2!!!!!!']
+
+    # Zero fill
+    t['a'].format = '+d'
+    assert t.pformat(align='0=') == [' a   b ',
+                                     '--- ---',
+                                     '+01 00b',
+                                     '+02 00c']
+
+    with pytest.raises(ValueError):
+        t.pformat(align=['fail'])
+
+    with pytest.raises(TypeError):
+        t.pformat(align=0)
+
+    with pytest.raises(TypeError):
+        t.pprint(align=0)
+
+    # Make sure pprint() does not raise an exception
+    t.pprint()
+
+    with pytest.raises(ValueError):
+        t.pprint(align=['<', '<', '<'])
+
+    with pytest.raises(ValueError):
+        t.pprint(align='x=')
+
+
+def test_auto_format_func():
+    """Test for #5802 (fix for #5800 where format_func key is not unique)"""
+    t = Table([[1, 2] * u.m])
+    t['col0'].format = '%f'
+    t.pformat()  # Force caching of format function
+
+    qt = QTable(t)
+    qt.pformat()  # Generates exception prior to #5802
+
+
+def test_decode_replace():
+    """
+    Test printing a bytestring column with a value that fails
+    decoding to utf-8 and gets replaced by U+FFFD. See
+    https://docs.python.org/3/library/codecs.html#codecs.replace_errors
+    """
+    t = Table([[b'Z\xf0']])
+    assert t.pformat() == [u'col0', u'----', u'  Z\ufffd']
diff --git a/astropy/table/tests/test_row.py b/astropy/table/tests/test_row.py
new file mode 100644
index 0000000..678e56e
--- /dev/null
+++ b/astropy/table/tests/test_row.py
@@ -0,0 +1,204 @@
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+
+
+# TEST_UNICODE_LITERALS
+
+import sys
+
+import pytest
+import numpy as np
+
+from ... import table
+from ...table import Row
+from ...extern.six.moves import zip
+from .conftest import MaskedTable
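+
+
+# Editor's sketch (hedged; not part of the original suite): a Row is a live
+# view into its parent Table, so writing through the row mutates the table,
+# as test_ref below asserts for unmasked tables.
+def _demo_row_is_view():
+    t = table.Table([[1, 2, 3]], names=['a'])
+    row = t[1]
+    row['a'] = 10           # write through the Row view
+    assert t['a'][1] == 10  # the parent table sees the change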
+ """ + t = table.Table([[1]], dtype=['O'], masked=True) + t['col0'].mask = False + assert t[0]['col0'] == 1 + t['col0'].mask = True + assert t[0]['col0'] is np.ma.masked + + +@pytest.mark.usefixtures('table_types') +class TestRow(): + def _setup(self, table_types): + self._table_type = table_types.Table + self._column_type = table_types.Column + + @property + def t(self): + # py.test wants to run this method once before table_types is run + # to set Table and Column. In this case just return None, which would + # cause any downstream test to fail if this happened in any other context. + if self._column_type is None: + return None + if not hasattr(self, '_t'): + a = self._column_type(name='a', data=[1, 2, 3], dtype='i8') + b = self._column_type(name='b', data=[4, 5, 6], dtype='i8') + self._t = self._table_type([a, b]) + return self._t + + def test_subclass(self, table_types): + """Row is subclass of ndarray and Row""" + self._setup(table_types) + c = Row(self.t, 2) + assert isinstance(c, Row) + + def test_values(self, table_types): + """Row accurately reflects table values and attributes""" + self._setup(table_types) + table = self.t + row = table[1] + assert row['a'] == 2 + assert row['b'] == 5 + assert row[0] == 2 + assert row[1] == 5 + assert row.meta is table.meta + assert row.colnames == table.colnames + assert row.columns is table.columns + with pytest.raises(IndexError): + row[2] + if sys.byteorder == 'little': + assert str(row.dtype) == "[('a', 'i8'), ('b', '>i8')]" + + def test_ref(self, table_types): + """Row is a reference into original table data""" + self._setup(table_types) + table = self.t + row = table[1] + row['a'] = 10 + if table_types.Table is not MaskedTable: + assert table['a'][1] == 10 + + def test_left_equal(self, table_types): + """Compare a table row to the corresponding structured array row""" + self._setup(table_types) + np_t = self.t.as_array() + if table_types.Table is MaskedTable: + with pytest.raises(ValueError): + self.t[0] == np_t[0] + else: + for row, np_row in zip(self.t, np_t): + assert np.all(row == np_row) + + def test_left_not_equal(self, table_types): + """Compare a table row to the corresponding structured array row""" + self._setup(table_types) + np_t = self.t.as_array() + np_t['a'] = [0, 0, 0] + if table_types.Table is MaskedTable: + with pytest.raises(ValueError): + self.t[0] == np_t[0] + else: + for row, np_row in zip(self.t, np_t): + assert np.all(row != np_row) + + def test_right_equal(self, table_types): + """Test right equal""" + self._setup(table_types) + np_t = self.t.as_array() + if table_types.Table is MaskedTable: + with pytest.raises(ValueError): + self.t[0] == np_t[0] + else: + for row, np_row in zip(self.t, np_t): + assert np.all(np_row == row) + + def test_convert_numpy_array(self, table_types): + self._setup(table_types) + d = self.t[1] + + np_data = np.array(d) + if table_types.Table is not MaskedTable: + assert np.all(np_data == d.as_void()) + assert np_data is not d.as_void() + assert d.colnames == list(np_data.dtype.names) + + np_data = np.array(d, copy=False) + if table_types.Table is not MaskedTable: + assert np.all(np_data == d.as_void()) + assert np_data is not d.as_void() + assert d.colnames == list(np_data.dtype.names) + + with pytest.raises(ValueError): + np_data = np.array(d, dtype=[(str('c'), 'i8'), (str('d'), 'i8')]) + + def test_format_row(self, table_types): + """Test formatting row""" + self._setup(table_types) + table = self.t + row = table[0] + assert repr(row).splitlines() == ['<{0} {1}{2}>' + 
+
+    def test_format_row(self, table_types):
+        """Test formatting row"""
+        self._setup(table_types)
+        table = self.t
+        row = table[0]
+        assert repr(row).splitlines() == ['<{0} {1}{2}>'
+                                          .format(row.__class__.__name__,
+                                                  'index=0',
+                                                  ' masked=True' if table.masked else ''),
+                                          '  a     b  ',
+                                          'int64 int64',
+                                          '----- -----',
+                                          '    1     4']
+        assert str(row).splitlines() == [' a   b ',
+                                         '--- ---',
+                                         '  1   4']
+
+        assert row._repr_html_().splitlines() == ['<{0} {1}{2}>'
+                                                  .format(row.__class__.__name__,
+                                                          'index=0',
+                                                          ' masked=True' if table.masked else ''),
+                                                  '<table id="table{0}">'.format(id(table)),
+                                                  '<thead><tr><th>a</th><th>b</th></tr></thead>',
+                                                  '<thead><tr><th>int64</th><th>int64</th></tr></thead>',
+                                                  '<tr><td>1</td><td>4</td></tr>',
+                                                  '</table>']
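+
+    # Editor's sketch (hedged; not part of the original suite): as_void()
+    # snapshots the row into a plain np.void / np.ma.mvoid that no longer
+    # tracks the parent table, which test_as_void below verifies in detail.
+    # The leading underscore keeps pytest from collecting this helper.
+    def _demo_as_void_snapshot(self, table_types):
+        self._setup(table_types)
+        t = self.t
+        row = t[0]
+        frozen = row.as_void()   # detached copy of the row's values
+        t['a'][0] = -1           # mutate the parent table
+        assert frozen['a'] == 1  # the void copy keeps the original value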
+
+    def test_as_void(self, table_types):
+        """Test the as_void() method"""
+        self._setup(table_types)
+        table = self.t
+        row = table[0]
+
+        # If the table is masked but has no masked values, numpy issue
+        # numpy/numpy#483 comes into play. Make sure as_void() handles it.
+        row_void = row.as_void()
+        if table.masked:
+            assert isinstance(row_void, np.ma.mvoid)
+        else:
+            assert isinstance(row_void, np.void)
+        assert row_void['a'] == 1
+        assert row_void['b'] == 4
+
+        # Confirm row is a view of table but row_void is not.
+        table['a'][0] = -100
+        assert row['a'] == -100
+        assert row_void['a'] == 1
+
+        # Make sure it works for a table that has masked elements
+        if table.masked:
+            table['a'].mask = True
+
+            # row_void is not a view, need to re-make
+            assert row_void['a'] == 1
+            row_void = row.as_void()  # but row is a view
+            assert row['a'] is np.ma.masked
+
+    def test_row_and_as_void_with_objects(self, table_types):
+        """Test row item access and the as_void() method with object columns"""
+        t = table_types.Table([[{'a': 1}, {'b': 2}]], names=('a',))
+        assert t[0][0] == {'a': 1}
+        assert t[0]['a'] == {'a': 1}
+        assert t[0].as_void()[0] == {'a': 1}
+        assert t[0].as_void()['a'] == {'a': 1}
+
+    def test_bounds_checking(self, table_types):
+        """Row gives index error upon creation for out-of-bounds index"""
+        self._setup(table_types)
+        for ibad in (-5, -4, 3, 4):
+            with pytest.raises(IndexError):
+                self.t[ibad]
diff --git a/astropy/table/tests/test_subclass.py b/astropy/table/tests/test_subclass.py
new file mode 100644
index 0000000..d72918c
--- /dev/null
+++ b/astropy/table/tests/test_subclass.py
@@ -0,0 +1,99 @@
+# -*- coding: utf-8 -*-
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+
+# TEST_UNICODE_LITERALS
+
+from ... import table
+from .. import pprint
+
+
+class MyRow(table.Row):
+    def __str__(self):
+        return str(self.as_void())
+
+
+class MyColumn(table.Column):
+    pass
+
+
+class MyMaskedColumn(table.MaskedColumn):
+    pass
+
+
+class MyTableColumns(table.TableColumns):
+    pass
+
+
+class MyTableFormatter(pprint.TableFormatter):
+    pass
+
+
+class MyTable(table.Table):
+    Row = MyRow
+    Column = MyColumn
+    MaskedColumn = MyMaskedColumn
+    TableColumns = MyTableColumns
+    TableFormatter = MyTableFormatter
+
+
+def test_simple_subclass():
+    t = MyTable([[1, 2], [3, 4]])
+    row = t[0]
+    assert isinstance(row, MyRow)
+    assert isinstance(t['col0'], MyColumn)
+    assert isinstance(t.columns, MyTableColumns)
+    assert isinstance(t.formatter, MyTableFormatter)
+
+    t2 = MyTable(t)
+    row = t2[0]
+    assert isinstance(row, MyRow)
+    assert str(row) == '(1, 3)'
+
+    t3 = table.Table(t)
+    row = t3[0]
+    assert not isinstance(row, MyRow)
+    assert str(row) != '(1, 3)'
+
+    t = MyTable([[1, 2], [3, 4]], masked=True)
+    row = t[0]
+    assert isinstance(row, MyRow)
+    assert str(row) == '(1, 3)'
+    assert isinstance(t['col0'], MyMaskedColumn)
+    assert isinstance(t.formatter, MyTableFormatter)
+
+
+class ParamsRow(table.Row):
+    """
+    Row class that allows access to an arbitrary dict of parameters
+    stored as a dict object in the ``params`` column.
+ """ + + def __getitem__(self, item): + if item not in self.colnames: + return super(ParamsRow, self).__getitem__('params')[item] + else: + return super(ParamsRow, self).__getitem__(item) + + def keys(self): + out = [name for name in self.colnames if name != 'params'] + params = [key.lower() for key in sorted(self['params'])] + return out + params + + def values(self): + return [self[key] for key in self.keys()] + + +class ParamsTable(table.Table): + Row = ParamsRow + + +def test_params_table(): + t = ParamsTable(names=['a', 'b', 'params'], dtype=['i', 'f', 'O']) + t.add_row((1, 2.0, {'x': 1.5, 'y': 2.5})) + t.add_row((2, 3.0, {'z': 'hello', 'id': 123123})) + assert t['params'][0] == {'x': 1.5, 'y': 2.5} + assert t[0]['params'] == {'x': 1.5, 'y': 2.5} + assert t[0]['y'] == 2.5 + assert t[1]['id'] == 123123 + assert list(t[1].keys()) == ['a', 'b', 'id', 'z'] + assert list(t[1].values()) == [2, 3.0, 123123, 'hello'] diff --git a/astropy/table/tests/test_table.py b/astropy/table/tests/test_table.py new file mode 100644 index 0000000..6b40a75 --- /dev/null +++ b/astropy/table/tests/test_table.py @@ -0,0 +1,1952 @@ +# -*- coding: utf-8 -*- +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +# TEST_UNICODE_LITERALS + +import copy +import gc +import sys +from collections import OrderedDict + +import pytest +import numpy as np +from numpy.testing import assert_allclose + +from ...extern import six +from ...io import fits +from ...tests.helper import (assert_follows_unicode_guidelines, + ignore_warnings, catch_warnings) +from ...utils.data import get_pkg_data_filename +from ... import table +from ... import units as u +from .conftest import MaskedTable + +from ...extern.six.moves import zip, range, cStringIO as StringIO + +try: + with ignore_warnings(DeprecationWarning): + # Ignore DeprecationWarning on pandas import in Python 3.5--see + # https://github.com/astropy/astropy/issues/4380 + import pandas # pylint: disable=W0611 +except ImportError: + HAS_PANDAS = False +else: + HAS_PANDAS = True + + +class SetupData(object): + def _setup(self, table_types): + self._table_type = table_types.Table + self._column_type = table_types.Column + + @property + def a(self): + if self._column_type is not None: + if not hasattr(self, '_a'): + self._a = self._column_type( + [1, 2, 3], name='a', format='%d', + meta={'aa': [0, 1, 2, 3, 4]}) + return self._a + + @property + def b(self): + if self._column_type is not None: + if not hasattr(self, '_b'): + self._b = self._column_type( + [4, 5, 6], name='b', format='%d', meta={'aa': 1}) + return self._b + + @property + def c(self): + if self._column_type is not None: + if not hasattr(self, '_c'): + self._c = self._column_type([7, 8, 9], 'c') + return self._c + + @property + def d(self): + if self._column_type is not None: + if not hasattr(self, '_d'): + self._d = self._column_type([7, 8, 7], 'd') + return self._d + + @property + def obj(self): + if self._column_type is not None: + if not hasattr(self, '_obj'): + self._obj = self._column_type([1, 'string', 3], 'obj', dtype='O') + return self._obj + + @property + def t(self): + if self._table_type is not None: + if not hasattr(self, '_t'): + self._t = self._table_type([self.a, self.b]) + return self._t + + +@pytest.mark.usefixtures('table_types') +class TestSetTableColumn(SetupData): + + def test_set_row(self, table_types): + """Set a row from a tuple of values""" + self._setup(table_types) + t = table_types.Table([self.a, self.b]) + t[1] = (20, 21) + assert t['a'][0] == 1 + assert t['a'][1] == 20 + assert 
t['a'][2] == 3 + assert t['b'][0] == 4 + assert t['b'][1] == 21 + assert t['b'][2] == 6 + + def test_set_row_existing(self, table_types): + """Set a row from another existing row""" + self._setup(table_types) + t = table_types.Table([self.a, self.b]) + t[0] = t[1] + assert t[0][0] == 2 + assert t[0][1] == 5 + + def test_set_row_fail_1(self, table_types): + """Set a row from an incorrectly-sized or typed set of values""" + self._setup(table_types) + t = table_types.Table([self.a, self.b]) + with pytest.raises(ValueError): + t[1] = (20, 21, 22) + with pytest.raises(TypeError): + t[1] = 0 + + def test_set_row_fail_2(self, table_types): + """Set a row from an incorrectly-typed tuple of values""" + self._setup(table_types) + t = table_types.Table([self.a, self.b]) + with pytest.raises(ValueError): + t[1] = ('abc', 'def') + + def test_set_new_col_new_table(self, table_types): + """Create a new column in empty table using the item access syntax""" + self._setup(table_types) + t = table_types.Table() + t['aa'] = self.a + # Test that the new column name is 'aa' and that the values match + assert np.all(t['aa'] == self.a) + assert t.colnames == ['aa'] + + def test_set_new_col_new_table_quantity(self, table_types): + """Create a new column (from a quantity) in empty table using the item access syntax""" + self._setup(table_types) + t = table_types.Table() + + t['aa'] = np.array([1, 2, 3]) * u.m + assert np.all(t['aa'] == np.array([1, 2, 3])) + assert t['aa'].unit == u.m + + t['bb'] = 3 * u.m + assert np.all(t['bb'] == 3) + assert t['bb'].unit == u.m + + def test_set_new_col_existing_table(self, table_types): + """Create a new column in an existing table using the item access syntax""" + self._setup(table_types) + t = table_types.Table([self.a]) + + # Add a column + t['bb'] = self.b + assert np.all(t['bb'] == self.b) + assert t.colnames == ['a', 'bb'] + assert t['bb'].meta == self.b.meta + assert t['bb'].format == self.b.format + + # Add another column + t['c'] = t['a'] + assert np.all(t['c'] == t['a']) + assert t.colnames == ['a', 'bb', 'c'] + assert t['c'].meta == t['a'].meta + assert t['c'].format == t['a'].format + + # Add a multi-dimensional column + t['d'] = table_types.Column(np.arange(12).reshape(3, 2, 2)) + assert t['d'].shape == (3, 2, 2) + assert t['d'][0, 0, 1] == 1 + + # Add column from a list + t['e'] = ['hello', 'the', 'world'] + assert np.all(t['e'] == np.array(['hello', 'the', 'world'])) + + # Make sure setting existing column still works + t['e'] = ['world', 'hello', 'the'] + assert np.all(t['e'] == np.array(['world', 'hello', 'the'])) + + # Add a column via broadcasting + t['f'] = 10 + assert np.all(t['f'] == 10) + + # Add a column from a Quantity + t['g'] = np.array([1, 2, 3]) * u.m + assert np.all(t['g'].data == np.array([1, 2, 3])) + assert t['g'].unit == u.m + + # Add a column from a (scalar) Quantity + t['g'] = 3 * u.m + assert np.all(t['g'].data == 3) + assert t['g'].unit == u.m + + def test_set_new_unmasked_col_existing_table(self, table_types): + """Create a new column in an existing table using the item access syntax""" + self._setup(table_types) + t = table_types.Table([self.a]) # masked or unmasked + b = table.Column(name='b', data=[1, 2, 3]) # unmasked + t['b'] = b + assert np.all(t['b'] == b) + + def test_set_new_masked_col_existing_table(self, table_types): + """Create a new column in an existing table using the item access syntax""" + self._setup(table_types) + t = table_types.Table([self.a]) # masked or unmasked + b = table.MaskedColumn(name='b', data=[1, 2, 3]) # 
masked + t['b'] = b + assert np.all(t['b'] == b) + + def test_set_new_col_existing_table_fail(self, table_types): + """Generate failure when creating a new column using the item access syntax""" + self._setup(table_types) + t = table_types.Table([self.a]) + # Wrong size + with pytest.raises(ValueError): + t['b'] = [1, 2] + + +@pytest.mark.usefixtures('table_types') +class TestEmptyData(): + + def test_1(self, table_types): + t = table_types.Table() + t.add_column(table_types.Column(name='a', dtype=int, length=100)) + assert len(t['a']) == 100 + + def test_2(self, table_types): + t = table_types.Table() + t.add_column(table_types.Column(name='a', dtype=int, shape=(3, ), length=100)) + assert len(t['a']) == 100 + + def test_3(self, table_types): + t = table_types.Table() # length is not given + t.add_column(table_types.Column(name='a', dtype=int)) + assert len(t['a']) == 0 + + def test_4(self, table_types): + t = table_types.Table() # length is not given + t.add_column(table_types.Column(name='a', dtype=int, shape=(3, 4))) + assert len(t['a']) == 0 + + def test_5(self, table_types): + t = table_types.Table() + t.add_column(table_types.Column(name='a')) # dtype is not specified + assert len(t['a']) == 0 + + def test_add_via_setitem_and_slice(self, table_types): + """Test related to #3023 where a MaskedColumn is created with name=None + and then gets changed to name='a'. After PR #2790 this test fails + without the #3023 fix.""" + t = table_types.Table() + t['a'] = table_types.Column([1, 2, 3]) + t2 = t[:] + assert t2.colnames == t.colnames + + +@pytest.mark.usefixtures('table_types') +class TestNewFromColumns(): + + def test_simple(self, table_types): + cols = [table_types.Column(name='a', data=[1, 2, 3]), + table_types.Column(name='b', data=[4, 5, 6], dtype=np.float32)] + t = table_types.Table(cols) + assert np.all(t['a'].data == np.array([1, 2, 3])) + assert np.all(t['b'].data == np.array([4, 5, 6], dtype=np.float32)) + assert type(t['b'][1]) is np.float32 + + def test_from_np_array(self, table_types): + cols = [table_types.Column(name='a', data=np.array([1, 2, 3], dtype=np.int64), + dtype=np.float64), + table_types.Column(name='b', data=np.array([4, 5, 6], dtype=np.float32))] + t = table_types.Table(cols) + assert np.all(t['a'] == np.array([1, 2, 3], dtype=np.float64)) + assert np.all(t['b'] == np.array([4, 5, 6], dtype=np.float32)) + assert type(t['a'][1]) is np.float64 + assert type(t['b'][1]) is np.float32 + + def test_size_mismatch(self, table_types): + cols = [table_types.Column(name='a', data=[1, 2, 3]), + table_types.Column(name='b', data=[4, 5, 6, 7])] + with pytest.raises(ValueError): + table_types.Table(cols) + + def test_name_none(self, table_types): + """Column with name=None can init a table whether or not names are supplied""" + c = table_types.Column(data=[1, 2], name='c') + d = table_types.Column(data=[3, 4]) + t = table_types.Table([c, d], names=(None, 'd')) + assert t.colnames == ['c', 'd'] + t = table_types.Table([c, d]) + assert t.colnames == ['c', 'col1'] + + +@pytest.mark.usefixtures('table_types') +class TestReverse(): + + def test_reverse(self, table_types): + t = table_types.Table([[1, 2, 3], + ['a', 'b', 'cc']]) + t.reverse() + assert np.all(t['col0'] == np.array([3, 2, 1])) + assert np.all(t['col1'] == np.array(['cc', 'b', 'a'])) + + t2 = table_types.Table(t, copy=False) + assert np.all(t2['col0'] == np.array([3, 2, 1])) + assert np.all(t2['col1'] == np.array(['cc', 'b', 'a'])) + + t2 = table_types.Table(t, copy=True) + assert np.all(t2['col0'] == np.array([3, 2, 
1])) + assert np.all(t2['col1'] == np.array(['cc', 'b', 'a'])) + + t2.sort('col0') + assert np.all(t2['col0'] == np.array([1, 2, 3])) + assert np.all(t2['col1'] == np.array(['a', 'b', 'cc'])) + + def test_reverse_big(self, table_types): + x = np.arange(10000) + y = x + 1 + t = table_types.Table([x, y], names=('x', 'y')) + t.reverse() + assert np.all(t['x'] == x[::-1]) + assert np.all(t['y'] == y[::-1]) + + +@pytest.mark.usefixtures('table_types') +class TestColumnAccess(): + + def test_1(self, table_types): + t = table_types.Table() + with pytest.raises(KeyError): + t['a'] + + def test_2(self, table_types): + t = table_types.Table() + t.add_column(table_types.Column(name='a', data=[1, 2, 3])) + assert np.all(t['a'] == np.array([1, 2, 3])) + with pytest.raises(KeyError): + t['b'] # column does not exist + + def test_itercols(self, table_types): + names = ['a', 'b', 'c'] + t = table_types.Table([[1], [2], [3]], names=names) + for name, col in zip(names, t.itercols()): + assert name == col.name + assert isinstance(col, table_types.Column) + + +@pytest.mark.usefixtures('table_types') +class TestAddLength(SetupData): + + def test_right_length(self, table_types): + self._setup(table_types) + t = table_types.Table([self.a]) + t.add_column(self.b) + + def test_too_long(self, table_types): + self._setup(table_types) + t = table_types.Table([self.a]) + with pytest.raises(ValueError): + t.add_column(table_types.Column(name='b', data=[4, 5, 6, 7])) # data too long + + def test_too_short(self, table_types): + self._setup(table_types) + t = table_types.Table([self.a]) + with pytest.raises(ValueError): + t.add_column(table_types.Column(name='b', data=[4, 5])) # data too short + + +@pytest.mark.usefixtures('table_types') +class TestAddPosition(SetupData): + + def test_1(self, table_types): + self._setup(table_types) + t = table_types.Table() + t.add_column(self.a, 0) + + def test_2(self, table_types): + self._setup(table_types) + t = table_types.Table() + t.add_column(self.a, 1) + + def test_3(self, table_types): + self._setup(table_types) + t = table_types.Table() + t.add_column(self.a, -1) + + def test_5(self, table_types): + self._setup(table_types) + t = table_types.Table() + with pytest.raises(ValueError): + t.index_column('b') + + def test_6(self, table_types): + self._setup(table_types) + t = table_types.Table() + t.add_column(self.a) + t.add_column(self.b) + assert t.columns.keys() == ['a', 'b'] + + def test_7(self, table_types): + self._setup(table_types) + t = table_types.Table([self.a]) + t.add_column(self.b, t.index_column('a')) + assert t.columns.keys() == ['b', 'a'] + + def test_8(self, table_types): + self._setup(table_types) + t = table_types.Table([self.a]) + t.add_column(self.b, t.index_column('a') + 1) + assert t.columns.keys() == ['a', 'b'] + + def test_9(self, table_types): + self._setup(table_types) + t = table_types.Table() + t.add_column(self.a) + t.add_column(self.b, t.index_column('a') + 1) + t.add_column(self.c, t.index_column('b')) + assert t.columns.keys() == ['a', 'c', 'b'] + + def test_10(self, table_types): + self._setup(table_types) + t = table_types.Table() + t.add_column(self.a) + ia = t.index_column('a') + t.add_column(self.b, ia + 1) + t.add_column(self.c, ia) + assert t.columns.keys() == ['c', 'a', 'b'] + + +@pytest.mark.usefixtures('table_types') +class TestAddName(SetupData): + + def test_override_name(self, table_types): + self._setup(table_types) + t = table_types.Table() + + # Check that we can override the name of the input column in the Table + 
t.add_column(self.a, name='b') + t.add_column(self.b, name='a') + assert t.columns.keys() == ['b', 'a'] + # Check that we did not change the name of the input column + assert self.a.info.name == 'a' + assert self.b.info.name == 'b' + + # Now test with an input column from another table + t2 = table_types.Table() + t2.add_column(t['a'], name='c') + assert t2.columns.keys() == ['c'] + # Check that we did not change the name of the input column + assert t.columns.keys() == ['b', 'a'] + + # Check that we can give a name if none was present + col = table_types.Column([1, 2, 3]) + t.add_column(col, name='c') + assert t.columns.keys() == ['b', 'a', 'c'] + + def test_default_name(self, table_types): + t = table_types.Table() + col = table_types.Column([1, 2, 3]) + t.add_column(col) + assert t.columns.keys() == ['col0'] + + +@pytest.mark.usefixtures('table_types') +class TestInitFromTable(SetupData): + + def test_from_table_cols(self, table_types): + """Ensure that using cols from an existing table gives + a clean copy. + """ + self._setup(table_types) + t = self.t + cols = t.columns + # Construct Table with cols via Table._new_from_cols + t2a = table_types.Table([cols['a'], cols['b'], self.c]) + + # Construct with add_column + t2b = table_types.Table() + t2b.add_column(cols['a']) + t2b.add_column(cols['b']) + t2b.add_column(self.c) + + t['a'][1] = 20 + t['b'][1] = 21 + for t2 in [t2a, t2b]: + t2['a'][2] = 10 + t2['b'][2] = 11 + t2['c'][2] = 12 + t2.columns['a'].meta['aa'][3] = 10 + assert np.all(t['a'] == np.array([1, 20, 3])) + assert np.all(t['b'] == np.array([4, 21, 6])) + assert np.all(t2['a'] == np.array([1, 2, 10])) + assert np.all(t2['b'] == np.array([4, 5, 11])) + assert np.all(t2['c'] == np.array([7, 8, 12])) + assert t2['a'].name == 'a' + assert t2.columns['a'].meta['aa'][3] == 10 + assert t.columns['a'].meta['aa'][3] == 3 + + +@pytest.mark.usefixtures('table_types') +class TestAddColumns(SetupData): + + def test_add_columns1(self, table_types): + self._setup(table_types) + t = table_types.Table() + t.add_columns([self.a, self.b, self.c]) + assert t.colnames == ['a', 'b', 'c'] + + def test_add_columns2(self, table_types): + self._setup(table_types) + t = table_types.Table([self.a, self.b]) + t.add_columns([self.c, self.d]) + assert t.colnames == ['a', 'b', 'c', 'd'] + assert np.all(t['c'] == np.array([7, 8, 9])) + + def test_add_columns3(self, table_types): + self._setup(table_types) + t = table_types.Table([self.a, self.b]) + t.add_columns([self.c, self.d], indexes=[1, 0]) + assert t.colnames == ['d', 'a', 'c', 'b'] + + def test_add_columns4(self, table_types): + self._setup(table_types) + t = table_types.Table([self.a, self.b]) + t.add_columns([self.c, self.d], indexes=[0, 0]) + assert t.colnames == ['c', 'd', 'a', 'b'] + + def test_add_columns5(self, table_types): + self._setup(table_types) + t = table_types.Table([self.a, self.b]) + t.add_columns([self.c, self.d], indexes=[2, 2]) + assert t.colnames == ['a', 'b', 'c', 'd'] + + def test_add_columns6(self, table_types): + """Check that we can override column names.""" + self._setup(table_types) + t = table_types.Table() + t.add_columns([self.a, self.b, self.c], names=['b', 'c', 'a']) + assert t.colnames == ['b', 'c', 'a'] + + def test_add_columns7(self, table_types): + """Check that default names are used when appropriate.""" + t = table_types.Table() + col0 = table_types.Column([1, 2, 3]) + col1 = table_types.Column([4, 5, 3]) + t.add_columns([col0, col1]) + assert t.colnames == ['col0', 'col1'] + + def test_add_duplicate_column(self, 
table_types): + self._setup(table_types) + t = table_types.Table() + t.add_column(self.a) + with pytest.raises(ValueError): + t.add_column(table_types.Column(name='a', data=[0, 1, 2])) + t.add_column(table_types.Column(name='a', data=[0, 1, 2]), + rename_duplicate=True) + t.add_column(self.b) + t.add_column(self.c) + assert t.colnames == ['a', 'a_1', 'b', 'c'] + t.add_column(table_types.Column(name='a', data=[0, 1, 2]), + rename_duplicate=True) + assert t.colnames == ['a', 'a_1', 'b', 'c', 'a_2'] + + # test adding column from a separate Table + t1 = table_types.Table() + t1.add_column(self.a) + with pytest.raises(ValueError): + t.add_column(t1['a']) + t.add_column(t1['a'], rename_duplicate=True) + + t1['a'][0] = 100 # Change original column + assert t.colnames == ['a', 'a_1', 'b', 'c', 'a_2', 'a_3'] + assert t1.colnames == ['a'] + + # Check new column didn't change (since name conflict forced a copy) + assert t['a_3'][0] == self.a[0] + + def test_add_duplicate_columns(self, table_types): + self._setup(table_types) + t = table_types.Table([self.a, self.b, self.c]) + with pytest.raises(ValueError): + t.add_columns([table_types.Column(name='a', data=[0, 1, 2]), table_types.Column(name='b', data=[0, 1, 2])]) + t.add_columns([table_types.Column(name='a', data=[0, 1, 2]), + table_types.Column(name='b', data=[0, 1, 2])], + rename_duplicate=True) + t.add_column(self.d) + assert t.colnames == ['a', 'b', 'c', 'a_1', 'b_1', 'd'] + + +@pytest.mark.usefixtures('table_types') +class TestAddRow(SetupData): + + @property + def b(self): + if self._column_type is not None: + if not hasattr(self, '_b'): + self._b = self._column_type(name='b', data=[4.0, 5.1, 6.2]) + return self._b + + @property + def c(self): + if self._column_type is not None: + if not hasattr(self, '_c'): + self._c = self._column_type(name='c', data=['7', '8', '9']) + return self._c + + @property + def d(self): + if self._column_type is not None: + if not hasattr(self, '_d'): + self._d = self._column_type(name='d', data=[[1, 2], [3, 4], [5, 6]]) + return self._d + + @property + def t(self): + if self._table_type is not None: + if not hasattr(self, '_t'): + self._t = self._table_type([self.a, self.b, self.c]) + return self._t + + def test_add_none_to_empty_table(self, table_types): + self._setup(table_types) + t = table_types.Table(names=('a', 'b', 'c'), dtype=('(2,)i', 'S4', 'O')) + t.add_row() + assert np.all(t['a'][0] == [0, 0]) + assert t['b'][0] == '' + assert t['c'][0] == 0 + t.add_row() + assert np.all(t['a'][1] == [0, 0]) + assert t['b'][1] == '' + assert t['c'][1] == 0 + + def test_add_stuff_to_empty_table(self, table_types): + self._setup(table_types) + t = table_types.Table(names=('a', 'b', 'obj'), dtype=('(2,)i', 'S8', 'O')) + t.add_row([[1, 2], 'hello', 'world']) + assert np.all(t['a'][0] == [1, 2]) + assert t['b'][0] == 'hello' + assert t['obj'][0] == 'world' + # Make sure it is not repeating last row but instead + # adding zeros (as documented) + t.add_row() + assert np.all(t['a'][1] == [0, 0]) + assert t['b'][1] == '' + assert t['obj'][1] == 0 + + def test_add_table_row(self, table_types): + self._setup(table_types) + t = self.t + t['d'] = self.d + t2 = table_types.Table([self.a, self.b, self.c, self.d]) + t.add_row(t2[0]) + assert len(t) == 4 + assert np.all(t['a'] == np.array([1, 2, 3, 1])) + assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 4.0])) + assert np.all(t['c'] == np.array(['7', '8', '9', '7'])) + assert np.all(t['d'] == np.array([[1, 2], [3, 4], [5, 6], [1, 2]])) + + def test_add_table_row_obj(self, 
table_types):
+        self._setup(table_types)
+        t = table_types.Table([self.a, self.b, self.obj])
+        t.add_row([1, 4.0, [10]])
+        assert len(t) == 4
+        assert np.all(t['a'] == np.array([1, 2, 3, 1]))
+        assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 4.0]))
+        assert np.all(t['obj'] == np.array([1, 'string', 3, [10]], dtype='O'))
+
+    def test_add_qtable_row_multidimensional(self):
+        q = [[1, 2], [3, 4]] * u.m
+        qt = table.QTable([q])
+        qt.add_row(([5, 6] * u.km,))
+        assert np.all(qt['col0'] == [[1, 2], [3, 4], [5000, 6000]] * u.m)
+
+    def test_add_with_tuple(self, table_types):
+        self._setup(table_types)
+        t = self.t
+        t.add_row((4, 7.2, '1'))
+        assert len(t) == 4
+        assert np.all(t['a'] == np.array([1, 2, 3, 4]))
+        assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 7.2]))
+        assert np.all(t['c'] == np.array(['7', '8', '9', '1']))
+
+    def test_add_with_list(self, table_types):
+        self._setup(table_types)
+        t = self.t
+        t.add_row([4, 7.2, '10'])
+        assert len(t) == 4
+        assert np.all(t['a'] == np.array([1, 2, 3, 4]))
+        assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 7.2]))
+        # '10' is truncated to '1' because column 'c' holds 1-character strings
+        assert np.all(t['c'] == np.array(['7', '8', '9', '1']))
+
+    def test_add_with_dict(self, table_types):
+        self._setup(table_types)
+        t = self.t
+        t.add_row({'a': 4, 'b': 7.2})
+        assert len(t) == 4
+        assert np.all(t['a'] == np.array([1, 2, 3, 4]))
+        assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 7.2]))
+        if t.masked:
+            assert np.all(t['c'] == np.array(['7', '8', '9', '7']))
+        else:
+            assert np.all(t['c'] == np.array(['7', '8', '9', '']))
+
+    def test_add_with_none(self, table_types):
+        self._setup(table_types)
+        t = self.t
+        t.add_row()
+        assert len(t) == 4
+        assert np.all(t['a'].data == np.array([1, 2, 3, 0]))
+        assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 0.0]))
+        assert np.all(t['c'].data == np.array(['7', '8', '9', '']))
+
+    def test_add_missing_column(self, table_types):
+        self._setup(table_types)
+        t = self.t
+        with pytest.raises(ValueError):
+            t.add_row({'bad_column': 1})
+
+    def test_wrong_size_tuple(self, table_types):
+        self._setup(table_types)
+        t = self.t
+        with pytest.raises(ValueError):
+            t.add_row((1, 2))
+
+    def test_wrong_vals_type(self, table_types):
+        self._setup(table_types)
+        t = self.t
+        with pytest.raises(TypeError):
+            t.add_row(1)
+
+    def test_add_row_failures(self, table_types):
+        self._setup(table_types)
+        t = self.t
+        t_copy = table_types.Table(t, copy=True)
+        # Wrong number of columns
+        try:
+            t.add_row([1, 2, 3, 4])
+        except ValueError:
+            pass
+        assert len(t) == 3
+        assert np.all(t.as_array() == t_copy.as_array())
+        # Wrong data type
+        try:
+            t.add_row(['one', 2, 3])
+        except ValueError:
+            pass
+        assert len(t) == 3
+        assert np.all(t.as_array() == t_copy.as_array())
+
+    def test_insert_table_row(self, table_types):
+        """
+        Light testing of Table.insert_row() method. The deep testing is done
+        via the add_row() tests, which call insert_row(index=len(self), ...),
+        so here we just test that the added index parameter is handled
+        correctly.
+ """ + self._setup(table_types) + row = (10, 40.0, 'x', [10, 20]) + for index in range(-3, 4): + indices = np.insert(np.arange(3), index, 3) + t = table_types.Table([self.a, self.b, self.c, self.d]) + t2 = t.copy() + t.add_row(row) # By now we know this works + t2.insert_row(index, row) + for name in t.colnames: + if t[name].dtype.kind == 'f': + assert np.allclose(t[name][indices], t2[name]) + else: + assert np.all(t[name][indices] == t2[name]) + + for index in (-4, 4): + t = table_types.Table([self.a, self.b, self.c, self.d]) + with pytest.raises(IndexError): + t.insert_row(index, row) + + +@pytest.mark.usefixtures('table_types') +class TestTableColumn(SetupData): + + def test_column_view(self, table_types): + self._setup(table_types) + t = self.t + a = t.columns['a'] + a[2] = 10 + assert t['a'][2] == 10 + + +@pytest.mark.usefixtures('table_types') +class TestArrayColumns(SetupData): + + def test_1d(self, table_types): + self._setup(table_types) + b = table_types.Column(name='b', dtype=int, shape=(2, ), length=3) + t = table_types.Table([self.a]) + t.add_column(b) + assert t['b'].shape == (3, 2) + assert t['b'][0].shape == (2, ) + + def test_2d(self, table_types): + self._setup(table_types) + b = table_types.Column(name='b', dtype=int, shape=(2, 4), length=3) + t = table_types.Table([self.a]) + t.add_column(b) + assert t['b'].shape == (3, 2, 4) + assert t['b'][0].shape == (2, 4) + + def test_3d(self, table_types): + self._setup(table_types) + t = table_types.Table([self.a]) + b = table_types.Column(name='b', dtype=int, shape=(2, 4, 6), length=3) + t.add_column(b) + assert t['b'].shape == (3, 2, 4, 6) + assert t['b'][0].shape == (2, 4, 6) + + +@pytest.mark.usefixtures('table_types') +class TestRemove(SetupData): + + @property + def t(self): + if self._table_type is not None: + if not hasattr(self, '_t'): + self._t = self._table_type([self.a]) + return self._t + + @property + def t2(self): + if self._table_type is not None: + if not hasattr(self, '_t2'): + self._t2 = self._table_type([self.a, self.b, self.c]) + return self._t2 + + def test_1(self, table_types): + self._setup(table_types) + self.t.remove_columns('a') + assert self.t.columns.keys() == [] + assert self.t.as_array() is None + + def test_2(self, table_types): + self._setup(table_types) + self.t.add_column(self.b) + self.t.remove_columns('a') + assert self.t.columns.keys() == ['b'] + assert self.t.dtype.names == ('b',) + assert np.all(self.t['b'] == np.array([4, 5, 6])) + + def test_3(self, table_types): + """Check remove_columns works for a single column with a name of + more than one character. 
Regression test against #2699""" + self._setup(table_types) + self.t['new_column'] = self.t['a'] + assert 'new_column' in self.t.columns.keys() + self.t.remove_columns('new_column') + assert 'new_column' not in self.t.columns.keys() + + def test_remove_nonexistent_row(self, table_types): + self._setup(table_types) + with pytest.raises(IndexError): + self.t.remove_row(4) + + def test_remove_row_0(self, table_types): + self._setup(table_types) + self.t.add_column(self.b) + self.t.add_column(self.c) + self.t.remove_row(0) + assert self.t.colnames == ['a', 'b', 'c'] + assert np.all(self.t['b'] == np.array([5, 6])) + + def test_remove_row_1(self, table_types): + self._setup(table_types) + self.t.add_column(self.b) + self.t.add_column(self.c) + self.t.remove_row(1) + assert self.t.colnames == ['a', 'b', 'c'] + assert np.all(self.t['a'] == np.array([1, 3])) + + def test_remove_row_2(self, table_types): + self._setup(table_types) + self.t.add_column(self.b) + self.t.add_column(self.c) + self.t.remove_row(2) + assert self.t.colnames == ['a', 'b', 'c'] + assert np.all(self.t['c'] == np.array([7, 8])) + + def test_remove_row_slice(self, table_types): + self._setup(table_types) + self.t.add_column(self.b) + self.t.add_column(self.c) + self.t.remove_rows(slice(0, 2, 1)) + assert self.t.colnames == ['a', 'b', 'c'] + assert np.all(self.t['c'] == np.array([9])) + + def test_remove_row_list(self, table_types): + self._setup(table_types) + self.t.add_column(self.b) + self.t.add_column(self.c) + self.t.remove_rows([0, 2]) + assert self.t.colnames == ['a', 'b', 'c'] + assert np.all(self.t['c'] == np.array([8])) + + def test_remove_row_preserves_meta(self, table_types): + self._setup(table_types) + self.t.add_column(self.b) + self.t.remove_rows([0, 2]) + assert self.t['a'].meta == {'aa': [0, 1, 2, 3, 4]} + assert self.t.dtype == np.dtype([(str('a'), 'int'), + (str('b'), 'int')]) + + def test_delitem1(self, table_types): + self._setup(table_types) + del self.t['a'] + assert self.t.columns.keys() == [] + assert self.t.as_array() is None + + def test_delitem2(self, table_types): + self._setup(table_types) + del self.t2['b'] + assert self.t2.colnames == ['a', 'c'] + + def test_delitems(self, table_types): + self._setup(table_types) + del self.t2['a', 'b'] + assert self.t2.colnames == ['c'] + + def test_delitem_fail(self, table_types): + self._setup(table_types) + with pytest.raises(KeyError): + del self.t['d'] + + +@pytest.mark.usefixtures('table_types') +class TestKeep(SetupData): + + def test_1(self, table_types): + self._setup(table_types) + t = table_types.Table([self.a, self.b]) + t.keep_columns([]) + assert t.columns.keys() == [] + assert t.as_array() is None + + def test_2(self, table_types): + self._setup(table_types) + t = table_types.Table([self.a, self.b]) + t.keep_columns('b') + assert t.columns.keys() == ['b'] + assert t.dtype.names == ('b',) + assert np.all(t['b'] == np.array([4, 5, 6])) + + +@pytest.mark.usefixtures('table_types') +class TestRename(SetupData): + + def test_1(self, table_types): + self._setup(table_types) + t = table_types.Table([self.a]) + t.rename_column('a', 'b') + assert t.columns.keys() == ['b'] + assert t.dtype.names == ('b',) + assert np.all(t['b'] == np.array([1, 2, 3])) + + def test_2(self, table_types): + self._setup(table_types) + t = table_types.Table([self.a, self.b]) + t.rename_column('a', 'c') + t.rename_column('b', 'a') + assert t.columns.keys() == ['c', 'a'] + assert t.dtype.names == ('c', 'a') + if t.masked: + assert t.mask.dtype.names == ('c', 'a') + assert 
np.all(t['c'] == np.array([1, 2, 3])) + assert np.all(t['a'] == np.array([4, 5, 6])) + + def test_rename_by_attr(self, table_types): + self._setup(table_types) + t = table_types.Table([self.a, self.b]) + t['a'].name = 'c' + t['b'].name = 'a' + assert t.columns.keys() == ['c', 'a'] + assert t.dtype.names == ('c', 'a') + assert np.all(t['c'] == np.array([1, 2, 3])) + assert np.all(t['a'] == np.array([4, 5, 6])) + + +@pytest.mark.usefixtures('table_types') +class TestSort(): + + def test_single(self, table_types): + t = table_types.Table() + t.add_column(table_types.Column(name='a', data=[2, 1, 3])) + t.add_column(table_types.Column(name='b', data=[6, 5, 4])) + t.add_column(table_types.Column(name='c', data=[(1, 2), (3, 4), (4, 5)])) + assert np.all(t['a'] == np.array([2, 1, 3])) + assert np.all(t['b'] == np.array([6, 5, 4])) + t.sort('a') + assert np.all(t['a'] == np.array([1, 2, 3])) + assert np.all(t['b'] == np.array([5, 6, 4])) + assert np.all(t['c'] == np.array([[3, 4], + [1, 2], + [4, 5]])) + t.sort('b') + assert np.all(t['a'] == np.array([3, 1, 2])) + assert np.all(t['b'] == np.array([4, 5, 6])) + assert np.all(t['c'] == np.array([[4, 5], + [3, 4], + [1, 2]])) + + def test_single_big(self, table_types): + """Sort a big-ish table with a non-trivial sort order""" + x = np.arange(10000) + y = np.sin(x) + t = table_types.Table([x, y], names=('x', 'y')) + t.sort('y') + idx = np.argsort(y) + assert np.all(t['x'] == x[idx]) + assert np.all(t['y'] == y[idx]) + + def test_empty(self, table_types): + t = table_types.Table([[], []], dtype=['f4', 'U1']) + t.sort('col1') + + def test_multiple(self, table_types): + t = table_types.Table() + t.add_column(table_types.Column(name='a', data=[2, 1, 3, 2, 3, 1])) + t.add_column(table_types.Column(name='b', data=[6, 5, 4, 3, 5, 4])) + assert np.all(t['a'] == np.array([2, 1, 3, 2, 3, 1])) + assert np.all(t['b'] == np.array([6, 5, 4, 3, 5, 4])) + t.sort(['a', 'b']) + assert np.all(t['a'] == np.array([1, 1, 2, 2, 3, 3])) + assert np.all(t['b'] == np.array([4, 5, 3, 6, 4, 5])) + t.sort(['b', 'a']) + assert np.all(t['a'] == np.array([2, 1, 3, 1, 3, 2])) + assert np.all(t['b'] == np.array([3, 4, 4, 5, 5, 6])) + t.sort(('a', 'b')) + assert np.all(t['a'] == np.array([1, 1, 2, 2, 3, 3])) + assert np.all(t['b'] == np.array([4, 5, 3, 6, 4, 5])) + + def test_multiple_with_bytes(self, table_types): + t = table_types.Table() + t.add_column(table_types.Column(name='firstname', data=[b"Max", b"Jo", b"John"])) + t.add_column(table_types.Column(name='name', data=[b"Miller", b"Miller", b"Jackson"])) + t.add_column(table_types.Column(name='tel', data=[12, 15, 19])) + t.sort(['name', 'firstname']) + assert np.all([t['firstname'] == np.array([b"John", b"Jo", b"Max"])]) + assert np.all([t['name'] == np.array([b"Jackson", b"Miller", b"Miller"])]) + assert np.all([t['tel'] == np.array([19, 15, 12])]) + + def test_multiple_with_unicode(self, table_types): + # Before Numpy 1.6.2, sorting with multiple column names + # failed when a unicode column was present. 
+ t = table_types.Table() + t.add_column(table_types.Column( + name='firstname', + data=[six.text_type(x) for x in ["Max", "Jo", "John"]])) + t.add_column(table_types.Column( + name='name', + data=[six.text_type(x) for x in ["Miller", "Miller", "Jackson"]])) + t.add_column(table_types.Column(name='tel', data=[12, 15, 19])) + t.sort(['name', 'firstname']) + assert np.all([t['firstname'] == np.array( + [six.text_type(x) for x in ["John", "Jo", "Max"]])]) + assert np.all([t['name'] == np.array( + [six.text_type(x) for x in ["Jackson", "Miller", "Miller"]])]) + assert np.all([t['tel'] == np.array([19, 15, 12])]) + + def test_argsort(self, table_types): + t = table_types.Table() + t.add_column(table_types.Column(name='a', data=[2, 1, 3, 2, 3, 1])) + t.add_column(table_types.Column(name='b', data=[6, 5, 4, 3, 5, 4])) + assert np.all(t.argsort() == t.as_array().argsort()) + i0 = t.argsort('a') + i1 = t.as_array().argsort(order=['a']) + assert np.all(t['a'][i0] == t['a'][i1]) + i0 = t.argsort(['a', 'b']) + i1 = t.as_array().argsort(order=['a', 'b']) + assert np.all(t['a'][i0] == t['a'][i1]) + assert np.all(t['b'][i0] == t['b'][i1]) + + def test_argsort_bytes(self, table_types): + t = table_types.Table() + t.add_column(table_types.Column(name='firstname', data=[b"Max", b"Jo", b"John"])) + t.add_column(table_types.Column(name='name', data=[b"Miller", b"Miller", b"Jackson"])) + t.add_column(table_types.Column(name='tel', data=[12, 15, 19])) + assert np.all(t.argsort(['name', 'firstname']) == np.array([2, 1, 0])) + + def test_argsort_unicode(self, table_types): + # Before Numpy 1.6.2, sorting with multiple column names + # failed when a unicode column was present. + t = table_types.Table() + t.add_column(table_types.Column( + name='firstname', + data=[six.text_type(x) for x in ["Max", "Jo", "John"]])) + t.add_column(table_types.Column( + name='name', + data=[six.text_type(x) for x in ["Miller", "Miller", "Jackson"]])) + t.add_column(table_types.Column(name='tel', data=[12, 15, 19])) + assert np.all(t.argsort(['name', 'firstname']) == np.array([2, 1, 0])) + + def test_rebuild_column_view_then_rename(self, table_types): + """ + Issue #2039 where renaming fails after any method that calls + _rebuild_table_column_view (this includes sort and add_row). 
+ """ + t = table_types.Table([[1]], names=('a',)) + assert t.colnames == ['a'] + assert t.dtype.names == ('a',) + + t.add_row((2,)) + assert t.colnames == ['a'] + assert t.dtype.names == ('a',) + + t.rename_column('a', 'b') + assert t.colnames == ['b'] + assert t.dtype.names == ('b',) + + t.sort('b') + assert t.colnames == ['b'] + assert t.dtype.names == ('b',) + + t.rename_column('b', 'c') + assert t.colnames == ['c'] + assert t.dtype.names == ('c',) + + +@pytest.mark.usefixtures('table_types') +class TestIterator(): + + def test_iterator(self, table_types): + d = np.array([(2, 1), + (3, 6), + (4, 5)], dtype=[(str('a'), 'i4'), (str('b'), 'i4')]) + t = table_types.Table(d) + if t.masked: + with pytest.raises(ValueError): + t[0] == d[0] + else: + for row, np_row in zip(t, d): + assert np.all(row == np_row) + + +@pytest.mark.usefixtures('table_types') +class TestSetMeta(): + + def test_set_meta(self, table_types): + d = table_types.Table(names=('a', 'b')) + d.meta['a'] = 1 + d.meta['b'] = 1 + d.meta['c'] = 1 + d.meta['d'] = 1 + assert list(d.meta.keys()) == ['a', 'b', 'c', 'd'] + + +@pytest.mark.usefixtures('table_types') +class TestConvertNumpyArray(): + + def test_convert_numpy_array(self, table_types): + d = table_types.Table([[1, 2], [3, 4]], names=('a', 'b')) + + np_data = np.array(d) + if table_types.Table is not MaskedTable: + assert np.all(np_data == d.as_array()) + assert np_data is not d.as_array() + assert d.colnames == list(np_data.dtype.names) + + np_data = np.array(d, copy=False) + if table_types.Table is not MaskedTable: + assert np.all(np_data == d.as_array()) + assert d.colnames == list(np_data.dtype.names) + + with pytest.raises(ValueError): + np_data = np.array(d, dtype=[(str('c'), 'i8'), (str('d'), 'i8')]) + + def test_as_array_byteswap(self, table_types): + """Test for https://github.com/astropy/astropy/pull/4080""" + + byte_orders = ('>', '<') + native_order = byte_orders[sys.byteorder == 'little'] + + for order in byte_orders: + col = table_types.Column([1.0, 2.0], name='a', dtype=order + 'f8') + t = table_types.Table([col]) + arr = t.as_array() + assert arr['a'].dtype.byteorder in (native_order, '=') + arr = t.as_array(keep_byteorder=True) + if order == native_order: + assert arr['a'].dtype.byteorder in (order, '=') + else: + assert arr['a'].dtype.byteorder == order + + def test_byteswap_fits_array(self, table_types): + """ + Test for https://github.com/astropy/astropy/pull/4080, demonstrating + that FITS tables are converted to native byte order. 
+ """ + + non_native_order = ('>', '<')[sys.byteorder != 'little'] + + filename = get_pkg_data_filename('data/tb.fits', + 'astropy.io.fits.tests') + t = table_types.Table.read(filename) + arr = t.as_array() + + for idx in range(len(arr.dtype)): + assert arr.dtype[idx].byteorder != non_native_order + + with fits.open(filename) as hdul: + data = hdul[1].data + for colname in data.columns.names: + assert np.all(data[colname] == arr[colname]) + + arr2 = t.as_array(keep_byteorder=True) + for colname in data.columns.names: + assert (data[colname].dtype.byteorder == + arr2[colname].dtype.byteorder) + + +def _assert_copies(t, t2, deep=True): + assert t.colnames == t2.colnames + np.testing.assert_array_equal(t.as_array(), t2.as_array()) + assert t.meta == t2.meta + + for col, col2 in zip(t.columns.values(), t2.columns.values()): + if deep: + assert not np.may_share_memory(col, col2) + else: + assert np.may_share_memory(col, col2) + + +def test_copy(): + t = table.Table([[1, 2, 3], [2, 3, 4]], names=['x', 'y']) + t2 = t.copy() + _assert_copies(t, t2) + + +def test_copy_masked(): + t = table.Table([[1, 2, 3], [2, 3, 4]], names=['x', 'y'], masked=True, + meta={'name': 'test'}) + t['x'].mask == [True, False, True] + t2 = t.copy() + _assert_copies(t, t2) + + +def test_copy_protocol(): + t = table.Table([[1, 2, 3], [2, 3, 4]], names=['x', 'y']) + + t2 = copy.copy(t) + t3 = copy.deepcopy(t) + + _assert_copies(t, t2, deep=False) + _assert_copies(t, t3) + + +def test_disallow_inequality_comparisons(): + """ + Regression test for #828 - disallow comparison operators on whole Table + """ + + t = table.Table() + + with pytest.raises(TypeError): + t > 2 + + with pytest.raises(TypeError): + t < 1.1 + + with pytest.raises(TypeError): + t >= 5.5 + + with pytest.raises(TypeError): + t <= -1.1 + + +def test_equality(): + + t = table.Table.read([' a b c d', + ' 2 c 7.0 0', + ' 2 b 5.0 1', + ' 2 b 6.0 2', + ' 2 a 4.0 3', + ' 0 a 0.0 4', + ' 1 b 3.0 5', + ' 1 a 2.0 6', + ' 1 a 1.0 7', + ], format='ascii') + + # All rows are equal + assert np.all(t == t) + + # Assert no rows are different + assert not np.any(t != t) + + # Check equality result for a given row + assert np.all((t == t[3]) == np.array([0, 0, 0, 1, 0, 0, 0, 0], dtype=bool)) + + # Check inequality result for a given row + assert np.all((t != t[3]) == np.array([1, 1, 1, 0, 1, 1, 1, 1], dtype=bool)) + + t2 = table.Table.read([' a b c d', + ' 2 c 7.0 0', + ' 2 b 5.0 1', + ' 3 b 6.0 2', + ' 2 a 4.0 3', + ' 0 a 1.0 4', + ' 1 b 3.0 5', + ' 1 c 2.0 6', + ' 1 a 1.0 7', + ], format='ascii') + + # In the above cases, Row.__eq__ gets called, but now need to make sure + # Table.__eq__ also gets called. 
+ assert np.all((t == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool)) + assert np.all((t != t2) == np.array([0, 0, 1, 0, 1, 0, 1, 0], dtype=bool)) + + # Check that comparing to a structured array works + assert np.all((t == t2.as_array()) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool)) + assert np.all((t.as_array() == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool)) + + +def test_equality_masked(): + + t = table.Table.read([' a b c d', + ' 2 c 7.0 0', + ' 2 b 5.0 1', + ' 2 b 6.0 2', + ' 2 a 4.0 3', + ' 0 a 0.0 4', + ' 1 b 3.0 5', + ' 1 a 2.0 6', + ' 1 a 1.0 7', + ], format='ascii') + + # Make into masked table + t = table.Table(t, masked=True) + + # All rows are equal + assert np.all(t == t) + + # Assert no rows are different + assert not np.any(t != t) + + # Check equality result for a given row + assert np.all((t == t[3]) == np.array([0, 0, 0, 1, 0, 0, 0, 0], dtype=bool)) + + # Check inequality result for a given row + assert np.all((t != t[3]) == np.array([1, 1, 1, 0, 1, 1, 1, 1], dtype=bool)) + + t2 = table.Table.read([' a b c d', + ' 2 c 7.0 0', + ' 2 b 5.0 1', + ' 3 b 6.0 2', + ' 2 a 4.0 3', + ' 0 a 1.0 4', + ' 1 b 3.0 5', + ' 1 c 2.0 6', + ' 1 a 1.0 7', + ], format='ascii') + + # In the above cases, Row.__eq__ gets called, but now need to make sure + # Table.__eq__ also gets called. + assert np.all((t == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool)) + assert np.all((t != t2) == np.array([0, 0, 1, 0, 1, 0, 1, 0], dtype=bool)) + + # Check that masking a value causes the row to differ + t.mask['a'][0] = True + assert np.all((t == t2) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool)) + assert np.all((t != t2) == np.array([1, 0, 1, 0, 1, 0, 1, 0], dtype=bool)) + + # Check that comparing to a structured array works + assert np.all((t == t2.as_array()) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool)) + + +@pytest.mark.xfail +def test_equality_masked_bug(): + """ + This highlights a Numpy bug. Once it works, it can be moved into the + test_equality_masked test. Related Numpy bug report: + + https://github.com/numpy/numpy/issues/3840 + """ + + t = table.Table.read([' a b c d', + ' 2 c 7.0 0', + ' 2 b 5.0 1', + ' 2 b 6.0 2', + ' 2 a 4.0 3', + ' 0 a 0.0 4', + ' 1 b 3.0 5', + ' 1 a 2.0 6', + ' 1 a 1.0 7', + ], format='ascii') + + t = table.Table(t, masked=True) + + t2 = table.Table.read([' a b c d', + ' 2 c 7.0 0', + ' 2 b 5.0 1', + ' 3 b 6.0 2', + ' 2 a 4.0 3', + ' 0 a 1.0 4', + ' 1 b 3.0 5', + ' 1 c 2.0 6', + ' 1 a 1.0 7', + ], format='ascii') + + assert np.all((t.as_array() == t2) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool)) + + +# Check that the meta descriptor is working as expected. The MetaBaseTest class +# takes care of defining all the tests, and we simply have to define the class +# and any minimal set of args to pass. + +from ...utils.tests.test_metadata import MetaBaseTest + + +class TestMetaTable(MetaBaseTest): + test_class = table.Table + args = () + + +def test_unicode_column_names(table_types): + """ + Test that unicode column names are accepted. Only do this for + Python 2 since strings are unicode already in Python 3. 
+ """ + if six.PY2: + t = table_types.Table([[1]], names=(six.text_type('a'),)) + assert t.colnames == ['a'] + t[six.text_type('b')] = 0.0 + assert t.colnames == ['a', 'b'] + + +def test_unicode_content(): + # If we don't have unicode literals then return + if isinstance('', bytes): + return + + # Define unicode literals + string_a = 'астрономическая питона' + string_b = 'миллиарды световых лет' + + a = table.Table( + [[string_a, 2], + [string_b, 3]], + names=('a', 'b')) + + assert string_a in six.text_type(a) + # This only works because the coding of this file is utf-8, which + # matches the default encoding of Table.__str__ + assert string_a.encode('utf-8') in bytes(a) + + +def test_unicode_policy(): + t = table.Table.read([' a b c d', + ' 2 c 7.0 0', + ' 2 b 5.0 1', + ' 2 b 6.0 2', + ' 2 a 4.0 3', + ' 0 a 0.0 4', + ' 1 b 3.0 5', + ' 1 a 2.0 6', + ' 1 a 1.0 7', + ], format='ascii') + assert_follows_unicode_guidelines(t) + + +def test_unicode_bytestring_conversion(table_types): + t = table_types.Table([['abc'], ['def'], [1]], dtype=('S', 'U', 'i')) + assert t['col0'].dtype.kind == 'S' + assert t['col1'].dtype.kind == 'U' + assert t['col2'].dtype.kind == 'i' + + t1 = t.copy() + t1.convert_unicode_to_bytestring() + assert t1['col0'].dtype.kind == 'S' + assert t1['col1'].dtype.kind == 'S' + assert t1['col2'].dtype.kind == 'i' + assert t1['col0'][0] == 'abc' + assert t1['col1'][0] == 'def' + assert t1['col2'][0] == 1 + + t1 = t.copy() + t1.convert_bytestring_to_unicode() + assert t1['col0'].dtype.kind == 'U' + assert t1['col1'].dtype.kind == 'U' + assert t1['col2'].dtype.kind == 'i' + assert t1['col0'][0] == six.text_type('abc') + assert t1['col1'][0] == six.text_type('def') + assert t1['col2'][0] == 1 + + +def test_table_deletion(): + """ + Regression test for the reference cycle discussed in + https://github.com/astropy/astropy/issues/2877 + """ + + deleted = set() + + # A special table subclass which leaves a record when it is finalized + class TestTable(table.Table): + def __del__(self): + deleted.add(id(self)) + + t = TestTable({'a': [1, 2, 3]}) + the_id = id(t) + assert t['a'].parent_table is t + + del t + + # Cleanup + gc.collect() + + assert the_id in deleted + + +def test_nested_iteration(): + """ + Regression test for issue 3358 where nested iteration over a single table fails. 
+ """ + t = table.Table([[0, 1]], names=['a']) + out = [] + for r1 in t: + for r2 in t: + out.append((r1['a'], r2['a'])) + assert out == [(0, 0), (0, 1), (1, 0), (1, 1)] + + +def test_table_init_from_degenerate_arrays(table_types): + t = table_types.Table(np.array([])) + assert len(t.columns) == 0 + + with pytest.raises(ValueError): + t = table_types.Table(np.array(0)) + + t = table_types.Table(np.array([1, 2, 3])) + assert len(t.columns) == 3 + + +@pytest.mark.skipif('not HAS_PANDAS') +class TestPandas(object): + + def test_simple(self): + + t = table.Table() + + for endian in ['<', '>']: + for kind in ['f', 'i']: + for byte in ['2', '4', '8']: + dtype = np.dtype(endian + kind + byte) + x = np.array([1, 2, 3], dtype=dtype) + t[endian + kind + byte] = x + + t['u'] = ['a', 'b', 'c'] + t['s'] = ['a', 'b', 'c'] + + d = t.to_pandas() + + for column in t.columns: + if column == 'u': + assert np.all(t['u'] == np.array(['a', 'b', 'c'])) + assert d[column].dtype == np.dtype("O") # upstream feature of pandas + elif column == 's': + assert np.all(t['s'] == np.array(['a', 'b', 'c'])) + assert d[column].dtype == np.dtype("O") # upstream feature of pandas + else: + # We should be able to compare exact values here + assert np.all(t[column] == d[column]) + if t[column].dtype.byteorder in ('=', '|'): + assert d[column].dtype == t[column].dtype + else: + assert d[column].dtype == t[column].byteswap().newbyteorder().dtype + + # Regression test for astropy/astropy#1156 - the following code gave a + # ValueError: Big-endian buffer not supported on little-endian + # compiler. We now automatically swap the endian-ness to native order + # upon adding the arrays to the data frame. + d[['i4']] + d[['f4']] + + t2 = table.Table.from_pandas(d) + + for column in t.columns: + if column in ('u', 's'): + assert np.all(t[column] == t2[column]) + else: + assert_allclose(t[column], t2[column]) + if t[column].dtype.byteorder in ('=', '|'): + assert t[column].dtype == t2[column].dtype + else: + assert t[column].byteswap().newbyteorder().dtype == t2[column].dtype + + def test_2d(self): + + t = table.Table() + t['a'] = [1, 2, 3] + t['b'] = np.ones((3, 2)) + + with pytest.raises(ValueError) as exc: + t.to_pandas() + assert exc.value.args[0] == "Cannot convert a table with multi-dimensional columns to a pandas DataFrame" + + def test_mixin(self): + + from ...coordinates import SkyCoord + + t = table.Table() + t['c'] = SkyCoord([1, 2, 3], [4, 5, 6], unit='deg') + + with pytest.raises(ValueError) as exc: + t.to_pandas() + assert exc.value.args[0] == "Cannot convert a table with mixin columns to a pandas DataFrame" + + def test_masking(self): + + t = table.Table(masked=True) + + t['a'] = [1, 2, 3] + t['a'].mask = [True, False, True] + + t['b'] = [1., 2., 3.] + t['b'].mask = [False, False, True] + + t['u'] = ['a', 'b', 'c'] + t['u'].mask = [False, True, False] + + t['s'] = ['a', 'b', 'c'] + t['s'].mask = [False, True, False] + + d = t.to_pandas() + + t2 = table.Table.from_pandas(d) + + for name, column in t.columns.items(): + assert np.all(column.data == t2[name].data) + assert np.all(column.mask == t2[name].mask) + # Masked integer type comes back as float. Nothing we can do about this. 
+ if column.dtype.kind == 'i': + assert t2[name].dtype.kind == 'f' + else: + if column.dtype.byteorder in ('=', '|'): + assert column.dtype == t2[name].dtype + else: + assert column.byteswap().newbyteorder().dtype == t2[name].dtype + + +@pytest.mark.usefixtures('table_types') +class TestReplaceColumn(SetupData): + def test_fail_replace_column(self, table_types): + """Raise exception when trying to replace column via table.columns object""" + self._setup(table_types) + t = table_types.Table([self.a, self.b]) + + with pytest.raises(ValueError): + t.columns['a'] = [1, 2, 3] + + with pytest.raises(ValueError): + t.replace_column('not there', [1, 2, 3]) + + def test_replace_column(self, table_types): + """Replace existing column with a new column""" + self._setup(table_types) + t = table_types.Table([self.a, self.b]) + ta = t['a'] + tb = t['b'] + + vals = [1.2, 3.4, 5.6] + for col in (vals, + table_types.Column(vals), + table_types.Column(vals, name='a'), + table_types.Column(vals, name='b')): + t.replace_column('a', col) + assert np.all(t['a'] == vals) + assert t['a'] is not ta # New a column + assert t['b'] is tb # Original b column unchanged + assert t.colnames == ['a', 'b'] + assert t['a'].meta == {} + assert t['a'].format is None + + def test_replace_index_column(self, table_types): + """Replace index column and generate expected exception""" + self._setup(table_types) + t = table_types.Table([self.a, self.b]) + t.add_index('a') + + with pytest.raises(ValueError) as err: + t.replace_column('a', [1, 2, 3]) + assert err.value.args[0] == 'cannot replace a table index column' + + +class Test__Astropy_Table__(): + """ + Test initializing a Table subclass from a table-like object that + implements the __astropy_table__ interface method. + """ + + class SimpleTable(object): + def __init__(self): + self.columns = [[1, 2, 3], + [4, 5, 6], + [7, 8, 9] * u.m] + self.names = ['a', 'b', 'c'] + self.meta = OrderedDict([('a', 1), ('b', 2)]) + + def __astropy_table__(self, cls, copy, **kwargs): + a, b, c = self.columns + c.info.name = 'c' + cols = [table.Column(a, name='a'), + table.MaskedColumn(b, name='b'), + c] + names = [col.info.name for col in cols] + return cls(cols, names=names, copy=copy, meta=kwargs or self.meta) + + def test_simple_1(self): + """Make a SimpleTable and convert to Table, QTable with copy=False, True""" + for table_cls in (table.Table, table.QTable): + col_c_class = u.Quantity if table_cls is table.QTable else table.MaskedColumn + for cpy in (False, True): + st = self.SimpleTable() + # Test putting in a non-native kwarg `extra_meta` to Table initializer + t = table_cls(st, copy=cpy, extra_meta='extra!') + assert t.colnames == ['a', 'b', 'c'] + assert t.meta == {'extra_meta': 'extra!'} + assert np.all(t['a'] == st.columns[0]) + assert np.all(t['b'] == st.columns[1]) + vals = t['c'].value if table_cls is table.QTable else t['c'] + assert np.all(st.columns[2].value == vals) + + assert isinstance(t['a'], table.MaskedColumn) + assert isinstance(t['b'], table.MaskedColumn) + assert isinstance(t['c'], col_c_class) + assert t['c'].unit is u.m + assert type(t) is table_cls + + # Copy being respected? 
+                t['a'][0] = 10
+                assert st.columns[0][0] == (1 if cpy else 10)
+
+    def test_simple_2(self):
+        """Test converting a SimpleTable and changing column names and types"""
+        st = self.SimpleTable()
+        dtypes = [np.int32, np.float32, np.float16]
+        names = ['a', 'b', 'c']
+        t = table.Table(st, dtype=dtypes, names=names, meta=OrderedDict([('c', 3)]))
+        assert t.colnames == names
+        assert all(col.dtype.type is dtype
+                   for col, dtype in zip(t.columns.values(), dtypes))
+
+        # The supplied meta is ignored. This is consistent with current
+        # behavior when initializing from an existing astropy Table.
+        assert t.meta == st.meta
+
+    def test_kwargs_exception(self):
+        """If extra kwargs are provided without initializing from a table-like
+        object, an exception is raised"""
+        with pytest.raises(TypeError) as err:
+            table.Table([[1]], extra_meta='extra!')
+        assert '__init__() got unexpected keyword argument' in str(err)
+
+
+def test_replace_column_qtable():
+    """Replace an existing Quantity column with a new column in a QTable"""
+    a = [1, 2, 3] * u.m
+    b = [4, 5, 6]
+    t = table.QTable([a, b], names=['a', 'b'])
+
+    ta = t['a']
+    tb = t['b']
+    ta.info.meta = {'aa': [0, 1, 2, 3, 4]}
+    ta.info.format = '%f'
+
+    t.replace_column('a', a.to('cm'))
+    assert np.all(t['a'] == ta)
+    assert t['a'] is not ta  # New a column
+    assert t['b'] is tb  # Original b column unchanged
+    assert t.colnames == ['a', 'b']
+    assert t['a'].info.meta is None
+    assert t['a'].info.format is None
+
+
+def test_replace_update_column_via_setitem():
+    """
+    Test a table update like ``t['a'] = value``. This leverages the
+    already well-tested ``replace_column`` and in-place update
+    ``t['a'][:] = value``, so this testing is fairly light.
+    """
+    a = [1, 2] * u.m
+    b = [3, 4]
+    t = table.QTable([a, b], names=['a', 'b'])
+    assert isinstance(t['a'], u.Quantity)
+
+    # In-place update
+    ta = t['a']
+    t['a'] = 5 * u.m
+    assert np.all(t['a'] == [5, 5] * u.m)
+    assert t['a'] is ta
+
+    # Replace
+    t['a'] = [5, 6]
+    assert np.all(t['a'] == [5, 6])
+    assert isinstance(t['a'], table.Column)
+    assert t['a'] is not ta
+
+
+def test_replace_update_column_via_setitem_warnings_normal():
+    """
+    Test warnings related to the table replace change in #5556:
+    Normal warning-free replace.
+    """
+    t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
+    with catch_warnings() as w:
+        with table.conf.set_temp('replace_warnings',
+                                 ['refcount', 'attributes', 'slice']):
+            t['a'] = 0  # in-place update
+            assert len(w) == 0
+
+            t['a'] = [10, 20, 30]  # replace column
+            assert len(w) == 0
+
+
+def test_replace_update_column_via_setitem_warnings_slice():
+    """
+    Test warnings related to the table replace change in #5556:
+    Replace a slice, one warning.
+    """
+    t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
+    with catch_warnings() as w:
+        with table.conf.set_temp('replace_warnings',
+                                 ['refcount', 'attributes', 'slice']):
+            t2 = t[:2]
+
+            t2['a'] = 0  # in-place slice update
+            assert np.all(t['a'] == [0, 0, 3])
+            assert len(w) == 0
+
+            t2['a'] = [10, 20]  # replace slice
+            assert len(w) == 1
+            assert "replaced column 'a' which looks like an array slice" in str(w[0].message)
+
+
+def test_replace_update_column_via_setitem_warnings_attributes():
+    """
+    Test warnings related to the table replace change in #5556:
+    Lost attributes.
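A self-contained sketch of the case this test exercises (values illustrative):

    t = table.Table([[1, 2, 3]], names=['a'])
    t['a'].unit = 'm'
    with catch_warnings() as w:
        with table.conf.set_temp('replace_warnings', ['attributes']):
            t['a'] = [10, 20, 30]  # wholesale replacement loses 'unit'
    assert len(w) == 1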
+ """ + t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b']) + t['a'].unit = 'm' + + with catch_warnings() as w: + with table.conf.set_temp('replace_warnings', + ['refcount', 'attributes', 'slice']): + t['a'] = [10, 20, 30] + assert len(w) == 1 + assert "replaced column 'a' and column attributes ['unit']" in str(w[0].message) + + +def test_replace_update_column_via_setitem_warnings_refcount(): + """ + Test warnings related to table replace change in #5556: + Reference count changes. + """ + t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b']) + ta = t['a'] # Generate an extra reference to original column + + with catch_warnings() as w: + with table.conf.set_temp('replace_warnings', + ['refcount', 'attributes', 'slice']): + t['a'] = [10, 20, 30] + assert len(w) == 1 + assert "replaced column 'a' and the number of references" in str(w[0].message) + + +def test_replace_update_column_via_setitem_warnings_always(): + """ + Test warnings related to table replace change in #5556: + Test 'always' setting that raises warning for any replace. + """ + t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b']) + + with catch_warnings() as w: + with table.conf.set_temp('replace_warnings', ['always']): + t['a'] = 0 # in-place slice update + assert len(w) == 0 + + from inspect import currentframe, getframeinfo + frameinfo = getframeinfo(currentframe()) + t['a'] = [10, 20, 30] # replace column + assert len(w) == 1 + assert "replaced column 'a'" == str(w[0].message) + + # Make sure the warning points back to the user code line + assert w[0].lineno == frameinfo.lineno + 1 + assert w[0].category is table.TableReplaceWarning + assert 'test_table' in w[0].filename + + +def test_replace_update_column_via_setitem_replace_inplace(): + """ + Test the replace_inplace config option related to #5556. In this + case no replace is done. + """ + t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b']) + ta = t['a'] + t['a'].unit = 'm' + + with catch_warnings() as w: + with table.conf.set_temp('replace_inplace', True): + with table.conf.set_temp('replace_warnings', + ['always', 'refcount', 'attributes', 'slice']): + t['a'] = 0 # in-place update + assert len(w) == 0 + assert ta is t['a'] + + t['a'] = [10, 20, 30] # normally replaces column, but not now + assert len(w) == 0 + assert ta is t['a'] + assert np.all(t['a'] == [10, 20, 30]) + + +def test_primary_key_is_inherited(): + """Test whether a new Table inherits the primary_key attribute from + its parent Table. 
Issue #4672"""
+
+    t = table.Table([(2, 3, 2, 1), (8, 7, 6, 5)], names=('a', 'b'))
+    t.add_index('a')
+    original_key = t.primary_key
+
+    # can't usefully test tuple identity, so just check the content
+    # (use ==, not "is": string identity is an implementation detail)
+    assert original_key[0] == 'a'
+
+    t2 = t[:]
+    t3 = t.copy()
+    t4 = table.Table(t)
+
+    # test whether the primary key is preserved in the following
+    assert original_key == t2.primary_key
+    assert original_key == t3.primary_key
+    assert original_key == t4.primary_key
+
+    # just test one element; assume the rest are equal if the assert passes
+    assert t.loc[1] == t2.loc[1]
+    assert t.loc[1] == t3.loc[1]
+    assert t.loc[1] == t4.loc[1]
+
+
+def test_qtable_read_for_ipac_table_with_char_columns():
+    '''Test that a char column of a QTable is assigned no unit and not
+    a dimensionless unit, otherwise conversion of reader output to
+    QTable fails.'''
+    t1 = table.QTable([["A"]], names="B")
+    out = StringIO()
+    t1.write(out, format="ascii.ipac")
+    t2 = table.QTable.read(out.getvalue(), format="ascii.ipac", guess=False)
+    assert t2["B"].unit is None
diff --git a/astropy/tests/__init__.py b/astropy/tests/__init__.py
new file mode 100644
index 0000000..89efdde
--- /dev/null
+++ b/astropy/tests/__init__.py
@@ -0,0 +1,28 @@
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+"""
+This package contains utilities to run the astropy test suite, tools
+for writing tests, and general tests that are not associated with a
+particular package.
+"""
+
+# NOTE: This is retained only for backwards compatibility. Affiliated packages
+# should no longer import `disable_internet` from `astropy.tests`. It is now
+# available from `pytest_remotedata`. However, this is not the recommended
+# mechanism for controlling access to remote data in tests. Instead, packages
+# should make use of the decorators provided by the pytest_remotedata plugin:
+# - `@pytest.mark.remote_data` for tests that require remote data access
+# - `@pytest.mark.internet_off` for tests that should only run when remote
+#   data access is disabled.
+# Remote data access for the test suite is controlled by the `--remote-data`
+# command line flag. This is either passed to `pytest` directly or to the
+# `setup.py test` command.
+#
+# TODO: This import should eventually be removed once backwards compatibility
+# is no longer supported.
+
+from pkgutil import find_loader
+
+if find_loader('pytest_remotedata') is not None:
+    from pytest_remotedata import disable_internet
+else:
+    from ..extern.plugins.pytest_remotedata import disable_internet
diff --git a/astropy/tests/command.py b/astropy/tests/command.py
new file mode 100644
index 0000000..bd869a5
--- /dev/null
+++ b/astropy/tests/command.py
@@ -0,0 +1,331 @@
+"""
+Implements the wrapper for the Astropy test runner in the form of the
+``./setup.py test`` distutils command.
+"""
+
+
+import os
+import shutil
+import subprocess
+import sys
+import tempfile
+
+from setuptools import Command
+
+from ..extern import six
+
+
+def _fix_user_options(options):
+    """
+    This is for Python 2.x and 3.x compatibility. distutils expects Command
+    options to all be byte strings on Python 2 and Unicode strings on Python 3.
+    """
+
+    def to_str_or_none(x):
+        if x is None:
+            return None
+        return str(x)
+
+    return [tuple(to_str_or_none(x) for x in y) for y in options]
+
+
+class FixRemoteDataOption(type):
+    """
+    This metaclass is used to catch cases where the user is running the tests
+    with --remote-data.
We've now changed the --remote-data option so that it + takes arguments, but we still want --remote-data to work as before and to + enable all remote tests. With this metaclass, we can modify sys.argv + before distutils/setuptools try to parse the command-line options. + """ + def __init__(cls, name, bases, dct): + + try: + idx = sys.argv.index('--remote-data') + except ValueError: + pass + else: + sys.argv[idx] = '--remote-data=any' + + try: + idx = sys.argv.index('-R') + except ValueError: + pass + else: + sys.argv[idx] = '-R=any' + + return super(FixRemoteDataOption, cls).__init__(name, bases, dct) + + +@six.add_metaclass(FixRemoteDataOption) +class AstropyTest(Command, object): + description = 'Run the tests for this package' + + user_options = [ + ('package=', 'P', + "The name of a specific package to test, e.g. 'io.fits' or 'utils'. " + "If nothing is specified, all default tests are run."), + ('test-path=', 't', + 'Specify a test location by path. If a relative path to a .py file, ' + 'it is relative to the built package, so e.g., a leading "astropy/" ' + 'is necessary. If a relative path to a .rst file, it is relative to ' + 'the directory *below* the --docs-path directory, so a leading ' + '"docs/" is usually necessary. May also be an absolute path.'), + ('verbose-results', 'V', + 'Turn on verbose output from pytest.'), + ('plugins=', 'p', + 'Plugins to enable when running pytest.'), + ('pastebin=', 'b', + "Enable pytest pastebin output. Either 'all' or 'failed'."), + ('args=', 'a', + 'Additional arguments to be passed to pytest.'), + ('remote-data=', 'R', 'Run tests that download remote data. Should be ' + 'one of none/astropy/any (defaults to none).'), + ('pep8', '8', + 'Enable PEP8 checking and disable regular tests. ' + 'Requires the pytest-pep8 plugin.'), + ('pdb', 'd', + 'Start the interactive Python debugger on errors.'), + ('coverage', 'c', + 'Create a coverage report. Requires the coverage package.'), + ('open-files', 'o', 'Fail if any tests leave files open. Requires the ' + 'psutil package.'), + ('parallel=', 'j', + 'Run the tests in parallel on the specified number of ' + 'CPUs. If negative, all the cores on the machine will be ' + 'used. Requires the pytest-xdist plugin.'), + ('docs-path=', None, + 'The path to the documentation .rst files. If not provided, and ' + 'the current directory contains a directory called "docs", that ' + 'will be used.'), + ('skip-docs', None, + "Don't test the documentation .rst files."), + ('repeat=', None, + 'How many times to repeat each test (can be used to check for ' + 'sporadic failures).'), + ('temp-root=', None, + 'The root directory in which to create the temporary testing files. ' + 'If unspecified the system default is used (e.g. /tmp) as explained ' + 'in the documentation for tempfile.mkstemp.') + ] + + user_options = _fix_user_options(user_options) + + package_name = '' + + def initialize_options(self): + self.package = None + self.test_path = None + self.verbose_results = False + self.plugins = None + self.pastebin = None + self.args = None + self.remote_data = 'none' + self.pep8 = False + self.pdb = False + self.coverage = False + self.open_files = False + self.parallel = 0 + self.docs_path = None + self.skip_docs = False + self.repeat = None + self.temp_root = None + + def finalize_options(self): + # Normally we would validate the options here, but that's handled in + # run_tests + pass + + def generate_testing_command(self): + """ + Build a Python script to run the tests. 
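Concretely, the command is a one-liner handed to ``python -c``; a simplified sketch of what the format string below produces (most keyword arguments omitted, Python 3 flag variant shown):

    import builtins; builtins._ASTROPY_TEST_ = True; \
    import astropy, sys; \
    result = (astropy.test(package=None, verbose=False)); sys.exit(result)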
+ """ + + cmd_pre = '' # Commands to run before the test function + cmd_post = '' # Commands to run after the test function + + if self.coverage: + pre, post = self._generate_coverage_commands() + cmd_pre += pre + cmd_post += post + + if six.PY2: + set_flag = "import __builtin__; __builtin__._ASTROPY_TEST_ = True" + else: + set_flag = "import builtins; builtins._ASTROPY_TEST_ = True" + + cmd = ('{cmd_pre}{0}; import {1.package_name}, sys; result = (' + '{1.package_name}.test(' + 'package={1.package!r}, ' + 'test_path={1.test_path!r}, ' + 'args={1.args!r}, ' + 'plugins={1.plugins!r}, ' + 'verbose={1.verbose_results!r}, ' + 'pastebin={1.pastebin!r}, ' + 'remote_data={1.remote_data!r}, ' + 'pep8={1.pep8!r}, ' + 'pdb={1.pdb!r}, ' + 'open_files={1.open_files!r}, ' + 'parallel={1.parallel!r}, ' + 'docs_path={1.docs_path!r}, ' + 'skip_docs={1.skip_docs!r}, ' + 'repeat={1.repeat!r})); ' + '{cmd_post}' + 'sys.exit(result)') + return cmd.format(set_flag, self, cmd_pre=cmd_pre, cmd_post=cmd_post) + + def run(self): + """ + Run the tests! + """ + # Install the runtime and test dependencies. + if self.distribution.install_requires: + self.distribution.fetch_build_eggs( + self.distribution.install_requires) + if self.distribution.tests_require: + self.distribution.fetch_build_eggs(self.distribution.tests_require) + + # Ensure there is a doc path + if self.docs_path is None: + cfg_docs_dir = self.distribution.get_option_dict('build_docs').get('source_dir', None) + + # Some affiliated packages use this. + # See astropy/package-template#157 + if cfg_docs_dir is not None and os.path.exists(cfg_docs_dir[1]): + self.docs_path = os.path.abspath(cfg_docs_dir[1]) + + # fall back on a default path of "docs" + elif os.path.exists('docs'): # pragma: no cover + self.docs_path = os.path.abspath('docs') + + # Build a testing install of the package + self._build_temp_install() + + # Run everything in a try: finally: so that the tmp dir gets deleted. + try: + # Construct this modules testing command + cmd = self.generate_testing_command() + + # Run the tests in a subprocess--this is necessary since + # new extension modules may have appeared, and this is the + # easiest way to set up a new environment + + # On Python 3.x prior to 3.3, the creation of .pyc files + # is not atomic. py.test jumps through some hoops to make + # this work by parsing import statements and carefully + # importing files atomically. However, it can't detect + # when __import__ is used, so its carefulness still fails. + # The solution here (admittedly a bit of a hack), is to + # turn off the generation of .pyc files altogether by + # passing the `-B` switch to `python`. This does mean + # that each core will have to compile .py file to bytecode + # itself, rather than getting lucky and borrowing the work + # already done by another core. Compilation is an + # insignificant fraction of total testing time, though, so + # it's probably not worth worrying about. + testproc = subprocess.Popen( + [sys.executable, '-B', '-c', cmd], + cwd=self.testing_path, close_fds=False) + retcode = testproc.wait() + except KeyboardInterrupt: + import signal + # If a keyboard interrupt is handled, pass it to the test + # subprocess to prompt pytest to initiate its teardown + testproc.send_signal(signal.SIGINT) + retcode = testproc.wait() + finally: + # Remove temporary directory + shutil.rmtree(self.tmp_dir) + + raise SystemExit(retcode) + + def _build_temp_install(self): + """ + Install the package and to a temporary directory for the purposes of + testing. 
This allows us to test the install command, including the
+        entry points, and it also avoids creating .pyc files and __pycache__
+        directories inside the build directory.
+        """
+
+        # On OSX the default path for temp files is under /var, but in most
+        # cases on OSX /var is actually a symlink to /private/var; ensure we
+        # dereference that link, because py.test is very sensitive to relative
+        # paths...
+
+        tmp_dir = tempfile.mkdtemp(prefix=self.package_name + '-test-',
+                                   dir=self.temp_root)
+        self.tmp_dir = os.path.realpath(tmp_dir)
+
+        # We now install the package to the temporary directory. We do this
+        # rather than build and copy because this will ensure that e.g. entry
+        # points work.
+        self.reinitialize_command('install')
+        install_cmd = self.distribution.get_command_obj('install')
+        install_cmd.prefix = self.tmp_dir
+        self.run_command('install')
+
+        # We now get the path to the site-packages directory that was created
+        # inside self.tmp_dir
+        install_cmd = self.get_finalized_command('install')
+        self.testing_path = install_cmd.install_lib
+
+        # Ideally, docs_path is set properly in run(), but if it is still
+        # not set here, do not pretend it is, otherwise bad things happen.
+        # See astropy/package-template#157
+        if self.docs_path is not None:
+            new_docs_path = os.path.join(self.testing_path,
+                                         os.path.basename(self.docs_path))
+            shutil.copytree(self.docs_path, new_docs_path)
+            self.docs_path = new_docs_path
+
+        shutil.copy('setup.cfg', self.testing_path)
+
+    def _generate_coverage_commands(self):
+        """
+        This method creates the pre and post commands that are needed if
+        coverage is to be generated.
+        """
+        if self.parallel != 0:
+            raise ValueError(
+                "--coverage cannot be used with --parallel")
+
+        try:
+            import coverage  # pylint: disable=W0611
+        except ImportError:
+            raise ImportError(
+                "--coverage requires that the coverage package is "
+                "installed.")
+
+        # Don't use get_pkg_data_filename here, because it
+        # requires importing astropy.config and thus screwing
+        # up coverage results for those packages.
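The ``coveragerc`` shipped with the package (shown later in this patch) is a template: the two placeholders below are substituted before the file is handed to ``coverage``. A standalone sketch of the same substitution, with an illustrative package name:

    with open('coveragerc') as fd:
        content = fd.read()
    # under Python 3, Python-2-only branches drop out of the report
    content = (content.replace('{ignore_python_version}', '2')
                      .replace('{packagename}', 'astropy'))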
+ coveragerc = os.path.join( + self.testing_path, self.package_name, 'tests', 'coveragerc') + + # We create a coveragerc that is specific to the version + # of Python we're running, so that we can mark branches + # as being specifically for Python 2 or Python 3 + with open(coveragerc, 'r') as fd: + coveragerc_content = fd.read() + if not six.PY2: + ignore_python_version = '2' + else: + ignore_python_version = '3' + coveragerc_content = coveragerc_content.replace( + "{ignore_python_version}", ignore_python_version).replace( + "{packagename}", self.package_name) + tmp_coveragerc = os.path.join(self.tmp_dir, 'coveragerc') + with open(tmp_coveragerc, 'wb') as tmp: + tmp.write(coveragerc_content.encode('utf-8')) + + cmd_pre = ( + 'import coverage; ' + 'cov = coverage.coverage(data_file="{0}", config_file="{1}"); ' + 'cov.start();'.format( + os.path.abspath(".coverage"), tmp_coveragerc)) + cmd_post = ( + 'cov.stop(); ' + 'from astropy.tests.helper import _save_coverage; ' + '_save_coverage(cov, result, "{0}", "{1}");'.format( + os.path.abspath('.'), self.testing_path)) + + return cmd_pre, cmd_post diff --git a/astropy/tests/coveragerc b/astropy/tests/coveragerc new file mode 100644 index 0000000..7e77ef1 --- /dev/null +++ b/astropy/tests/coveragerc @@ -0,0 +1,33 @@ +[run] +source = astropy +omit = + astropy/__init__* + astropy/conftest.py + astropy/*setup* + astropy/*/tests/* + astropy/tests/test_* + astropy/extern/* + astropy/sphinx/* + astropy/utils/compat/* + astropy/version* + astropy/wcs/docstrings* + astropy/_erfa/* + +[report] +exclude_lines = + # Have to re-enable the standard pragma + pragma: no cover + + # Don't complain about packages we have installed + except ImportError + + # Don't complain if tests don't hit assertions + raise AssertionError + raise NotImplementedError + + # Don't complain about script hooks + def main\(.*\): + + # Ignore branches that don't pertain to this version of Python + pragma: py{ignore_python_version} + six.PY{ignore_python_version} \ No newline at end of file diff --git a/astropy/tests/disable_internet.py b/astropy/tests/disable_internet.py new file mode 100644 index 0000000..1bd0b92 --- /dev/null +++ b/astropy/tests/disable_internet.py @@ -0,0 +1,153 @@ +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import contextlib +import socket + +from ..extern.six.moves import urllib + +# save original socket method for restoration +# These are global so that re-calling the turn_off_internet function doesn't +# overwrite them again +socket_original = socket.socket +socket_create_connection = socket.create_connection +socket_bind = socket.socket.bind +socket_connect = socket.socket.connect + + +INTERNET_OFF = False + +# urllib2 uses a global variable to cache its default "opener" for opening +# connections for various protocols; we store it off here so we can restore to +# the default after re-enabling internet use +_orig_opener = None + + +# ::1 is apparently another valid name for localhost? +# it is returned by getaddrinfo when that function is given localhost + +def check_internet_off(original_function, allow_astropy_data=False): + """ + Wraps ``original_function``, which in most cases is assumed + to be a `socket.socket` method, to raise an `IOError` for any operations + on non-local AF_INET sockets. 
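For instance, ``turn_off_internet`` below installs the wrapper exactly like this (a one-line sketch of the pattern):

    socket.create_connection = check_internet_off(socket.create_connection)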
+ """ + + def new_function(*args, **kwargs): + if isinstance(args[0], socket.socket): + if not args[0].family in (socket.AF_INET, socket.AF_INET6): + # Should be fine in all but some very obscure cases + # More to the point, we don't want to affect AF_UNIX + # sockets. + return original_function(*args, **kwargs) + host = args[1][0] + addr_arg = 1 + valid_hosts = ('localhost', '127.0.0.1', '::1') + else: + # The only other function this is used to wrap currently is + # socket.create_connection, which should be passed a 2-tuple, but + # we'll check just in case + if not (isinstance(args[0], tuple) and len(args[0]) == 2): + return original_function(*args, **kwargs) + + host = args[0][0] + addr_arg = 0 + valid_hosts = ('localhost', '127.0.0.1') + + if allow_astropy_data: + for valid_host in ('data.astropy.org', 'astropy.stsci.edu', 'www.astropy.org'): + valid_host_ip = socket.gethostbyname(valid_host) + valid_hosts += (valid_host, valid_host_ip) + + hostname = socket.gethostname() + fqdn = socket.getfqdn() + + if host in (hostname, fqdn): + host = 'localhost' + new_addr = (host, args[addr_arg][1]) + args = args[:addr_arg] + (new_addr,) + args[addr_arg + 1:] + + if any(h in host for h in valid_hosts): + return original_function(*args, **kwargs) + else: + raise IOError("An attempt was made to connect to the internet " + "by a test that was not marked `remote_data`. The " + "requested host was: {0}".format(host)) + return new_function + + +def turn_off_internet(verbose=False, allow_astropy_data=False): + """ + Disable internet access via python by preventing connections from being + created using the socket module. Presumably this could be worked around by + using some other means of accessing the internet, but all default python + modules (urllib, requests, etc.) use socket [citation needed]. + """ + + global INTERNET_OFF + global _orig_opener + + if INTERNET_OFF: + return + + INTERNET_OFF = True + + __tracebackhide__ = True + if verbose: + print("Internet access disabled") + + # Update urllib2 to force it not to use any proxies + # Must use {} here (the default of None will kick off an automatic search + # for proxies) + _orig_opener = urllib.request.build_opener() + no_proxy_handler = urllib.request.ProxyHandler({}) + opener = urllib.request.build_opener(no_proxy_handler) + urllib.request.install_opener(opener) + + socket.create_connection = check_internet_off(socket_create_connection, allow_astropy_data=allow_astropy_data) + socket.socket.bind = check_internet_off(socket_bind, allow_astropy_data=allow_astropy_data) + socket.socket.connect = check_internet_off(socket_connect, allow_astropy_data=allow_astropy_data) + + return socket + + +def turn_on_internet(verbose=False): + """ + Restore internet access. Not used, but kept in case it is needed. + """ + + global INTERNET_OFF + global _orig_opener + + if not INTERNET_OFF: + return + + INTERNET_OFF = False + + if verbose: + print("Internet access enabled") + + urllib.request.install_opener(_orig_opener) + + socket.create_connection = socket_create_connection + socket.socket.bind = socket_bind + socket.socket.connect = socket_connect + return socket + + +@contextlib.contextmanager +def no_internet(verbose=False): + """Context manager to temporarily disable internet access (if not already + disabled). If it was already disabled before entering the context manager + (i.e. `turn_off_internet` was called previously) then this is a no-op and + leaves internet access disabled until a manual call to `turn_on_internet`. 
+ """ + + already_disabled = INTERNET_OFF + + turn_off_internet(verbose=verbose) + try: + yield + finally: + if not already_disabled: + turn_on_internet(verbose=verbose) diff --git a/astropy/tests/helper.py b/astropy/tests/helper.py new file mode 100644 index 0000000..17336cc --- /dev/null +++ b/astropy/tests/helper.py @@ -0,0 +1,526 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" +This module provides the tools used to internally run the astropy test suite +from the installed astropy. It makes use of the `pytest` testing framework. +""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import functools +import os +import sys +import types +import warnings + +import pytest + +from ..extern import six +from ..extern.six.moves import cPickle as pickle + +try: + # Import pkg_resources to prevent it from issuing warnings upon being + # imported from within py.test. See + # https://github.com/astropy/astropy/pull/537 for a detailed explanation. + import pkg_resources # pylint: disable=W0611 +except ImportError: + pass + +from ..utils.exceptions import (AstropyDeprecationWarning, + AstropyPendingDeprecationWarning) + + +# For backward-compatibility with affiliated packages +from .runner import TestRunner # pylint: disable=W0611 + +__all__ = ['raises', 'enable_deprecations_as_exceptions', 'remote_data', + 'treat_deprecations_as_exceptions', 'catch_warnings', + 'assert_follows_unicode_guidelines', 'quantity_allclose', + 'assert_quantity_allclose', 'check_pickling_recovery', + 'pickle_protocol', 'generic_recursive_equality_test'] + +# pytest marker to mark tests which get data from the web +remote_data = pytest.mark.remote_data + + +# This is for Python 2.x and 3.x compatibility. distutils expects +# options to all be byte strings on Python 2 and Unicode strings on +# Python 3. +def _fix_user_options(options): + def to_str_or_none(x): + if x is None: + return None + return str(x) + + return [tuple(to_str_or_none(x) for x in y) for y in options] + + +def _save_coverage(cov, result, rootdir, testing_path): + """ + This method is called after the tests have been run in coverage mode + to cleanup and then save the coverage data and report. + """ + from ..utils.console import color_print + + if result != 0: + return + + # The coverage report includes the full path to the temporary + # directory, so we replace all the paths with the true source + # path. Note that this will not work properly for packages that still + # rely on 2to3. + try: + # Coverage 4.0: _harvest_data has been renamed to get_data, the + # lines dict is private + cov.get_data() + except AttributeError: + # Coverage < 4.0 + cov._harvest_data() + lines = cov.data.lines + else: + lines = cov.data._lines + + for key in list(lines.keys()): + new_path = os.path.relpath( + os.path.realpath(key), + os.path.realpath(testing_path)) + new_path = os.path.abspath( + os.path.join(rootdir, new_path)) + lines[new_path] = lines.pop(key) + + color_print('Saving coverage data in .coverage...', 'green') + cov.save() + + color_print('Saving HTML coverage report in htmlcov...', 'green') + cov.html_report(directory=os.path.join(rootdir, 'htmlcov')) + + +class raises(object): + """ + A decorator to mark that a test should raise a given exception. 
+    Use as follows::
+
+        @raises(ZeroDivisionError)
+        def test_foo():
+            x = 1/0
+
+    This can also be used as a context manager, in which case it is just
+    an alias for the ``pytest.raises`` context manager (because the
+    two have the same name, this helps avoid confusion by being
+    flexible).
+    """
+
+    # pep-8 naming exception -- this is a decorator class
+    def __init__(self, exc):
+        self._exc = exc
+        self._ctx = None
+
+    def __call__(self, func):
+        @functools.wraps(func)
+        def run_raises_test(*args, **kwargs):
+            pytest.raises(self._exc, func, *args, **kwargs)
+        return run_raises_test
+
+    def __enter__(self):
+        self._ctx = pytest.raises(self._exc)
+        return self._ctx.__enter__()
+
+    def __exit__(self, *exc_info):
+        return self._ctx.__exit__(*exc_info)
+
+
+_deprecations_as_exceptions = False
+_include_astropy_deprecations = True
+_modules_to_ignore_on_import = set([
+    'compiler',  # A deprecated stdlib module used by py.test
+    'scipy',
+    'pygments',
+    'ipykernel',
+    'IPython',  # deprecation warnings for async and await
+    'setuptools'])
+_warnings_to_ignore_entire_module = set([])
+_warnings_to_ignore_by_pyver = {
+    (3, 4): set([
+        # py.test reads files with the 'U' flag, which is now
+        # deprecated in Python 3.4.
+        r"'U' mode is deprecated",
+        # BeautifulSoup4 triggers a warning in stdlib's html module.
+        r"The strict argument and mode are deprecated\.",
+        r"The value of convert_charrefs will become True in 3\.5\. "
+        r"You are encouraged to set the value explicitly\."]),
+    (3, 5): set([
+        # py.test reads files with the 'U' flag, which is
+        # deprecated.
+        r"'U' mode is deprecated",
+        # py.test raised this warning in inspect on Python 3.5.
+        # See https://github.com/pytest-dev/pytest/pull/1009
+        # Keeping it since e.g. lxml as of 3.8.0 is still calling getargspec()
+        r"inspect\.getargspec\(\) is deprecated, use "
+        r"inspect\.signature\(\) instead"]),
+    (3, 6): set([
+        # py.test reads files with the 'U' flag, which is
+        # deprecated.
+        r"'U' mode is deprecated",
+        # inspect raises this slightly different warning on Python 3.6.
+        # Keeping it since e.g. lxml as of 3.8.0 is still calling getargspec()
+        r"inspect\.getargspec\(\) is deprecated, use "
+        r"inspect\.signature\(\) or inspect\.getfullargspec\(\)"])}
+
+
+def enable_deprecations_as_exceptions(include_astropy_deprecations=True,
+                                      modules_to_ignore_on_import=[],
+                                      warnings_to_ignore_entire_module=[],
+                                      warnings_to_ignore_by_pyver={}):
+    """
+    Turn on the feature that turns deprecations into exceptions.
+
+    Parameters
+    ----------
+    include_astropy_deprecations : bool
+        If set to `True`, ``AstropyDeprecationWarning`` and
+        ``AstropyPendingDeprecationWarning`` are also turned into exceptions.
+
+    modules_to_ignore_on_import : list of str
+        List of additional modules that generate deprecation warnings
+        on import, which are to be ignored. By default, these are already
+        included: ``compiler``, ``scipy``, ``pygments``, ``ipykernel``,
+        ``IPython``, and ``setuptools``.
+
+    warnings_to_ignore_entire_module : list of str
+        List of modules with deprecation warnings to ignore completely,
+        not just during import. If ``include_astropy_deprecations=True``
+        is given, ``AstropyDeprecationWarning`` and
+        ``AstropyPendingDeprecationWarning`` are also ignored for these
+        modules.
+
+    warnings_to_ignore_by_pyver : dict
+        Dictionary mapping a tuple of ``(major, minor)`` Python version to
+        a list of deprecation warning messages to ignore. This is in
+        addition to those already ignored by default
+        (see ``_warnings_to_ignore_by_pyver`` values).
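A typical call site is an affiliated package's ``conftest.py``; a sketch, where the extra module name is illustrative:

    from astropy.tests.helper import enable_deprecations_as_exceptions

    enable_deprecations_as_exceptions(
        include_astropy_deprecations=True,
        modules_to_ignore_on_import=['h5py'])  # 'h5py' is illustrative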
+ + """ + global _deprecations_as_exceptions + _deprecations_as_exceptions = True + + global _include_astropy_deprecations + _include_astropy_deprecations = include_astropy_deprecations + + global _modules_to_ignore_on_import + _modules_to_ignore_on_import.update(modules_to_ignore_on_import) + + global _warnings_to_ignore_entire_module + _warnings_to_ignore_entire_module.update(warnings_to_ignore_entire_module) + + global _warnings_to_ignore_by_pyver + for key, val in six.iteritems(warnings_to_ignore_by_pyver): + if key in _warnings_to_ignore_by_pyver: + _warnings_to_ignore_by_pyver[key].update(val) + else: + _warnings_to_ignore_by_pyver[key] = set(val) + + +def treat_deprecations_as_exceptions(): + """ + Turn all DeprecationWarnings (which indicate deprecated uses of + Python itself or Numpy, but not within Astropy, where we use our + own deprecation warning class) into exceptions so that we find + out about them early. + + This completely resets the warning filters and any "already seen" + warning state. + """ + # First, totally reset the warning state. The modules may change during + # this iteration thus we copy the original state to a list to iterate + # on. See https://github.com/astropy/astropy/pull/5513. + for module in list(six.itervalues(sys.modules)): + # We don't want to deal with six.MovedModules, only "real" + # modules. + if (isinstance(module, types.ModuleType) and + hasattr(module, '__warningregistry__')): + del module.__warningregistry__ + + if not _deprecations_as_exceptions: + return + + warnings.resetwarnings() + + # Hide the next couple of DeprecationWarnings + warnings.simplefilter('ignore', DeprecationWarning) + # Here's the wrinkle: a couple of our third-party dependencies + # (py.test and scipy) are still using deprecated features + # themselves, and we'd like to ignore those. Fortunately, those + # show up only at import time, so if we import those things *now*, + # before we turn the warnings into exceptions, we're golden. + for m in _modules_to_ignore_on_import: + try: + __import__(m) + except ImportError: + pass + + # Now, start over again with the warning filters + warnings.resetwarnings() + # Now, turn DeprecationWarnings into exceptions + _all_warns = [DeprecationWarning] + + # Only turn astropy deprecation warnings into exceptions if requested + if _include_astropy_deprecations: + _all_warns += [AstropyDeprecationWarning, + AstropyPendingDeprecationWarning] + + for w in _all_warns: + warnings.filterwarnings("error", ".*", w) + + # This ignores all deprecation warnings from given module(s), + # not just on import, for use of Astropy affiliated packages. + for m in _warnings_to_ignore_entire_module: + for w in _all_warns: + warnings.filterwarnings('ignore', category=w, module=m) + + for v in _warnings_to_ignore_by_pyver: + if sys.version_info[:2] == v: + for s in _warnings_to_ignore_by_pyver[v]: + warnings.filterwarnings("ignore", s, DeprecationWarning) + + +class catch_warnings(warnings.catch_warnings): + """ + A high-powered version of warnings.catch_warnings to use for testing + and to make sure that there is no dependence on the order in which + the tests are run. + + This completely blitzes any memory of any warnings that have + appeared before so that all warnings will be caught and displayed. + + ``*args`` is a set of warning classes to collect. If no arguments are + provided, all warnings are collected. 
+ + Use as follows:: + + with catch_warnings(MyCustomWarning) as w: + do.something.bad() + assert len(w) > 0 + """ + + def __init__(self, *classes): + super(catch_warnings, self).__init__(record=True) + self.classes = classes + + def __enter__(self): + warning_list = super(catch_warnings, self).__enter__() + treat_deprecations_as_exceptions() + if len(self.classes) == 0: + warnings.simplefilter('always') + else: + warnings.simplefilter('ignore') + for cls in self.classes: + warnings.simplefilter('always', cls) + return warning_list + + def __exit__(self, type, value, traceback): + treat_deprecations_as_exceptions() + + +class ignore_warnings(catch_warnings): + """ + This can be used either as a context manager or function decorator to + ignore all warnings that occur within a function or block of code. + + An optional category option can be supplied to only ignore warnings of a + certain category or categories (if a list is provided). + """ + + def __init__(self, category=None): + super(ignore_warnings, self).__init__() + + if isinstance(category, type) and issubclass(category, Warning): + self.category = [category] + else: + self.category = category + + def __call__(self, func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + # Originally this just reused self, but that doesn't work if the + # function is called more than once so we need to make a new + # context manager instance for each call + with self.__class__(category=self.category): + return func(*args, **kwargs) + + return wrapper + + def __enter__(self): + retval = super(ignore_warnings, self).__enter__() + if self.category is not None: + for category in self.category: + warnings.simplefilter('ignore', category) + else: + warnings.simplefilter('ignore') + return retval + + +def assert_follows_unicode_guidelines( + x, roundtrip=None): + """ + Test that an object follows our Unicode policy. See + "Unicode guidelines" in the coding guidelines. + + Parameters + ---------- + x : object + The instance to test + + roundtrip : module, optional + When provided, this namespace will be used to evaluate + ``repr(x)`` and ensure that it roundtrips. It will also + ensure that ``__bytes__(x)`` and ``__unicode__(x)`` roundtrip. + If not provided, no roundtrip testing will be performed. + """ + from .. import conf + from ..extern import six + + with conf.set_temp('unicode_output', False): + bytes_x = bytes(x) + unicode_x = six.text_type(x) + repr_x = repr(x) + + assert isinstance(bytes_x, bytes) + bytes_x.decode('ascii') + assert isinstance(unicode_x, six.text_type) + unicode_x.encode('ascii') + assert isinstance(repr_x, six.string_types) + if isinstance(repr_x, bytes): + repr_x.decode('ascii') + else: + repr_x.encode('ascii') + + if roundtrip is not None: + assert x.__class__(bytes_x) == x + assert x.__class__(unicode_x) == x + assert eval(repr_x, roundtrip) == x + + with conf.set_temp('unicode_output', True): + bytes_x = bytes(x) + unicode_x = six.text_type(x) + repr_x = repr(x) + + assert isinstance(bytes_x, bytes) + bytes_x.decode('ascii') + assert isinstance(unicode_x, six.text_type) + assert isinstance(repr_x, six.string_types) + if isinstance(repr_x, bytes): + repr_x.decode('ascii') + else: + repr_x.encode('ascii') + + if roundtrip is not None: + assert x.__class__(bytes_x) == x + assert x.__class__(unicode_x) == x + assert eval(repr_x, roundtrip) == x + + +@pytest.fixture(params=[0, 1, -1]) +def pickle_protocol(request): + """ + Fixture to run all the tests for protocols 0 and 1, and -1 (most advanced). 
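A sketch of how this fixture pairs with ``check_pickling_recovery`` below; ``MyThing`` stands in for any picklable class:

    def test_pickle_roundtrip(pickle_protocol):
        check_pickling_recovery(MyThing(), pickle_protocol)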
+ (Originally from astropy.table.tests.test_pickle) + """ + return request.param + + +def generic_recursive_equality_test(a, b, class_history): + """ + Check if the attributes of a and b are equal. Then, + check if the attributes of the attributes are equal. + """ + dict_a = a.__dict__ + dict_b = b.__dict__ + for key in dict_a: + assert key in dict_b,\ + "Did not pickle {0}".format(key) + if hasattr(dict_a[key], '__eq__'): + eq = (dict_a[key] == dict_b[key]) + if '__iter__' in dir(eq): + eq = (False not in eq) + assert eq, "Value of {0} changed by pickling".format(key) + + if hasattr(dict_a[key], '__dict__'): + if dict_a[key].__class__ in class_history: + # attempt to prevent infinite recursion + pass + else: + new_class_history = [dict_a[key].__class__] + new_class_history.extend(class_history) + generic_recursive_equality_test(dict_a[key], + dict_b[key], + new_class_history) + + +def check_pickling_recovery(original, protocol): + """ + Try to pickle an object. If successful, make sure + the object's attributes survived pickling and unpickling. + """ + f = pickle.dumps(original, protocol=protocol) + unpickled = pickle.loads(f) + class_history = [original.__class__] + generic_recursive_equality_test(original, unpickled, + class_history) + + +def assert_quantity_allclose(actual, desired, rtol=1.e-7, atol=None, + **kwargs): + """ + Raise an assertion if two objects are not equal up to desired tolerance. + + This is a :class:`~astropy.units.Quantity`-aware version of + :func:`numpy.testing.assert_allclose`. + """ + import numpy as np + np.testing.assert_allclose(*_unquantify_allclose_arguments(actual, desired, + rtol, atol), + **kwargs) + + +def quantity_allclose(a, b, rtol=1.e-5, atol=None, **kwargs): + """ + Returns True if two arrays are element-wise equal within a tolerance. + + This is a :class:`~astropy.units.Quantity`-aware version of + :func:`numpy.allclose`. + """ + import numpy as np + return np.allclose(*_unquantify_allclose_arguments(a, b, rtol, atol), + **kwargs) + + +def _unquantify_allclose_arguments(actual, desired, rtol, atol): + from .. 
import units as u + + actual = u.Quantity(actual, subok=True, copy=False) + + desired = u.Quantity(desired, subok=True, copy=False) + try: + desired = desired.to(actual.unit) + except u.UnitsError: + raise u.UnitsError("Units for 'desired' ({0}) and 'actual' ({1}) " + "are not convertible" + .format(desired.unit, actual.unit)) + + if atol is None: + # by default, we assume an absolute tolerance of 0 + atol = u.Quantity(0) + else: + atol = u.Quantity(atol, subok=True, copy=False) + try: + atol = atol.to(actual.unit) + except u.UnitsError: + raise u.UnitsError("Units for 'atol' ({0}) and 'actual' ({1}) " + "are not convertible" + .format(atol.unit, actual.unit)) + + rtol = u.Quantity(rtol, subok=True, copy=False) + try: + rtol = rtol.to(u.dimensionless_unscaled) + except Exception: + raise u.UnitsError("`rtol` should be dimensionless") + + return actual.value, desired.value, rtol.value, atol.value diff --git a/astropy/tests/image_tests.py b/astropy/tests/image_tests.py new file mode 100644 index 0000000..e66ab73 --- /dev/null +++ b/astropy/tests/image_tests.py @@ -0,0 +1,10 @@ +from distutils.version import LooseVersion + +import matplotlib + +MPL_VERSION = LooseVersion(matplotlib.__version__) + +ROOT = "http://{server}/testing/astropy/2017-07-12T14:12:26.217559/{mpl_version}/" + +IMAGE_REFERENCE_DIR = (ROOT.format(server='data.astropy.org', mpl_version='1.5.x') + ',' + + ROOT.format(server='www.astropy.org/astropy-data', mpl_version='1.5.x')) diff --git a/astropy/tests/output_checker.py b/astropy/tests/output_checker.py new file mode 100644 index 0000000..238a522 --- /dev/null +++ b/astropy/tests/output_checker.py @@ -0,0 +1,186 @@ +""" +Implements a replacement for `doctest.OutputChecker` that handles certain +normalizations of Python expression output. See the docstring on +`AstropyOutputChecker` for more details. +""" + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import doctest +import re + +import numpy as np + +from ..extern import six +from ..extern.six.moves import zip + +# Much of this code, particularly the parts of floating point handling, is +# borrowed from the SymPy project with permission. See licenses/SYMPY.rst +# for the full SymPy license. + +FIX = doctest.register_optionflag('FIX') +FLOAT_CMP = doctest.register_optionflag('FLOAT_CMP') +IGNORE_OUTPUT = doctest.register_optionflag('IGNORE_OUTPUT') +IGNORE_OUTPUT_2 = doctest.register_optionflag('IGNORE_OUTPUT_2') +IGNORE_OUTPUT_3 = doctest.register_optionflag('IGNORE_OUTPUT_3') + + +class AstropyOutputChecker(doctest.OutputChecker): + """ + - Removes u'' prefixes on string literals + - Ignores the 'L' suffix on long integers + - In Numpy dtype strings, removes the leading pipe, i.e. '|S9' -> + 'S9'. Numpy 1.7 no longer includes it in display. + - Supports the FLOAT_CMP flag, which parses floating point values + out of the output and compares their numerical values rather than their + string representation. This naturally supports complex numbers as well + (simply by comparing their real and imaginary parts separately). 
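For instance, under ``FLOAT_CMP`` the following doctest passes even though the printed value of ``1/3`` has more digits than the expected output (a sketch of the flag's effect):

    >>> 1. / 3.  # doctest: +FLOAT_CMP
    0.333333333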
+ """ + + _original_output_checker = doctest.OutputChecker + + _str_literal_re = re.compile( + r"(\W|^)[uU]([rR]?[\'\"])", re.UNICODE) + _byteorder_re = re.compile( + r"([\'\"])[|<>]([biufcSaUV][0-9]+)([\'\"])", re.UNICODE) + _fix_32bit_re = re.compile( + r"([\'\"])([iu])[48]([\'\"])", re.UNICODE) + _long_int_re = re.compile( + r"([0-9]+)L", re.UNICODE) + + def __init__(self): + # NOTE OutputChecker is an old-style class with no __init__ method, + # so we can't call the base class version of __init__ here + + exp = r'(?:e[+-]?\d+)' + + got_floats = (r'\s*([+-]?\d+\.\d*{0}?|' + r'[+-]?\.\d+{0}?|' + r'[+-]?\d+{0}|' + r'nan|' + r'[+-]?inf)').format(exp) + + # floats in the 'want' string may contain ellipses + want_floats = got_floats + r'(\.{3})?' + + front_sep = r'\s|[*+-,<=(\[]' + back_sep = front_sep + r'|[>j)\]]' + + fbeg = r'^{}(?={}|$)'.format(got_floats, back_sep) + fmidend = r'(?<={}){}(?={}|$)'.format(front_sep, got_floats, back_sep) + self.num_got_rgx = re.compile(r'({}|{})'.format(fbeg, fmidend)) + + fbeg = r'^{}(?={}|$)'.format(want_floats, back_sep) + fmidend = r'(?<={}){}(?={}|$)'.format(front_sep, want_floats, back_sep) + self.num_want_rgx = re.compile(r'({}|{})'.format(fbeg, fmidend)) + + def do_fixes(self, want, got): + want = re.sub(self._str_literal_re, r'\1\2', want) + want = re.sub(self._byteorder_re, r'\1\2\3', want) + want = re.sub(self._fix_32bit_re, r'\1\2\3', want) + want = re.sub(self._long_int_re, r'\1', want) + + got = re.sub(self._str_literal_re, r'\1\2', got) + got = re.sub(self._byteorder_re, r'\1\2\3', got) + got = re.sub(self._fix_32bit_re, r'\1\2\3', got) + got = re.sub(self._long_int_re, r'\1', got) + + return want, got + + def normalize_floats(self, want, got, flags): + """ + Alternative to the built-in check_output that also handles parsing + float values and comparing their numeric values rather than their + string representations. + + This requires rewriting enough of the basic check_output that, when + FLOAT_CMP is enabled, it totally takes over for check_output. + """ + + # Handle the common case first, for efficiency: + # if they're string-identical, always return true. + if got == want: + return True + + # TODO parse integers as well ? + # Parse floats and compare them. If some of the parsed floats contain + # ellipses, skip the comparison. + matches = self.num_got_rgx.finditer(got) + numbers_got = [match.group(1) for match in matches] # list of strs + matches = self.num_want_rgx.finditer(want) + numbers_want = [match.group(1) for match in matches] # list of strs + if len(numbers_got) != len(numbers_want): + return False + if len(numbers_got) > 0: + nw_ = [] + for ng, nw in zip(numbers_got, numbers_want): + if '...' in nw: + nw_.append(ng) + continue + else: + nw_.append(nw) + + if not np.allclose(float(ng), float(nw), equal_nan=True): + return False + + # replace all floats in the "got" string by those from "wanted". + # TODO: can this be done more elegantly? Used to replace all with + # '{}' and then format, but this is problematic if the string + # contains other curly braces (e.g., from a dict). + got = self.num_got_rgx.sub(lambda x: nw_.pop(0), got) + + # can be used as a special sequence to signify a + # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used. + if not (flags & doctest.DONT_ACCEPT_BLANKLINE): + # Replace in want with a blank line. + want = re.sub(r'(?m)^{}\s*?$'.format(re.escape(doctest.BLANKLINE_MARKER)), + '', want) + # If a line in got contains only spaces, then remove the + # spaces. 
+ got = re.sub(r'(?m)^\s*?$', '', got) + if got == want: + return True + + # This flag causes doctest to ignore any differences in the + # contents of whitespace strings. Note that this can be used + # in conjunction with the ELLIPSIS flag. + if flags & doctest.NORMALIZE_WHITESPACE: + got = ' '.join(got.split()) + want = ' '.join(want.split()) + if got == want: + return True + + # The ELLIPSIS flag says to let the sequence "..." in `want` + # match any substring in `got`. + if flags & doctest.ELLIPSIS: + if doctest._ellipsis_match(want, got): + return True + + # We didn't find any match; return false. + return False + + def check_output(self, want, got, flags): + if (flags & IGNORE_OUTPUT or (six.PY2 and flags & IGNORE_OUTPUT_2) or + (not six.PY2 and flags & IGNORE_OUTPUT_3)): + return True + + if flags & FIX: + want, got = self.do_fixes(want, got) + + if flags & FLOAT_CMP: + return self.normalize_floats(want, got, flags) + + # Can't use super here because doctest.OutputChecker is not a + # new-style class. + return self._original_output_checker.check_output( + self, want, got, flags) + + def output_difference(self, want, got, flags): + if flags & FIX: + want, got = self.do_fixes(want, got) + + # Can't use super here because doctest.OutputChecker is not a + # new-style class. + return self._original_output_checker.output_difference( + self, want, got, flags) diff --git a/astropy/tests/pytest_plugins.py b/astropy/tests/pytest_plugins.py new file mode 100644 index 0000000..c6ff6aa --- /dev/null +++ b/astropy/tests/pytest_plugins.py @@ -0,0 +1,371 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" +These plugins modify the behavior of py.test and are meant to be imported +into conftest.py in the root directory. +""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import __future__ + +from ..extern import six + +import ast +import datetime +import io +import locale +import math +import os +import re +import sys +import types +from pkgutil import find_loader +from collections import OrderedDict + +import pytest + +from ..config.paths import set_temp_config, set_temp_cache +from .helper import treat_deprecations_as_exceptions, ignore_warnings +from .helper import enable_deprecations_as_exceptions # pylint: disable=W0611 +from ..utils.argparse import writeable_directory +from ..utils.introspection import resolve_name + +try: + import importlib.machinery as importlib_machinery +except ImportError: # Python 2.7 + importlib_machinery = None + +pytest_plugins = ['astropy.tests.pytest_repeat'] + +_PLUGINS_PREFIX = 'astropy.extern.plugins' +for plugin in ['pytest_doctestplus', 'pytest_openfiles', 'pytest_remotedata']: + if find_loader(plugin) is None: + pytest_plugins.append('{}.{}.plugin'.format(_PLUGINS_PREFIX, plugin)) + +# these pytest hooks allow us to mark tests and run the marked tests with +# specific command line options. 
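For example, when this plugin is active the directories can be passed on the command line (the paths here are illustrative):

    pytest --config-dir=/tmp/astropy-test-config --cache-dir=/tmp/astropy-test-cache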
+ + +def pytest_addoption(parser): + + parser.addoption("--config-dir", nargs='?', type=writeable_directory, + help="specify directory for storing and retrieving the " + "Astropy configuration during tests (default is " + "to use a temporary directory created by the test " + "runner); be aware that using an Astropy config " + "file other than the default can cause some tests " + "to fail unexpectedly") + + parser.addoption("--cache-dir", nargs='?', type=writeable_directory, + help="specify directory for storing and retrieving the " + "Astropy cache during tests (default is " + "to use a temporary directory created by the test " + "runner)") + parser.addini("config_dir", + "specify directory for storing and retrieving the " + "Astropy configuration during tests (default is " + "to use a temporary directory created by the test " + "runner); be aware that using an Astropy config " + "file other than the default can cause some tests " + "to fail unexpectedly", default=None) + + parser.addini("cache_dir", + "specify directory for storing and retrieving the " + "Astropy cache during tests (default is " + "to use a temporary directory created by the test " + "runner)", default=None) + + +def pytest_configure(config): + treat_deprecations_as_exceptions() + +def pytest_runtest_setup(item): + config_dir = item.config.getini('config_dir') + cache_dir = item.config.getini('cache_dir') + + # Command-line options can override, however + config_dir = item.config.getoption('config_dir') or config_dir + cache_dir = item.config.getoption('cache_dir') or cache_dir + + # We can't really use context managers directly in py.test (although + # py.test 2.7 adds the capability), so this may look a bit hacky + if config_dir: + item.set_temp_config = set_temp_config(config_dir) + item.set_temp_config.__enter__() + if cache_dir: + item.set_temp_cache = set_temp_cache(cache_dir) + item.set_temp_cache.__enter__() + + + +def pytest_runtest_teardown(item, nextitem): + if hasattr(item, 'set_temp_cache'): + item.set_temp_cache.__exit__() + if hasattr(item, 'set_temp_config'): + item.set_temp_config.__exit__() + + +PYTEST_HEADER_MODULES = OrderedDict([('Numpy', 'numpy'), + ('Scipy', 'scipy'), + ('Matplotlib', 'matplotlib'), + ('h5py', 'h5py'), + ('Pandas', 'pandas')]) + +# This always returns with Astropy's version +from .. 
import __version__
+
+TESTED_VERSIONS = OrderedDict([('Astropy', __version__)])
+
+
+def pytest_report_header(config):
+
+    try:
+        stdoutencoding = sys.stdout.encoding or 'ascii'
+    except AttributeError:
+        stdoutencoding = 'ascii'
+
+    if six.PY2:
+        args = [x.decode('utf-8') for x in config.args]
+    else:
+        args = config.args
+
+    # TESTED_VERSIONS can contain the affiliated package version, too
+    if len(TESTED_VERSIONS) > 1:
+        for pkg, version in TESTED_VERSIONS.items():
+            if pkg != 'Astropy':
+                s = "\nRunning tests with {0} version {1}.\n".format(
+                    pkg, version)
+    else:
+        s = "\nRunning tests with Astropy version {0}.\n".format(
+            TESTED_VERSIONS['Astropy'])
+
+    # Per https://github.com/astropy/astropy/pull/4204, strip the rootdir from
+    # each directory argument
+    if hasattr(config, 'rootdir'):
+        rootdir = str(config.rootdir)
+        if not rootdir.endswith(os.sep):
+            rootdir += os.sep
+
+        dirs = [arg[len(rootdir):] if arg.startswith(rootdir) else arg
+                for arg in args]
+    else:
+        dirs = args
+
+    s += "Running tests in {0}.\n\n".format(" ".join(dirs))
+
+    s += "Date: {0}\n\n".format(datetime.datetime.now().isoformat()[:19])
+
+    from platform import platform
+    plat = platform()
+    if isinstance(plat, bytes):
+        plat = plat.decode(stdoutencoding, 'replace')
+    s += "Platform: {0}\n\n".format(plat)
+    s += "Executable: {0}\n\n".format(sys.executable)
+    s += "Full Python Version: \n{0}\n\n".format(sys.version)
+
+    s += "encodings: sys: {0}, locale: {1}, filesystem: {2}".format(
+        sys.getdefaultencoding(),
+        locale.getpreferredencoding(),
+        sys.getfilesystemencoding())
+    if sys.version_info < (3, 3, 0):
+        s += ", unicode bits: {0}".format(
+            int(math.log(sys.maxunicode, 2)))
+    s += '\n'
+
+    s += "byteorder: {0}\n".format(sys.byteorder)
+    s += "float info: dig: {0.dig}, mant_dig: {0.mant_dig}\n\n".format(
+        sys.float_info)
+
+    for module_display, module_name in six.iteritems(PYTEST_HEADER_MODULES):
+        try:
+            with ignore_warnings(DeprecationWarning):
+                module = resolve_name(module_name)
+        except ImportError:
+            s += "{0}: not available\n".format(module_display)
+        else:
+            try:
+                version = module.__version__
+            except AttributeError:
+                version = 'unknown (no __version__ attribute)'
+            s += "{0}: {1}\n".format(module_display, version)
+
+    special_opts = ["remote_data", "pep8"]
+    opts = []
+    for op in special_opts:
+        op_value = getattr(config.option, op, None)
+        if op_value:
+            if isinstance(op_value, six.string_types):
+                op = ': '.join((op, op_value))
+            opts.append(op)
+    if opts:
+        s += "Using Astropy options: {0}.\n".format(", ".join(opts))
+
+    if six.PY2:
+        s = s.encode(stdoutencoding, 'replace')
+
+    return s
+
+
+def pytest_pycollect_makemodule(path, parent):
+    # This is where we set up testing both with and without
+    # from __future__ import unicode_literals
+
+    # On Python 3, just do the regular thing that py.test does
+    if six.PY2:
+        return Pair(path, parent)
+    else:
+        return pytest.Module(path, parent)
+
+
+class Pair(pytest.File):
+    """
+    This class treats a given test .py file as a pair of .py files
+    where one has __future__ unicode_literals and the other does not.
+    """
+
+    def collect(self):
+        # First, just do the regular import of the module to make
+        # sure it's sane and valid.
This block is copied directly
+        # from py.test
+        try:
+            mod = self.fspath.pyimport(ensuresyspath=True)
+        except SyntaxError:
+            import py
+            excinfo = py.code.ExceptionInfo()
+            raise self.CollectError(excinfo.getrepr(style="short"))
+        except self.fspath.ImportMismatchError:
+            e = sys.exc_info()[1]
+            raise self.CollectError(
+                "import file mismatch:\n"
+                "imported module {!r} has this __file__ attribute:\n"
+                "  {}\n"
+                "which is not the same as the test file we want to collect:\n"
+                "  {}\n"
+                "HINT: remove __pycache__ / .pyc files and/or use a "
+                "unique basename for your test file modules".format(*e.args))
+
+        # Now get the file's content.
+        with io.open(six.text_type(self.fspath), 'rb') as fd:
+            content = fd.read()
+
+        # If the file contains the special marker, test it both ways.
+        if b'TEST_UNICODE_LITERALS' in content:
+            # Return the file in both unicode_literal-enabled and disabled forms
+            return [
+                UnicodeLiteralsModule(mod.__name__, content, self.fspath, self),
+                NoUnicodeLiteralsModule(mod.__name__, content, self.fspath, self)
+            ]
+        else:
+            return [pytest.Module(self.fspath, self)]
+
+
+_RE_FUTURE_IMPORTS = re.compile(br'from __future__ import ((\(.*?\))|([^\n]+))',
+                                flags=re.DOTALL)
+
+
+class ModifiedModule(pytest.Module):
+    def __init__(self, mod_name, content, path, parent):
+        self.mod_name = mod_name
+        self.content = content
+        super(ModifiedModule, self).__init__(path, parent)
+
+    def _importtestmodule(self):
+        # We have to remove the __future__ statements *before* parsing
+        # with compile, otherwise the flags are ignored.
+        content = re.sub(_RE_FUTURE_IMPORTS, b'\n', self.content)
+
+        new_mod = types.ModuleType(self.mod_name)
+        new_mod.__file__ = six.text_type(self.fspath)
+
+        if hasattr(self, '_transform_ast'):
+            # ast.parse doesn't let us hand-select the __future__
+            # statements, but built-in compile, with the PyCF_ONLY_AST
+            # flag does.
+            tree = compile(
+                content, six.text_type(self.fspath), 'exec',
+                self.flags | ast.PyCF_ONLY_AST, True)
+            tree = self._transform_ast(tree)
+            # Now that we've transformed the tree, recompile it
+            code = compile(
+                tree, six.text_type(self.fspath), 'exec')
+        else:
+            # If we don't need to transform the AST, we can skip
+            # parsing/compiling in two steps
+            code = compile(
+                content, six.text_type(self.fspath), 'exec',
+                self.flags, True)
+
+        pwd = os.getcwd()
+        try:
+            os.chdir(os.path.dirname(six.text_type(self.fspath)))
+            six.exec_(code, new_mod.__dict__)
+        finally:
+            os.chdir(pwd)
+        self.config.pluginmanager.consider_module(new_mod)
+        return new_mod
+
+
+class UnicodeLiteralsModule(ModifiedModule):
+    flags = (
+        __future__.absolute_import.compiler_flag |
+        __future__.division.compiler_flag |
+        __future__.print_function.compiler_flag |
+        __future__.unicode_literals.compiler_flag
+    )
+
+
+class NoUnicodeLiteralsModule(ModifiedModule):
+    flags = (
+        __future__.absolute_import.compiler_flag |
+        __future__.division.compiler_flag |
+        __future__.print_function.compiler_flag
+    )
+
+    def _transform_ast(self, tree):
+        # When unicode_literals is disabled, we still need to convert any
+        # byte string containing non-ascii characters into a Unicode string.
+        # If it doesn't decode as utf-8, we assume it's some other kind
+        # of byte string and just ultimately leave it alone.
+
+        # Note that once we drop support for Python 3.2, we should be
+        # able to remove this transformation and just put explicit u''
+        # prefixes in the test source code.
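+        # Concretely: on Python 2 a source literal such as 'ångström'
+        # compiles to a UTF-8 byte string, which the visitor below replaces
+        # with the equivalent unicode string; bytes that do not decode as
+        # UTF-8 are left unchanged.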
+ + class NonAsciiLiteral(ast.NodeTransformer): + def visit_Str(self, node): + s = node.s + if isinstance(s, bytes): + try: + s.decode('ascii') + except UnicodeDecodeError: + try: + s = s.decode('utf-8') + except UnicodeDecodeError: + pass + else: + return ast.copy_location(ast.Str(s=s), node) + return node + return NonAsciiLiteral().visit(tree) + + +def pytest_terminal_summary(terminalreporter): + """Output a warning to IPython users in case any tests failed.""" + + try: + get_ipython() + except NameError: + return + + if not terminalreporter.stats.get('failed'): + # Only issue the warning when there are actually failures + return + + terminalreporter.ensure_newline() + terminalreporter.write_line( + 'Some tests are known to fail when run from the IPython prompt; ' + 'especially, but not limited to tests involving logging and warning ' + 'handling. Unless you are certain as to the cause of the failure, ' + 'please check that the failure occurs outside IPython as well. See ' + 'http://docs.astropy.org/en/stable/known_issues.html#failing-logging-' + 'tests-when-running-the-tests-in-ipython for more information.', + yellow=True, bold=True) diff --git a/astropy/tests/pytest_repeat.py b/astropy/tests/pytest_repeat.py new file mode 100644 index 0000000..5705a8f --- /dev/null +++ b/astropy/tests/pytest_repeat.py @@ -0,0 +1,27 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" +These plugins modify the behavior of py.test and are meant to be imported +into conftest.py in the root directory. +""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +from ..extern.six.moves import range + + +def pytest_addoption(parser): + + parser.addoption('--repeat', action='store', + help='Number of times to repeat each test') + + +def pytest_generate_tests(metafunc): + + # If the repeat option is set, we add a fixture for the repeat count and + # parametrize the tests over the repeats. Solution adapted from: + # http://stackoverflow.com/q/21764473/180783 + + if metafunc.config.option.repeat is not None: + count = int(metafunc.config.option.repeat) + metafunc.fixturenames.append('tmp_ct') + metafunc.parametrize('tmp_ct', range(count)) diff --git a/astropy/tests/runner.py b/astropy/tests/runner.py new file mode 100644 index 0000000..ce31981 --- /dev/null +++ b/astropy/tests/runner.py @@ -0,0 +1,525 @@ +"""Implements the Astropy TestRunner which is a thin wrapper around py.test.""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import inspect +import os +import copy +import shlex +import sys +import tempfile +import warnings +from collections import OrderedDict + +from ..config.paths import set_temp_config, set_temp_cache +from ..extern import six +from ..utils import wraps, find_current_module +from ..utils.exceptions import AstropyWarning, AstropyDeprecationWarning + +__all__ = ['TestRunner', 'TestRunnerBase', 'keyword'] + + +class keyword(object): + """ + A decorator to mark a method as keyword argument for the ``TestRunner``. + + Parameters + ---------- + default_value : `object` + The default value for the keyword argument. (Default: `None`) + + priority : `int` + keyword argument methods are executed in order of descending priority. 
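+
+    For instance, a hypothetical keyword method on a runner subclass might
+    look like::
+
+        @keyword(default_value='none', priority=1)
+        def remote_data(self, remote_data, kwargs):
+            return ['--remote-data={0}'.format(remote_data)]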
+ """ + + def __init__(self, default_value=None, priority=0): + self.default_value = default_value + self.priority = priority + + def __call__(self, f): + def keyword(*args, **kwargs): + return f(*args, **kwargs) + + keyword._default_value = self.default_value + keyword._priority = self.priority + # Set __doc__ explicitly here rather than using wraps because we want + # to keep the function name as keyword so we can inspect it later. + keyword.__doc__ = f.__doc__ + + return keyword + + +class TestRunnerBase(object): + """ + The base class for the TestRunner. + + A test runner can be constructed by creating a subclass of this class and + defining 'keyword' methods. These are methods that have the + `~astropy.tests.runner.keyword` decorator, these methods are used to + construct allowed keyword arguments to the + `~astropy.tests.runner.TestRunnerBase.run_tests` method as a way to allow + customization of individual keyword arguments (and associated logic) + without having to re-implement the whole + `~astropy.tests.runner.TestRunnerBase.run_tests` method. + + Examples + -------- + + A simple keyword method:: + + class MyRunner(TestRunnerBase): + + @keyword('default_value'): + def spam(self, spam, kwargs): + \"\"\" + spam : `str` + The parameter description for the run_tests docstring. + \"\"\" + # Return value must be a list with a CLI parameter for pytest. + return ['--spam={}'.format(spam)] + """ + + def __init__(self, base_path): + self.base_path = os.path.abspath(base_path) + + def __new__(cls, *args, **kwargs): + # Before constructing the class parse all the methods that have been + # decorated with ``keyword``. + + # The objective of this method is to construct a default set of keyword + # arguments to the ``run_tests`` method. It does this by inspecting the + # methods of the class for functions with the name ``keyword`` which is + # the name of the decorator wrapping function. Once it has created this + # dictionary, it also formats the docstring of ``run_tests`` to be + # comprised of the docstrings for the ``keyword`` methods. + + # To add a keyword argument to the ``run_tests`` method, define a new + # method decorated with ``@keyword`` and with the ``self, name, kwargs`` + # signature. + # Get all 'function' members as the wrapped methods are functions + if six.PY2: + functions = inspect.getmembers(cls, predicate=inspect.ismethod) + else: + functions = inspect.getmembers(cls, predicate=inspect.isfunction) + + # Filter out anything that's not got the name 'keyword' + keywords = filter(lambda func: func[1].__name__ == 'keyword', functions) + # Sort all keywords based on the priority flag. + sorted_keywords = sorted(keywords, key=lambda x: x[1]._priority, reverse=True) + + cls.keywords = OrderedDict() + doc_keywords = "" + for name, func in sorted_keywords: + # Here we test if the function has been overloaded to return + # NotImplemented which is the way to disable arguments on + # subclasses. If it has been disabled we need to remove it from the + # default keywords dict. We do it in the try except block because + # we do not have access to an instance of the class, so this is + # going to error unless the method is just doing `return + # NotImplemented`. + try: + # Second argument is False, as it is normally a bool. + # The other two are placeholders for objects. 
+                if func(None, False, None) is NotImplemented:
+                    continue
+            except Exception:
+                pass
+
+            # Construct the default kwargs dict and docstring
+            cls.keywords[name] = func._default_value
+            if func.__doc__:
+                doc_keywords += ' '*8
+                doc_keywords += func.__doc__.strip()
+                doc_keywords += '\n\n'
+
+        if six.PY2:
+            cls.run_tests.__func__.__doc__ = cls.RUN_TESTS_DOCSTRING.format(keywords=doc_keywords)
+        else:
+            cls.run_tests.__doc__ = cls.RUN_TESTS_DOCSTRING.format(keywords=doc_keywords)
+
+        return super(TestRunnerBase, cls).__new__(cls)
+
+    def _generate_args(self, **kwargs):
+        # Update default values with passed kwargs
+        # but don't modify the defaults
+        keywords = copy.deepcopy(self.keywords)
+        keywords.update(kwargs)
+        # Iterate through the keywords (in order of priority)
+        args = []
+        for keyword in keywords.keys():
+            func = getattr(self, keyword)
+            result = func(keywords[keyword], keywords)
+
+            # Allow disabling of options in a subclass
+            if result is NotImplemented:
+                raise TypeError("run_tests() got an unexpected keyword argument {}".format(keyword))
+
+            # keyword methods must return a list
+            if not isinstance(result, list):
+                raise TypeError("{} keyword method must return a list".format(keyword))
+
+            args += result
+
+        if six.PY2:
+            args = [x.encode('utf-8') for x in args]
+
+        return args
+
+    RUN_TESTS_DOCSTRING = \
+        """
+        Run the tests for the package.
+
+        Parameters
+        ----------
+        {keywords}
+        See Also
+        --------
+        pytest.main : This method builds arguments for and then calls this function.
+        """
+
+    def run_tests(self, **kwargs):
+        # The docstring for this method is defined as a class variable.
+        # This allows it to be built for each subclass in __new__.
+
+        # Don't import pytest until it's actually needed to run the tests
+        import pytest
+
+        # Raise error for undefined kwargs
+        allowed_kwargs = set(self.keywords.keys())
+        passed_kwargs = set(kwargs.keys())
+        if not passed_kwargs.issubset(allowed_kwargs):
+            wrong_kwargs = list(passed_kwargs.difference(allowed_kwargs))
+            raise TypeError("run_tests() got an unexpected keyword argument {}".format(wrong_kwargs[0]))
+
+        args = self._generate_args(**kwargs)
+
+        # override the config locations to not make a new directory nor use
+        # existing cache or config
+        astropy_config = tempfile.mkdtemp('astropy_config')
+        astropy_cache = tempfile.mkdtemp('astropy_cache')
+
+        # Have to use nested with statements for cross-Python support
+        # Note, using these context managers here is superfluous if the
+        # config_dir or cache_dir options to py.test are in use, but it's
+        # also harmless to nest the contexts
+        with set_temp_config(astropy_config, delete=True):
+            with set_temp_cache(astropy_cache, delete=True):
+                return pytest.main(args=args, plugins=self.keywords['plugins'])
+
+    @classmethod
+    def make_test_runner_in(cls, path):
+        """
+        Constructs a `TestRunner` to run in the given path, and returns a
+        ``test()`` function which takes the same arguments as
+        `TestRunner.run_tests`.
+
+        The returned ``test()`` function will be defined in the module this
+        was called from. This is used to implement the ``astropy.test()``
+        function (or the equivalent for affiliated packages).
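+
+        A minimal sketch of the intended use, with a hypothetical package
+        name::
+
+            # mypackage/__init__.py
+            import os
+            from astropy.tests.runner import TestRunner
+            test = TestRunner.make_test_runner_in(os.path.dirname(__file__))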
+ """ + + runner = cls(path) + + @wraps(runner.run_tests, ('__doc__',), exclude_args=('self',)) + def test(**kwargs): + return runner.run_tests(**kwargs) + + module = find_current_module(2) + if module is not None: + test.__module__ = module.__name__ + + # A somewhat unusual hack, but delete the attached __wrapped__ + # attribute--although this is normally used to tell if the function + # was wrapped with wraps, on some version of Python this is also + # used to determine the signature to display in help() which is + # not useful in this case. We don't really care in this case if the + # function was wrapped either + if hasattr(test, '__wrapped__'): + del test.__wrapped__ + + return test + + +class TestRunner(TestRunnerBase): + """ + A test runner for astropy tests + """ + + # Increase priority so this warning is displayed first. + @keyword(priority=1000) + def coverage(self, coverage, kwargs): + if coverage: + warnings.warn( + "The coverage option is ignored on run_tests, since it " + "can not be made to work in that context. Use " + "'python setup.py test --coverage' instead.", + AstropyWarning) + + return [] + + # test_path depends on self.package_path so make sure this runs before + # test_path. + @keyword(priority=1) + def package(self, package, kwargs): + """ + package : str, optional + The name of a specific package to test, e.g. 'io.fits' or 'utils'. + If nothing is specified all default Astropy tests are run. + """ + if package is None: + self.package_path = self.base_path + else: + self.package_path = os.path.join(self.base_path, + package.replace('.', os.path.sep)) + + if not os.path.isdir(self.package_path): + raise ValueError('Package not found: {0}'.format(package)) + + if not kwargs['test_path']: + return [self.package_path] + + return [] + + @keyword() + def test_path(self, test_path, kwargs): + """ + test_path : str, optional + Specify location to test by path. May be a single file or + directory. Must be specified absolutely or relative to the + calling directory. + """ + all_args = [] + # Ensure that the package kwarg has been run. + self.package(kwargs['package'], kwargs) + if test_path: + base, ext = os.path.splitext(test_path) + + if ext in ('.rst', ''): + if kwargs['docs_path'] is None: + # This shouldn't happen from "python setup.py test" + raise ValueError( + "Can not test .rst files without a docs_path " + "specified.") + + abs_docs_path = os.path.abspath(kwargs['docs_path']) + abs_test_path = os.path.abspath( + os.path.join(abs_docs_path, os.pardir, test_path)) + + common = os.path.commonprefix((abs_docs_path, abs_test_path)) + + if os.path.exists(abs_test_path) and common == abs_docs_path: + # Since we aren't testing any Python files within + # the astropy tree, we need to forcibly load the + # astropy py.test plugins, and then turn on the + # doctest_rst plugin. + all_args.extend(['-p', 'astropy.tests.pytest_plugins', + '--doctest-rst']) + test_path = abs_test_path + + if not (os.path.isdir(test_path) or ext in ('.py', '.rst')): + raise ValueError("Test path must be a directory or a path to " + "a .py or .rst file") + + return all_args + [test_path] + + return [] + + @keyword() + def args(self, args, kwargs): + """ + args : str, optional + Additional arguments to be passed to ``pytest.main`` in the ``args`` + keyword argument. 
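+
+        For example (the selection expression is illustrative only)::
+
+            import astropy
+            astropy.test(args='-k "not remote"')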
+        """
+        if args:
+            return shlex.split(args, posix=not sys.platform.startswith('win'))
+
+        return []
+
+    @keyword()
+    def plugins(self, plugins, kwargs):
+        """
+        plugins : list, optional
+            Plugins to be passed to ``pytest.main`` in the ``plugins`` keyword
+            argument.
+        """
+        return []
+
+    @keyword()
+    def verbose(self, verbose, kwargs):
+        """
+        verbose : bool, optional
+            Convenience option to turn on verbose output from py.test. Passing
+            True is the same as specifying ``-v`` in ``args``.
+        """
+        if verbose:
+            return ['-v']
+
+        return []
+
+    @keyword()
+    def pastebin(self, pastebin, kwargs):
+        """
+        pastebin : ('failed', 'all', None), optional
+            Convenience option for turning on py.test pastebin output. Set to
+            'failed' to upload info for failed tests, or 'all' to upload info
+            for all tests.
+        """
+        if pastebin is not None:
+            if pastebin in ['failed', 'all']:
+                return ['--pastebin={0}'.format(pastebin)]
+            else:
+                raise ValueError("pastebin should be 'failed' or 'all'")
+
+        return []
+
+    @keyword(default_value='none')
+    def remote_data(self, remote_data, kwargs):
+        """
+        remote_data : {'none', 'astropy', 'any'}, optional
+            Controls whether to run tests marked with @remote_data. This can be
+            set to run no tests with remote data (``none``), only ones that use
+            data from http://data.astropy.org (``astropy``), or all tests that
+            use remote data (``any``). The default is ``none``.
+        """
+
+        if remote_data is True:
+            remote_data = 'any'
+        elif remote_data is False:
+            remote_data = 'none'
+        elif remote_data not in ('none', 'astropy', 'any'):
+            warnings.warn("The remote_data option should be one of "
+                          "none/astropy/any (found {0}). For backward-compatibility, "
+                          "assuming 'any', but you should change the option to be "
+                          "one of the supported ones to avoid issues in "
+                          "future.".format(remote_data),
+                          AstropyDeprecationWarning)
+            remote_data = 'any'
+
+        return ['--remote-data={0}'.format(remote_data)]
+
+    @keyword()
+    def pep8(self, pep8, kwargs):
+        """
+        pep8 : bool, optional
+            Turn on PEP8 checking via the pytest-pep8 plugin and disable normal
+            tests. Same as specifying ``--pep8 -k pep8`` in ``args``.
+        """
+        if pep8:
+            try:
+                import pytest_pep8  # pylint: disable=W0611
+            except ImportError:
+                raise ImportError('PEP8 checking requires pytest-pep8 plugin: '
+                                  'http://pypi.python.org/pypi/pytest-pep8')
+            else:
+                return ['--pep8', '-k', 'pep8']
+
+        return []
+
+    @keyword()
+    def pdb(self, pdb, kwargs):
+        """
+        pdb : bool, optional
+            Turn on PDB post-mortem analysis for failing tests. Same as
+            specifying ``--pdb`` in ``args``.
+        """
+        if pdb:
+            return ['--pdb']
+        return []
+
+    @keyword()
+    def open_files(self, open_files, kwargs):
+        """
+        open_files : bool, optional
+            Fail when any tests leave files open. Off by default, because
+            this adds extra run time to the test suite. Requires the
+            ``psutil`` package.
+        """
+        if open_files:
+            if kwargs['parallel'] != 0:
+                raise SystemError(
+                    "open file detection may not be used in conjunction with "
+                    "parallel testing.")
+
+            try:
+                import psutil  # pylint: disable=W0611
+            except ImportError:
+                raise SystemError(
+                    "open file detection requested, but psutil package "
+                    "is not installed.")
+
+            print("Checking for unclosed files")
+
+            return ['--open-files']
+
+        return []
+
+    @keyword(0)
+    def parallel(self, parallel, kwargs):
+        """
+        parallel : int, optional
+            When provided, run the tests in parallel on the specified
+            number of CPUs. If parallel is negative, it will use all
+            the cores on the machine. Requires the ``pytest-xdist`` plugin.
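+
+            A hypothetical invocation::
+
+                import astropy
+                astropy.test(parallel=4)  # requires pytest-xdist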
+ """ + if parallel != 0: + try: + from xdist import plugin # noqa + except ImportError: + raise SystemError( + "running tests in parallel requires the pytest-xdist package") + + return ['-n', six.text_type(parallel)] + + return [] + + @keyword() + def docs_path(self, docs_path, kwargs): + """ + docs_path : str, optional + The path to the documentation .rst files. + """ + if docs_path is not None and not kwargs['skip_docs']: + if kwargs['package'] is not None: + docs_path = os.path.join( + docs_path, kwargs['package'].replace('.', os.path.sep)) + if not os.path.exists(docs_path): + warnings.warn( + "Can not test .rst docs, since docs path " + "({0}) does not exist.".format(docs_path)) + docs_path = None + if docs_path and not kwargs['skip_docs'] and not kwargs['test_path']: + return [docs_path, '--doctest-rst'] + + return [] + + @keyword() + def skip_docs(self, skip_docs, kwargs): + """ + skip_docs : `bool`, optional + When `True`, skips running the doctests in the .rst files. + """ + # Skip docs is a bool used by docs_path only. + return [] + + @keyword() + def repeat(self, repeat, kwargs): + """ + repeat : `int`, optional + If set, specifies how many times each test should be run. This is + useful for diagnosing sporadic failures. + """ + if repeat: + return ['--repeat={0}'.format(repeat)] + + return [] + + # Override run_tests for astropy-specific fixes + def run_tests(self, **kwargs): + + # This prevents cyclical import problems that make it + # impossible to test packages that define Table types on their + # own. + from ..table import Table # pylint: disable=W0611 + + return super(TestRunner, self).run_tests(**kwargs) diff --git a/astropy/tests/setup_package.py b/astropy/tests/setup_package.py new file mode 100644 index 0000000..7081eb8 --- /dev/null +++ b/astropy/tests/setup_package.py @@ -0,0 +1,11 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + + +def get_package_data(): + return { + 'astropy.tests': ['coveragerc'], + 'astropy.tests.tests': ['data/open_file_detection.txt']} + + +def requires_2to3(): + return False diff --git a/astropy/tests/test_logger.py b/astropy/tests/test_logger.py new file mode 100644 index 0000000..2b380b8 --- /dev/null +++ b/astropy/tests/test_logger.py @@ -0,0 +1,489 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import imp +import sys +import warnings + +import pytest + +from .helper import catch_warnings +from .. import log +from ..logger import LoggingError, conf +from ..utils.exceptions import AstropyWarning, AstropyUserWarning + + +# Save original values of hooks. These are not the system values, but the +# already overwritten values since the logger already gets imported before +# this file gets executed. 
+_excepthook = sys.__excepthook__ +_showwarning = warnings.showwarning + + +try: + ip = get_ipython() +except NameError: + ip = None + + +def setup_function(function): + + # Reset modules to default + imp.reload(warnings) + imp.reload(sys) + + # Reset internal original hooks + log._showwarning_orig = None + log._excepthook_orig = None + + # Set up the logger + log._set_defaults() + + # Reset hooks + if log.warnings_logging_enabled(): + log.disable_warnings_logging() + if log.exception_logging_enabled(): + log.disable_exception_logging() + + +teardown_module = setup_function + + +def test_warnings_logging_disable_no_enable(): + with pytest.raises(LoggingError) as e: + log.disable_warnings_logging() + assert e.value.args[0] == 'Warnings logging has not been enabled' + + +def test_warnings_logging_enable_twice(): + log.enable_warnings_logging() + with pytest.raises(LoggingError) as e: + log.enable_warnings_logging() + assert e.value.args[0] == 'Warnings logging has already been enabled' + + +def test_warnings_logging_overridden(): + log.enable_warnings_logging() + warnings.showwarning = lambda: None + with pytest.raises(LoggingError) as e: + log.disable_warnings_logging() + assert e.value.args[0] == 'Cannot disable warnings logging: warnings.showwarning was not set by this logger, or has been overridden' + + +def test_warnings_logging(): + + # Without warnings logging + with catch_warnings() as warn_list: + with log.log_to_list() as log_list: + warnings.warn("This is a warning", AstropyUserWarning) + assert len(log_list) == 0 + assert len(warn_list) == 1 + assert warn_list[0].message.args[0] == "This is a warning" + + # With warnings logging + with catch_warnings() as warn_list: + log.enable_warnings_logging() + with log.log_to_list() as log_list: + warnings.warn("This is a warning", AstropyUserWarning) + log.disable_warnings_logging() + assert len(log_list) == 1 + assert len(warn_list) == 0 + assert log_list[0].levelname == 'WARNING' + assert log_list[0].message.startswith('This is a warning') + assert log_list[0].origin == 'astropy.tests.test_logger' + + # With warnings logging (differentiate between Astropy and non-Astropy) + with catch_warnings() as warn_list: + log.enable_warnings_logging() + with log.log_to_list() as log_list: + warnings.warn("This is a warning", AstropyUserWarning) + warnings.warn("This is another warning, not from Astropy") + log.disable_warnings_logging() + assert len(log_list) == 1 + assert len(warn_list) == 1 + assert log_list[0].levelname == 'WARNING' + assert log_list[0].message.startswith('This is a warning') + assert log_list[0].origin == 'astropy.tests.test_logger' + assert warn_list[0].message.args[0] == "This is another warning, not from Astropy" + + # Without warnings logging + with catch_warnings() as warn_list: + with log.log_to_list() as log_list: + warnings.warn("This is a warning", AstropyUserWarning) + assert len(log_list) == 0 + assert len(warn_list) == 1 + assert warn_list[0].message.args[0] == "This is a warning" + + +def test_warnings_logging_with_custom_class(): + class CustomAstropyWarningClass(AstropyWarning): + pass + + # With warnings logging + with catch_warnings() as warn_list: + log.enable_warnings_logging() + with log.log_to_list() as log_list: + warnings.warn("This is a warning", CustomAstropyWarningClass) + log.disable_warnings_logging() + assert len(log_list) == 1 + assert len(warn_list) == 0 + assert log_list[0].levelname == 'WARNING' + assert log_list[0].message.startswith('CustomAstropyWarningClass: This is a warning') + assert 
log_list[0].origin == 'astropy.tests.test_logger' + + +def test_warning_logging_with_io_votable_warning(): + from ..io.votable.exceptions import W02, vo_warn + + with catch_warnings() as warn_list: + log.enable_warnings_logging() + with log.log_to_list() as log_list: + vo_warn(W02, ('a', 'b')) + log.disable_warnings_logging() + assert len(log_list) == 1 + assert len(warn_list) == 0 + assert log_list[0].levelname == 'WARNING' + x = log_list[0].message.startswith(("W02: ?:?:?: W02: a attribute 'b' is " + "invalid. Must be a standard XML id")) + assert x + assert log_list[0].origin == 'astropy.tests.test_logger' + + +def test_import_error_in_warning_logging(): + """ + Regression test for https://github.com/astropy/astropy/issues/2671 + + This test actually puts a goofy fake module into ``sys.modules`` to test + this problem. + """ + + class FakeModule(object): + def __getattr__(self, attr): + raise ImportError('_showwarning should ignore any exceptions ' + 'here') + + log.enable_warnings_logging() + + sys.modules[''] = FakeModule() + try: + warnings.showwarning(AstropyWarning('Regression test for #2671'), + AstropyWarning, '', 1) + finally: + del sys.modules[''] + + +def test_exception_logging_disable_no_enable(): + with pytest.raises(LoggingError) as e: + log.disable_exception_logging() + assert e.value.args[0] == 'Exception logging has not been enabled' + + +def test_exception_logging_enable_twice(): + log.enable_exception_logging() + with pytest.raises(LoggingError) as e: + log.enable_exception_logging() + assert e.value.args[0] == 'Exception logging has already been enabled' + + +# You can't really override the exception handler in IPython this way, so +# this test doesn't really make sense in the IPython context. +@pytest.mark.skipif(str("ip is not None")) +def test_exception_logging_overridden(): + log.enable_exception_logging() + sys.excepthook = lambda etype, evalue, tb: None + with pytest.raises(LoggingError) as e: + log.disable_exception_logging() + assert e.value.args[0] == 'Cannot disable exception logging: sys.excepthook was not set by this logger, or has been overridden' + + +@pytest.mark.xfail(str("ip is not None")) +def test_exception_logging(): + + # Without exception logging + try: + with log.log_to_list() as log_list: + raise Exception("This is an Exception") + except Exception as exc: + sys.excepthook(*sys.exc_info()) + assert exc.args[0] == "This is an Exception" + else: + assert False # exception should have been raised + assert len(log_list) == 0 + + # With exception logging + try: + log.enable_exception_logging() + with log.log_to_list() as log_list: + raise Exception("This is an Exception") + except Exception as exc: + sys.excepthook(*sys.exc_info()) + assert exc.args[0] == "This is an Exception" + else: + assert False # exception should have been raised + assert len(log_list) == 1 + assert log_list[0].levelname == 'ERROR' + assert log_list[0].message.startswith('Exception: This is an Exception') + assert log_list[0].origin == 'astropy.tests.test_logger' + + # Without exception logging + log.disable_exception_logging() + try: + with log.log_to_list() as log_list: + raise Exception("This is an Exception") + except Exception as exc: + sys.excepthook(*sys.exc_info()) + assert exc.args[0] == "This is an Exception" + else: + assert False # exception should have been raised + assert len(log_list) == 0 + + +@pytest.mark.xfail(str("ip is not None")) +def test_exception_logging_origin(): + # The point here is to get an exception raised from another location + # and make sure 
the error's origin is reported correctly + + from ..utils.collections import HomogeneousList + + l = HomogeneousList(int) + try: + log.enable_exception_logging() + with log.log_to_list() as log_list: + l.append('foo') + except TypeError as exc: + sys.excepthook(*sys.exc_info()) + assert exc.args[0].startswith( + "homogeneous list must contain only objects of type ") + else: + assert False + assert len(log_list) == 1 + assert log_list[0].levelname == 'ERROR' + assert log_list[0].message.startswith( + "TypeError: homogeneous list must contain only objects of type ") + assert log_list[0].origin == 'astropy.utils.collections' + + +@pytest.mark.skipif("sys.version_info[:2] >= (3, 5)", + reason="Infinite recursion on Python 3.5") +@pytest.mark.xfail(str("ip is not None")) +def test_exception_logging_argless_exception(): + """ + Regression test for a crash that occurred on Python 3 when logging an + exception that was instantiated with no arguments (no message, etc.) + + Regression test for https://github.com/astropy/astropy/pull/4056 + """ + + try: + log.enable_exception_logging() + with log.log_to_list() as log_list: + raise Exception() + except Exception as exc: + sys.excepthook(*sys.exc_info()) + else: + assert False # exception should have been raised + assert len(log_list) == 1 + assert log_list[0].levelname == 'ERROR' + # Pytest changed the format of its error message sometime between 3.1 and + # 3.3. Using ``startswith`` lets us be general enough to handle all cases. + assert log_list[0].message.startswith('Exception') + assert log_list[0].origin == 'astropy.tests.test_logger' + + +@pytest.mark.parametrize(('level'), [None, 'DEBUG', 'INFO', 'WARN', 'ERROR']) +def test_log_to_list(level): + + orig_level = log.level + + try: + if level is not None: + log.setLevel(level) + + with log.log_to_list() as log_list: + log.error("Error message") + log.warning("Warning message") + log.info("Information message") + log.debug("Debug message") + finally: + log.setLevel(orig_level) + + if level is None: + # The log level *should* be set to whatever it was in the config + level = conf.log_level + + # Check list length + if level == 'DEBUG': + assert len(log_list) == 4 + elif level == 'INFO': + assert len(log_list) == 3 + elif level == 'WARN': + assert len(log_list) == 2 + elif level == 'ERROR': + assert len(log_list) == 1 + + # Check list content + + assert log_list[0].levelname == 'ERROR' + assert log_list[0].message.startswith('Error message') + assert log_list[0].origin == 'astropy.tests.test_logger' + + if len(log_list) >= 2: + assert log_list[1].levelname == 'WARNING' + assert log_list[1].message.startswith('Warning message') + assert log_list[1].origin == 'astropy.tests.test_logger' + + if len(log_list) >= 3: + assert log_list[2].levelname == 'INFO' + assert log_list[2].message.startswith('Information message') + assert log_list[2].origin == 'astropy.tests.test_logger' + + if len(log_list) >= 4: + assert log_list[3].levelname == 'DEBUG' + assert log_list[3].message.startswith('Debug message') + assert log_list[3].origin == 'astropy.tests.test_logger' + + +def test_log_to_list_level(): + + with log.log_to_list(filter_level='ERROR') as log_list: + log.error("Error message") + log.warning("Warning message") + + assert len(log_list) == 1 and log_list[0].levelname == 'ERROR' + + +def test_log_to_list_origin1(): + + with log.log_to_list(filter_origin='astropy.tests') as log_list: + log.error("Error message") + log.warning("Warning message") + + assert len(log_list) == 2 + + +def 
test_log_to_list_origin2(): + + with log.log_to_list(filter_origin='astropy.wcs') as log_list: + log.error("Error message") + log.warning("Warning message") + + assert len(log_list) == 0 + + +@pytest.mark.parametrize(('level'), [None, 'DEBUG', 'INFO', 'WARN', 'ERROR']) +def test_log_to_file(tmpdir, level): + + local_path = tmpdir.join('test.log') + log_file = local_path.open('wb') + log_path = str(local_path.realpath()) + orig_level = log.level + + try: + if level is not None: + log.setLevel(level) + + with log.log_to_file(log_path): + log.error("Error message") + log.warning("Warning message") + log.info("Information message") + log.debug("Debug message") + + log_file.close() + finally: + log.setLevel(orig_level) + + log_file = local_path.open('rb') + log_entries = log_file.readlines() + log_file.close() + + if level is None: + # The log level *should* be set to whatever it was in the config + level = conf.log_level + + # Check list length + if level == 'DEBUG': + assert len(log_entries) == 4 + elif level == 'INFO': + assert len(log_entries) == 3 + elif level == 'WARN': + assert len(log_entries) == 2 + elif level == 'ERROR': + assert len(log_entries) == 1 + + # Check list content + + assert eval(log_entries[0].strip())[-3:] == ( + 'astropy.tests.test_logger', 'ERROR', 'Error message') + + if len(log_entries) >= 2: + assert eval(log_entries[1].strip())[-3:] == ( + 'astropy.tests.test_logger', 'WARNING', 'Warning message') + + if len(log_entries) >= 3: + assert eval(log_entries[2].strip())[-3:] == ( + 'astropy.tests.test_logger', 'INFO', 'Information message') + + if len(log_entries) >= 4: + assert eval(log_entries[3].strip())[-3:] == ( + 'astropy.tests.test_logger', 'DEBUG', 'Debug message') + + +def test_log_to_file_level(tmpdir): + + local_path = tmpdir.join('test.log') + log_file = local_path.open('wb') + log_path = str(local_path.realpath()) + + with log.log_to_file(log_path, filter_level='ERROR'): + log.error("Error message") + log.warning("Warning message") + + log_file.close() + + log_file = local_path.open('rb') + log_entries = log_file.readlines() + log_file.close() + + assert len(log_entries) == 1 + assert eval(log_entries[0].strip())[-2:] == ( + 'ERROR', 'Error message') + + +def test_log_to_file_origin1(tmpdir): + + local_path = tmpdir.join('test.log') + log_file = local_path.open('wb') + log_path = str(local_path.realpath()) + + with log.log_to_file(log_path, filter_origin='astropy.tests'): + log.error("Error message") + log.warning("Warning message") + + log_file.close() + + log_file = local_path.open('rb') + log_entries = log_file.readlines() + log_file.close() + + assert len(log_entries) == 2 + + +def test_log_to_file_origin2(tmpdir): + + local_path = tmpdir.join('test.log') + log_file = local_path.open('wb') + log_path = str(local_path.realpath()) + + with log.log_to_file(log_path, filter_origin='astropy.wcs'): + log.error("Error message") + log.warning("Warning message") + + log_file.close() + + log_file = local_path.open('rb') + log_entries = log_file.readlines() + log_file.close() + + assert len(log_entries) == 0 diff --git a/astropy/tests/tests/__init__.py b/astropy/tests/tests/__init__.py new file mode 100644 index 0000000..800d82e --- /dev/null +++ b/astropy/tests/tests/__init__.py @@ -0,0 +1,2 @@ +from __future__ import (absolute_import, division, print_function, + unicode_literals) diff --git a/astropy/tests/tests/data/open_file_detection.txt b/astropy/tests/tests/data/open_file_detection.txt new file mode 100644 index 0000000..2cfb00b --- /dev/null +++ 
b/astropy/tests/tests/data/open_file_detection.txt @@ -0,0 +1 @@ +CONTENTS diff --git a/astropy/tests/tests/test_imports.py b/astropy/tests/tests/test_imports.py new file mode 100644 index 0000000..55d4cec --- /dev/null +++ b/astropy/tests/tests/test_imports.py @@ -0,0 +1,72 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import (absolute_import, division, print_function, + unicode_literals) +from ...extern import six + +import pkgutil +import os +import types + + +# Compatibility subpackages that should only be used on Python 2 +_py2_packages = set([ + 'astropy.extern.configobj_py2', +]) + +# Same but for Python 3 +_py3_packages = set([ + 'astropy.extern.configobj_py3', +]) + + +def test_imports(): + """ + This just imports all modules in astropy, making sure they don't have any + dependencies that sneak through + """ + + from ...utils import find_current_module + + pkgornm = find_current_module(1).__name__.split('.')[0] + + if isinstance(pkgornm, six.string_types): + package = pkgutil.get_loader(pkgornm).load_module(pkgornm) + elif (isinstance(pkgornm, types.ModuleType) and + '__init__' in pkgornm.__file__): + package = pkgornm + else: + msg = 'test_imports is not determining a valid package/package name' + raise TypeError(msg) + + if hasattr(package, '__path__'): + pkgpath = package.__path__ + elif hasattr(package, '__file__'): + pkgpath = os.path.split(package.__file__)[0] + else: + raise AttributeError('package to generate config items for does not ' + 'have __file__ or __path__') + + if six.PY2: + excludes = _py3_packages + else: + excludes = _py2_packages + + prefix = package.__name__ + '.' + + def onerror(name): + if not any(name.startswith(excl) for excl in excludes): + # A legitimate error occurred in a module that wasn't excluded + raise + + for imper, nm, ispkg in pkgutil.walk_packages(pkgpath, prefix, + onerror=onerror): + imper.find_module(nm) + + +def test_toplevel_namespace(): + import astropy + d = dir(astropy) + assert 'os' not in d + assert 'log' in d + assert 'test' in d + assert 'sys' not in d diff --git a/astropy/tests/tests/test_open_file_detection.py b/astropy/tests/tests/test_open_file_detection.py new file mode 100644 index 0000000..2f2d8c8 --- /dev/null +++ b/astropy/tests/tests/test_open_file_detection.py @@ -0,0 +1,17 @@ +from __future__ import (absolute_import, division, print_function, + unicode_literals) + + +from ...utils.data import get_pkg_data_filename + +fd = None + + +def test_open_file_detection(): + global fd + fd = open(get_pkg_data_filename('data/open_file_detection.txt')) + + +def teardown(): + if fd is not None: + fd.close() diff --git a/astropy/tests/tests/test_quantity_helpers.py b/astropy/tests/tests/test_quantity_helpers.py new file mode 100644 index 0000000..d700874 --- /dev/null +++ b/astropy/tests/tests/test_quantity_helpers.py @@ -0,0 +1,38 @@ +from ... 
import units as u + +from ..helper import assert_quantity_allclose, pytest + + +def test_assert_quantity_allclose(): + + assert_quantity_allclose([1, 2], [1, 2]) + + assert_quantity_allclose([1, 2] * u.m, [100, 200] * u.cm) + + assert_quantity_allclose([1, 2] * u.m, [101, 201] * u.cm, atol=2 * u.cm) + + with pytest.raises(AssertionError): + assert_quantity_allclose([1, 2] * u.m, [90, 200] * u.cm) + + with pytest.raises(AssertionError): + assert_quantity_allclose([1, 2] * u.m, [101, 201] * u.cm, atol=0.5 * u.cm) + + with pytest.raises(u.UnitsError) as exc: + assert_quantity_allclose([1, 2] * u.m, [100, 200]) + assert exc.value.args[0] == "Units for 'desired' () and 'actual' (m) are not convertible" + + with pytest.raises(u.UnitsError) as exc: + assert_quantity_allclose([1, 2], [100, 200] * u.cm) + assert exc.value.args[0] == "Units for 'desired' (cm) and 'actual' () are not convertible" + + with pytest.raises(u.UnitsError) as exc: + assert_quantity_allclose([1, 2] * u.m, [100, 200] * u.cm, atol=0.3) + assert exc.value.args[0] == "Units for 'atol' () and 'actual' (m) are not convertible" + + with pytest.raises(u.UnitsError) as exc: + assert_quantity_allclose([1, 2], [1, 2], atol=0.3 * u.m) + assert exc.value.args[0] == "Units for 'atol' (m) and 'actual' () are not convertible" + + with pytest.raises(u.UnitsError) as exc: + assert_quantity_allclose([1, 2], [1, 2], rtol=0.3 * u.m) + assert exc.value.args[0] == "`rtol` should be dimensionless" diff --git a/astropy/tests/tests/test_run_tests.py b/astropy/tests/tests/test_run_tests.py new file mode 100644 index 0000000..3104e1d --- /dev/null +++ b/astropy/tests/tests/test_run_tests.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- + +# Licensed under a 3-clause BSD style license - see LICENSE.rst + +# TEST_UNICODE_LITERALS + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import doctest + +from textwrap import dedent + +import pytest + +# test helper.run_tests function +from ... import test as run_tests +from ... extern import six + +from .. 
import helper + + +# run_tests should raise ValueError when asked to run on a module it can't find +def test_module_not_found(): + with helper.pytest.raises(ValueError): + run_tests(package='fake.module') + + +# run_tests should raise ValueError when passed an invalid pastebin= option +def test_pastebin_keyword(): + with helper.pytest.raises(ValueError): + run_tests(pastebin='not_an_option') + + +# TODO: Temporarily disabled, as this seems to non-deterministically fail +# def test_deprecation_warning(): +# with pytest.raises(DeprecationWarning): +# warnings.warn('test warning', DeprecationWarning) + + +def test_unicode_literal_conversion(): + assert isinstance('ångström', six.text_type) + + +def test_doctest_float_replacement(tmpdir): + test1 = dedent(""" + This will demonstrate a doctest that fails due to a few extra decimal + places:: + + >>> 1.0 / 3.0 + 0.333333333333333311 + """) + + test2 = dedent(""" + This is the same test, but it should pass with use of + +FLOAT_CMP:: + + >>> 1.0 / 3.0 # doctest: +FLOAT_CMP + 0.333333333333333311 + """) + + test1_rst = tmpdir.join('test1.rst') + test2_rst = tmpdir.join('test2.rst') + test1_rst.write(test1) + test2_rst.write(test2) + + with pytest.raises(doctest.DocTestFailure): + doctest.testfile(str(test1_rst), module_relative=False, + raise_on_error=True, verbose=False, encoding='utf-8') + + doctest.testfile(str(test2_rst), module_relative=False, + raise_on_error=True, verbose=False, encoding='utf-8') diff --git a/astropy/tests/tests/test_runner.py b/astropy/tests/tests/test_runner.py new file mode 100644 index 0000000..952e9ea --- /dev/null +++ b/astropy/tests/tests/test_runner.py @@ -0,0 +1,87 @@ +import pytest + +# Renamed these imports so that them being in the namespace will not +# cause pytest 3 to discover them as tests and then complain that +# they have __init__ defined. 
+from astropy.tests.runner import TestRunner as _TestRunner +from astropy.tests.runner import TestRunnerBase as _TestRunnerBase +from astropy.tests.runner import keyword + + +def test_disable_kwarg(): + class no_remote_data(_TestRunner): + @keyword() + def remote_data(self, remote_data, kwargs): + return NotImplemented + + r = no_remote_data('.') + with pytest.raises(TypeError): + r.run_tests(remote_data='bob') + + +def test_wrong_kwarg(): + r = _TestRunner('.') + with pytest.raises(TypeError): + r.run_tests(spam='eggs') + + +def test_invalid_kwarg(): + class bad_return(_TestRunnerBase): + @keyword() + def remote_data(self, remote_data, kwargs): + return 'bob' + + r = bad_return('.') + with pytest.raises(TypeError): + r.run_tests(remote_data='bob') + + +def test_new_kwarg(): + class Spam(_TestRunnerBase): + @keyword() + def spam(self, spam, kwargs): + return [spam] + + r = Spam('.') + + args = r._generate_args(spam='spam') + + assert ['spam'] == args + + +def test_priority(): + class Spam(_TestRunnerBase): + @keyword() + def spam(self, spam, kwargs): + return [spam] + + @keyword(priority=1) + def eggs(self, eggs, kwargs): + return [eggs] + + r = Spam('.') + + args = r._generate_args(spam='spam', eggs='eggs') + + assert ['eggs', 'spam'] == args + + +def test_docs(): + class Spam(_TestRunnerBase): + @keyword() + def spam(self, spam, kwargs): + """ + Spam Spam Spam + """ + return [spam] + + @keyword() + def eggs(self, eggs, kwargs): + """ + eggs asldjasljd + """ + return [eggs] + + r = Spam('.') + assert "eggs" in r.run_tests.__doc__ + assert "Spam Spam Spam" in r.run_tests.__doc__ diff --git a/astropy/tests/tests/test_skip_remote_data.py b/astropy/tests/tests/test_skip_remote_data.py new file mode 100644 index 0000000..21d789e --- /dev/null +++ b/astropy/tests/tests/test_skip_remote_data.py @@ -0,0 +1,49 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +# this test doesn't actually use any online data, it should just be skipped +# by run_tests because it has the remote_data decorator. 
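+# For instance, the default ``astropy.test()`` call implies remote_data='none'
+# and should deselect both tests below, while remote_data='astropy' runs only
+# the second one.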
+from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import pytest + +from ..helper import remote_data +from ...utils.data import get_pkg_data_filename, download_file + + +@remote_data +def test_skip_remote_data(pytestconfig): + + # astropy.test() has remote_data=none or remote_data=astropy but we still + # got here somehow, so fail with a helpful message + + if pytestconfig.getoption('remote_data') == 'none': + pytest.fail('@remote_data was not skipped with remote_data=none') + elif pytestconfig.getoption('remote_data') == 'astropy': + pytest.fail('@remote_data was not skipped with remote_data=astropy') + + # Test Astropy URL + get_pkg_data_filename('galactic_center/gc_2mass_k.fits') + + # Test non-Astropy URL + download_file('http://www.google.com') + + +@remote_data(source='astropy') +def test_skip_remote_data_astropy(pytestconfig): + + # astropy.test() has remote_data=none but we still got here somehow, + # so fail with a helpful message + + if pytestconfig.getoption('remote_data') == 'none': + pytest.fail('@remote_data was not skipped with remote_data=none') + + # Test Astropy URL + get_pkg_data_filename('galactic_center/gc_2mass_k.fits') + + # Test non-Astropy URL + if pytestconfig.getoption('remote_data') == 'astropy': + with pytest.raises(Exception) as exc: + download_file('http://www.google.com') + assert "An attempt was made to connect to the internet" in str(exc.value) + else: + download_file('http://www.google.com') diff --git a/astropy/tests/tests/test_socketblocker.py b/astropy/tests/tests/test_socketblocker.py new file mode 100644 index 0000000..f1b1fdd --- /dev/null +++ b/astropy/tests/tests/test_socketblocker.py @@ -0,0 +1,87 @@ +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import sys +import time + +from threading import Thread + +import pytest + +from ..disable_internet import no_internet +from ...extern.six.moves import BaseHTTPServer, SimpleHTTPServer +from ...extern.six.moves.urllib.request import urlopen + + +def test_outgoing_fails(): + with pytest.raises(IOError): + with no_internet(): + urlopen('http://www.python.org') + + +class StoppableHTTPServer(BaseHTTPServer.HTTPServer, object): + def __init__(self, *args): + super(StoppableHTTPServer, self).__init__(*args) + self.stop = False + + def handle_request(self): + self.stop = True + super(StoppableHTTPServer, self).handle_request() + + def serve_forever(self): + """ + Serve until stop set, which will happen if any request is handled + """ + while not self.stop: + self.handle_request() + + +@pytest.mark.parametrize(('localhost'), ('localhost', '127.0.0.1')) +def test_localconnect_succeeds(localhost): + """ + Ensure that connections to localhost are allowed, since these are genuinely + not remotedata. + """ + + # port "0" means find open port + # see http://stackoverflow.com/questions/1365265/on-localhost-how-to-pick-a-free-port-number + httpd = StoppableHTTPServer(('localhost', 0), + SimpleHTTPServer.SimpleHTTPRequestHandler) + + port = httpd.socket.getsockname()[1] + + server = Thread(target=httpd.serve_forever) + server.setDaemon(True) + + server.start() + time.sleep(0.1) + + urlopen('http://{localhost:s}:{port:d}'.format(localhost=localhost, port=port)).close() + + +PY3_4 = sys.version_info[:2] >= (3, 4) + + +# Used for the below test--inline functions aren't pickleable +# by multiprocessing? 
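+# (With the 'forkserver' start method the target function and its arguments
+# are sent to the workers by pickling, and functions pickle by module-level
+# name, so a nested function or lambda would not work here.)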
+def _square(x): + return x ** 2 + + +@pytest.mark.skipif('not PY3_4 or sys.platform == "win32" or sys.platform.startswith("gnu0")') +def test_multiprocessing_forkserver(): + """ + Test that using multiprocessing with forkserver works. Perhaps + a simpler more direct test would be to just open some local + sockets and pass something through them. + + Regression test for https://github.com/astropy/astropy/pull/3713 + """ + + import multiprocessing + ctx = multiprocessing.get_context('forkserver') + pool = ctx.Pool(1) + result = pool.map(_square, [1, 2, 3, 4, 5]) + pool.close() + pool.join() + assert result == [1, 4, 9, 16, 25] diff --git a/astropy/time/__init__.py b/astropy/time/__init__.py new file mode 100644 index 0000000..564a408 --- /dev/null +++ b/astropy/time/__init__.py @@ -0,0 +1,3 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from .formats import * +from .core import * diff --git a/astropy/time/core.py b/astropy/time/core.py new file mode 100644 index 0000000..94a4bd9 --- /dev/null +++ b/astropy/time/core.py @@ -0,0 +1,1758 @@ +# -*- coding: utf-8 -*- +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" +The astropy.time package provides functionality for manipulating times and +dates. Specific emphasis is placed on supporting time scales (e.g. UTC, TAI, +UT1) and time representations (e.g. JD, MJD, ISO 8601) that are used in +astronomy. +""" + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import copy +import operator +from datetime import datetime +from collections import defaultdict + +import numpy as np + +from .. import units as u, constants as const +from .. import _erfa as erfa +from ..units import UnitConversionError +from ..utils.decorators import lazyproperty +from ..utils import ShapedLikeNDArray +from ..utils.compat.misc import override__dir__ +from ..utils.data_info import MixinInfo, data_info_factory +from ..utils.compat.numpy import broadcast_to +from ..extern import six +from ..extern.six.moves import zip +from .utils import day_frac +from .formats import (TIME_FORMATS, TIME_DELTA_FORMATS, + TimeJD, TimeUnique, TimeAstropyTime, TimeDatetime) +# Import TimeFromEpoch to avoid breaking code that followed the old example of +# making a custom timescale in the documentation. 
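+# A minimal sketch of such a custom format subclass (the name and values are
+# illustrative, mirroring the built-in 'unix' format):
+#
+#     class TimeUnixCustom(TimeFromEpoch):
+#         name = 'unix_custom'
+#         unit = 1.0 / 86400.0  # days per second
+#         epoch_val = '1970-01-01 00:00:00'
+#         epoch_val2 = None
+#         epoch_scale = 'utc'
+#         epoch_format = 'iso'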
+from .formats import TimeFromEpoch # pylint: disable=W0611 + + +__all__ = ['Time', 'TimeDelta', 'TIME_SCALES', 'TIME_DELTA_SCALES', + 'ScaleValueError', 'OperandTypeError', 'TimeInfo'] + + +TIME_SCALES = ('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc') +MULTI_HOPS = {('tai', 'tcb'): ('tt', 'tdb'), + ('tai', 'tcg'): ('tt',), + ('tai', 'ut1'): ('utc',), + ('tai', 'tdb'): ('tt',), + ('tcb', 'tcg'): ('tdb', 'tt'), + ('tcb', 'tt'): ('tdb',), + ('tcb', 'ut1'): ('tdb', 'tt', 'tai', 'utc'), + ('tcb', 'utc'): ('tdb', 'tt', 'tai'), + ('tcg', 'tdb'): ('tt',), + ('tcg', 'ut1'): ('tt', 'tai', 'utc'), + ('tcg', 'utc'): ('tt', 'tai'), + ('tdb', 'ut1'): ('tt', 'tai', 'utc'), + ('tdb', 'utc'): ('tt', 'tai'), + ('tt', 'ut1'): ('tai', 'utc'), + ('tt', 'utc'): ('tai',), + } +GEOCENTRIC_SCALES = ('tai', 'tt', 'tcg') +BARYCENTRIC_SCALES = ('tcb', 'tdb') +ROTATIONAL_SCALES = ('ut1',) +TIME_DELTA_TYPES = dict((scale, scales) + for scales in (GEOCENTRIC_SCALES, BARYCENTRIC_SCALES, + ROTATIONAL_SCALES) for scale in scales) +TIME_DELTA_SCALES = TIME_DELTA_TYPES.keys() +# For time scale changes, we need L_G and L_B, which are stored in erfam.h as +# /* L_G = 1 - d(TT)/d(TCG) */ +# define ERFA_ELG (6.969290134e-10) +# /* L_B = 1 - d(TDB)/d(TCB), and TDB (s) at TAI 1977/1/1.0 */ +# define ERFA_ELB (1.550519768e-8) +# These are exposed in erfa as erfa.ELG and erfa.ELB. +# Implied: d(TT)/d(TCG) = 1-L_G +# and d(TCG)/d(TT) = 1/(1-L_G) = 1 + (1-(1-L_G))/(1-L_G) = 1 + L_G/(1-L_G) +# scale offsets as second = first + first * scale_offset[(first,second)] +SCALE_OFFSETS = {('tt', 'tai'): None, + ('tai', 'tt'): None, + ('tcg', 'tt'): -erfa.ELG, + ('tt', 'tcg'): erfa.ELG / (1. - erfa.ELG), + ('tcg', 'tai'): -erfa.ELG, + ('tai', 'tcg'): erfa.ELG / (1. - erfa.ELG), + ('tcb', 'tdb'): -erfa.ELB, + ('tdb', 'tcb'): erfa.ELB / (1. - erfa.ELB)} + +# triple-level dictionary, yay! +SIDEREAL_TIME_MODELS = { + 'mean': { + 'IAU2006': {'function': erfa.gmst06, 'scales': ('ut1', 'tt')}, + 'IAU2000': {'function': erfa.gmst00, 'scales': ('ut1', 'tt')}, + 'IAU1982': {'function': erfa.gmst82, 'scales': ('ut1',)}}, + 'apparent': { + 'IAU2006A': {'function': erfa.gst06a, 'scales': ('ut1', 'tt')}, + 'IAU2000A': {'function': erfa.gst00a, 'scales': ('ut1', 'tt')}, + 'IAU2000B': {'function': erfa.gst00b, 'scales': ('ut1',)}, + 'IAU1994': {'function': erfa.gst94, 'scales': ('ut1',)}}} + + +class TimeInfo(MixinInfo): + """ + Container for meta information like name, description, format. This is + required when the object is used as a mixin column within a table, but can + be used as a general way to store meta information. + """ + attrs_from_parent = set(['unit']) # unit is read-only and None + attr_names = MixinInfo.attr_names | {'serialize_method'} + _supports_indexing = True + + # The usual tuple of attributes needed for serialization is replaced + # by a property, since Time can be serialized different ways. 
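+    # ('formatted_value' serializes the human-readable ``value`` string,
+    # e.g. an ISO date, while 'jd1_jd2' serializes the two-double Julian
+    # Date pair and therefore round-trips at full precision; the choice
+    # per serialization context is made via ``serialize_method`` below.)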
+ _represent_as_dict_extra_attrs = ('format', 'scale', 'precision', + 'in_subfmt', 'out_subfmt', 'location', + '_delta_ut1_utc', '_delta_tdb_tt') + + @property + def _represent_as_dict_attrs(self): + method = self.serialize_method[self._serialize_context] + if method == 'formatted_value': + out = ('value',) + elif method == 'jd1_jd2': + out = ('jd1', 'jd2') + else: + raise ValueError("serialize method must be 'formatted_value' or 'jd1_jd2'") + + return out + self._represent_as_dict_extra_attrs + + def __init__(self, bound=False): + super(MixinInfo, self).__init__(bound) + + # If bound to a data object instance then create the dict of attributes + # which stores the info attribute values. + if bound: + # Specify how to serialize this object depending on context. + # If ``True`` for a context, then use formatted ``value`` attribute + # (e.g. the ISO time string). If ``False`` then use decimal jd1 and jd2. + self.serialize_method = {'fits': 'jd1_jd2', + 'ecsv': 'formatted_value', + 'hdf5': 'jd1_jd2', + 'yaml': 'jd1_jd2', + None: 'jd1_jd2'} + + @property + def unit(self): + return None + + info_summary_stats = staticmethod( + data_info_factory(names=MixinInfo._stats, + funcs=[getattr(np, stat) for stat in MixinInfo._stats])) + # When Time has mean, std, min, max methods: + # funcs = [lambda x: getattr(x, stat)() for stat_name in MixinInfo._stats]) + + def _construct_from_dict_base(self, map): + if 'jd1' in map and 'jd2' in map: + format = map.pop('format') + map['format'] = 'jd' + map['val'] = map.pop('jd1') + map['val2'] = map.pop('jd2') + else: + format = map['format'] + map['val'] = map.pop('value') + + out = self._parent_cls(**map) + out.format = format + return out + + def _construct_from_dict(self, map): + delta_ut1_utc = map.pop('_delta_ut1_utc', None) + delta_tdb_tt = map.pop('_delta_tdb_tt', None) + + out = self._construct_from_dict_base(map) + + if delta_ut1_utc is not None: + out._delta_ut1_utc = delta_ut1_utc + if delta_tdb_tt is not None: + out._delta_tdb_tt = delta_tdb_tt + + return out + + +class TimeDeltaInfo(TimeInfo): + _represent_as_dict_extra_attrs = ('format', 'scale') + + def _construct_from_dict(self, map): + return self._construct_from_dict_base(map) + + +class Time(ShapedLikeNDArray): + """ + Represent and manipulate times and dates for astronomy. + + A `Time` object is initialized with one or more times in the ``val`` + argument. The input times in ``val`` must conform to the specified + ``format`` and must correspond to the specified time ``scale``. The + optional ``val2`` time input should be supplied only for numeric input + formats (e.g. JD) where very high precision (better than 64-bit precision) + is required. + + The allowed values for ``format`` can be listed with:: + + >>> list(Time.FORMATS) + ['jd', 'mjd', 'decimalyear', 'unix', 'cxcsec', 'gps', 'plot_date', + 'datetime', 'iso', 'isot', 'yday', 'fits', 'byear', 'jyear', 'byear_str', + 'jyear_str'] + + Parameters + ---------- + val : sequence, str, number, or `~astropy.time.Time` object + Value(s) to initialize the time or times. + val2 : sequence, str, or number; optional + Value(s) to initialize the time or times. 
+    format : str, optional
+        Format of input value(s)
+    scale : str, optional
+        Time scale of input value(s), must be one of the following:
+        ('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc')
+    precision : int, optional
+        Digits of precision in string representation of time
+    in_subfmt : str, optional
+        Subformat for inputting string times
+    out_subfmt : str, optional
+        Subformat for outputting string times
+    location : `~astropy.coordinates.EarthLocation` or tuple, optional
+        If given as a tuple, it should be able to initialize an
+        EarthLocation instance, i.e., either contain 3 items with units of
+        length for geocentric coordinates, or contain a longitude, latitude,
+        and an optional height for geodetic coordinates.
+        Can be a single location, or one for each input time.
+    copy : bool, optional
+        Make a copy of the input values
+    """
+
+    SCALES = TIME_SCALES
+    """List of time scales"""
+
+    FORMATS = TIME_FORMATS
+    """Dict of time formats"""
+
+    # Make sure that reverse arithmetic (e.g., TimeDelta.__rmul__)
+    # gets called over the __mul__ of Numpy arrays.
+    __array_priority__ = 20000
+
+    # Declare that Time can be used as a Table column by defining the
+    # attribute where column attributes will be stored.
+    _astropy_column_attrs = None
+
+    def __new__(cls, val, val2=None, format=None, scale=None,
+                precision=None, in_subfmt=None, out_subfmt=None,
+                location=None, copy=False):
+
+        if isinstance(val, cls):
+            self = val.replicate(format=format, copy=copy)
+        else:
+            self = super(Time, cls).__new__(cls)
+
+        return self
+
+    def __getnewargs__(self):
+        return (self._time,)
+
+    def __init__(self, val, val2=None, format=None, scale=None,
+                 precision=None, in_subfmt=None, out_subfmt=None,
+                 location=None, copy=False):
+
+        if location is not None:
+            from ..coordinates import EarthLocation
+            if isinstance(location, EarthLocation):
+                self.location = location
+            else:
+                self.location = EarthLocation(*location)
+        else:
+            self.location = None
+
+        if isinstance(val, Time):
+            # Update _time formatting parameters if explicitly specified
+            if precision is not None:
+                self._time.precision = precision
+            if in_subfmt is not None:
+                self._time.in_subfmt = in_subfmt
+            if out_subfmt is not None:
+                self._time.out_subfmt = out_subfmt
+
+            if scale is not None:
+                self._set_scale(scale)
+        else:
+            self._init_from_vals(val, val2, format, scale, copy,
+                                 precision, in_subfmt, out_subfmt)
+
+        if self.location is not None and (self.location.size > 1 and
+                                          self.location.shape != self.shape):
+            try:
+                # check the location can be broadcast to self's shape.
+                self.location = broadcast_to(self.location, self.shape,
+                                             subok=True)
+            except Exception:
+                raise ValueError('The location with shape {0} cannot be '
+                                 'broadcast against time with shape {1}. '
+                                 'Typically, either give a single location or '
+                                 'one for each time.'
+                                 .format(self.location.shape, self.shape))
+
+    def _init_from_vals(self, val, val2, format, scale, copy,
+                        precision=None, in_subfmt=None, out_subfmt=None):
+        """
+        Set the internal _format, scale, and _time attrs from user
+        inputs.  This handles coercion into the correct shapes and
+        some basic input validation.
+ """ + if precision is None: + precision = 3 + if in_subfmt is None: + in_subfmt = '*' + if out_subfmt is None: + out_subfmt = '*' + + # Coerce val into an array + val = _make_array(val, copy) + + # If val2 is not None, ensure consistency + if val2 is not None: + val2 = _make_array(val2, copy) + try: + np.broadcast(val, val2) + except ValueError: + raise ValueError('Input val and val2 have inconsistent shape; ' + 'they cannot be broadcast together.') + + if scale is not None: + if not (isinstance(scale, six.string_types) and + scale.lower() in self.SCALES): + raise ScaleValueError("Scale {0!r} is not in the allowed scales " + "{1}".format(scale, + sorted(self.SCALES))) + + # Parse / convert input values into internal jd1, jd2 based on format + self._time = self._get_time_fmt(val, val2, format, scale, + precision, in_subfmt, out_subfmt) + self._format = self._time.name + + def _get_time_fmt(self, val, val2, format, scale, + precision, in_subfmt, out_subfmt): + """ + Given the supplied val, val2, format and scale try to instantiate + the corresponding TimeFormat class to convert the input values into + the internal jd1 and jd2. + + If format is `None` and the input is a string-type or object array then + guess available formats and stop when one matches. + """ + + if format is None and val.dtype.kind in ('S', 'U', 'O'): + formats = [(name, cls) for name, cls in self.FORMATS.items() + if issubclass(cls, TimeUnique)] + err_msg = ('any of the formats where the format keyword is ' + 'optional {0}'.format([name for name, cls in formats])) + # AstropyTime is a pseudo-format that isn't in the TIME_FORMATS registry, + # but try to guess it at the end. + formats.append(('astropy_time', TimeAstropyTime)) + + elif not (isinstance(format, six.string_types) and + format.lower() in self.FORMATS): + if format is None: + raise ValueError("No time format was given, and the input is " + "not unique") + else: + raise ValueError("Format {0!r} is not one of the allowed " + "formats {1}".format(format, + sorted(self.FORMATS))) + else: + formats = [(format, self.FORMATS[format])] + err_msg = 'the format class {0}'.format(format) + + for format, FormatClass in formats: + try: + return FormatClass(val, val2, scale, precision, in_subfmt, out_subfmt) + except UnitConversionError: + raise + except (ValueError, TypeError): + pass + else: + raise ValueError('Input values did not match {0}'.format(err_msg)) + + @classmethod + def now(cls): + """ + Creates a new object corresponding to the instant in time this + method is called. + + .. note:: + "Now" is determined using the `~datetime.datetime.utcnow` + function, so its accuracy and precision is determined by that + function. Generally that means it is set by the accuracy of + your system clock. + + Returns + ------- + nowtime + A new `Time` object (or a subclass of `Time` if this is called from + such a subclass) at the current time. + """ + # call `utcnow` immediately to be sure it's ASAP + dtnow = datetime.utcnow() + return cls(val=dtnow, format='datetime', scale='utc') + + info = TimeInfo() + + @property + def format(self): + """ + Get or set time format. + + The format defines the way times are represented when accessed via the + ``.value`` attribute. By default it is the same as the format used for + initializing the `Time` instance, but it can be set to any other value + that could be used for initialization. 
These can be listed with:: + + >>> list(Time.FORMATS) + ['jd', 'mjd', 'decimalyear', 'unix', 'cxcsec', 'gps', 'plot_date', + 'datetime', 'iso', 'isot', 'yday', 'fits', 'byear', 'jyear', 'byear_str', + 'jyear_str'] + """ + return self._format + + @format.setter + def format(self, format): + """Set time format""" + if format not in self.FORMATS: + raise ValueError('format must be one of {0}' + .format(list(self.FORMATS))) + format_cls = self.FORMATS[format] + + # If current output subformat is not in the new format then replace + # with default '*' + if hasattr(format_cls, 'subfmts'): + subfmt_names = [subfmt[0] for subfmt in format_cls.subfmts] + if self.out_subfmt not in subfmt_names: + self.out_subfmt = '*' + + self._time = format_cls(self._time.jd1, self._time.jd2, + self._time._scale, self.precision, + in_subfmt=self.in_subfmt, + out_subfmt=self.out_subfmt, + from_jd=True) + self._format = format + + def __repr__(self): + return ("<{0} object: scale='{1}' format='{2}' value={3}>" + .format(self.__class__.__name__, self.scale, self.format, + getattr(self, self.format))) + + def __str__(self): + return str(getattr(self, self.format)) + + @property + def scale(self): + """Time scale""" + return self._time.scale + + def _set_scale(self, scale): + """ + This is the key routine that actually does time scale conversions. + This is not public and not connected to the read-only scale property. + """ + + if scale == self.scale: + return + if scale not in self.SCALES: + raise ValueError("Scale {0!r} is not in the allowed scales {1}" + .format(scale, sorted(self.SCALES))) + + # Determine the chain of scale transformations to get from the current + # scale to the new scale. MULTI_HOPS contains a dict of all + # transformations (xforms) that require intermediate xforms. + # The MULTI_HOPS dict is keyed by (sys1, sys2) in alphabetical order. + xform = (self.scale, scale) + xform_sort = tuple(sorted(xform)) + multi = MULTI_HOPS.get(xform_sort, ()) + xforms = xform_sort[:1] + multi + xform_sort[-1:] + # If we made the reverse xform then reverse it now. + if xform_sort != xform: + xforms = tuple(reversed(xforms)) + + # Transform the jd1,2 pairs through the chain of scale xforms. + jd1, jd2 = self._time.jd1, self._time.jd2 + for sys1, sys2 in zip(xforms[:-1], xforms[1:]): + # Some xforms require an additional delta_ argument that is + # provided through Time methods. These values may be supplied by + # the user or computed based on available approximations. The + # get_delta_ methods are available for only one combination of + # sys1, sys2 though the property applies for both xform directions. + args = [jd1, jd2] + for sys12 in ((sys1, sys2), (sys2, sys1)): + dt_method = '_get_delta_{0}_{1}'.format(*sys12) + try: + get_dt = getattr(self, dt_method) + except AttributeError: + pass + else: + args.append(get_dt(jd1, jd2)) + break + + conv_func = getattr(erfa, sys1 + sys2) + jd1, jd2 = conv_func(*args) + self._time = self.FORMATS[self.format](jd1, jd2, scale, self.precision, + self.in_subfmt, self.out_subfmt, + from_jd=True) + + @property + def precision(self): + """ + Decimal precision when outputting seconds as floating point (int + value between 0 and 9 inclusive). 
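+
+        For example (an illustrative sketch; any ISO time behaves the same)::
+
+            >>> t = Time('2010-01-01 00:00:00', format='iso', scale='utc')
+            >>> t.precision = 6
+            >>> t.iso
+            '2010-01-01 00:00:00.000000'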
+ """ + return self._time.precision + + @precision.setter + def precision(self, val): + if not isinstance(val, int) or val < 0 or val > 9: + raise ValueError('precision attribute must be an int between ' + '0 and 9') + self._time.precision = val + del self.cache + + @property + def in_subfmt(self): + """ + Unix wildcard pattern to select subformats for parsing string input + times. + """ + return self._time.in_subfmt + + @in_subfmt.setter + def in_subfmt(self, val): + if not isinstance(val, six.string_types): + raise ValueError('in_subfmt attribute must be a string') + self._time.in_subfmt = val + del self.cache + + @property + def out_subfmt(self): + """ + Unix wildcard pattern to select subformats for outputting times. + """ + return self._time.out_subfmt + + @out_subfmt.setter + def out_subfmt(self, val): + if not isinstance(val, six.string_types): + raise ValueError('out_subfmt attribute must be a string') + self._time.out_subfmt = val + del self.cache + + @property + def shape(self): + """The shape of the time instances. + + Like `~numpy.ndarray.shape`, can be set to a new shape by assigning a + tuple. Note that if different instances share some but not all + underlying data, setting the shape of one instance can make the other + instance unusable. Hence, it is strongly recommended to get new, + reshaped instances with the ``reshape`` method. + + Raises + ------ + AttributeError + If the shape of the ``jd1``, ``jd2``, ``location``, + ``delta_ut1_utc``, or ``delta_tdb_tt`` attributes cannot be changed + without the arrays being copied. For these cases, use the + `Time.reshape` method (which copies any arrays that cannot be + reshaped in-place). + """ + return self._time.jd1.shape + + @shape.setter + def shape(self, shape): + # We have to keep track of arrays that were already reshaped, + # since we may have to return those to their original shape if a later + # shape-setting fails. + reshaped = [] + oldshape = self.shape + for attr in ('jd1', 'jd2', '_delta_ut1_utc', '_delta_tdb_tt', + 'location'): + val = getattr(self, attr, None) + if val is not None and val.size > 1: + try: + val.shape = shape + except AttributeError: + for val2 in reshaped: + val2.shape = oldshape + raise + else: + reshaped.append(val) + + def _shaped_like_input(self, value): + return value if self._time.jd1.shape else value.item() + + @property + def jd1(self): + """ + First of the two doubles that internally store time value(s) in JD. + """ + return self._shaped_like_input(self._time.jd1) + + @property + def jd2(self): + """ + Second of the two doubles that internally store time value(s) in JD. + """ + return self._shaped_like_input(self._time.jd2) + + @property + def value(self): + """Time value(s) in current format""" + # The underlying way to get the time values for the current format is: + # self._shaped_like_input(self._time.to_value(parent=self)) + # This is done in __getattr__. By calling getattr(self, self.format) + # the ``value`` attribute is cached. + return getattr(self, self.format) + + def light_travel_time(self, skycoord, kind='barycentric', location=None, ephemeris=None): + """Light travel time correction to the barycentre or heliocentre. + + The frame transformations used to calculate the location of the solar + system barycentre and the heliocentre rely on the erfa routine epv00, + which is consistent with the JPL DE405 ephemeris to an accuracy of + 11.2 km, corresponding to a light travel time of 4 microseconds. 
+ + The routine assumes the source(s) are at large distance, i.e., neglects + finite-distance effects. + + Parameters + ---------- + skycoord : `~astropy.coordinates.SkyCoord` + The sky location to calculate the correction for. + kind : str, optional + ``'barycentric'`` (default) or ``'heliocentric'`` + location : `~astropy.coordinates.EarthLocation`, optional + The location of the observatory to calculate the correction for. + If no location is given, the ``location`` attribute of the Time + object is used + ephemeris : str, optional + Solar system ephemeris to use (e.g., 'builtin', 'jpl'). By default, + use the one set with ``astropy.coordinates.solar_system_ephemeris.set``. + For more information, see `~astropy.coordinates.solar_system_ephemeris`. + + Returns + ------- + time_offset : `~astropy.time.TimeDelta` + The time offset between the barycentre or Heliocentre and Earth, + in TDB seconds. Should be added to the original time to get the + time in the Solar system barycentre or the Heliocentre. + """ + + if kind.lower() not in ('barycentric', 'heliocentric'): + raise ValueError("'kind' parameter must be one of 'heliocentric' " + "or 'barycentric'") + + if location is None: + if self.location is None: + raise ValueError('An EarthLocation needs to be set or passed ' + 'in to calculate bary- or heliocentric ' + 'corrections') + location = self.location + + from ..coordinates import (UnitSphericalRepresentation, CartesianRepresentation, + HCRS, ICRS, GCRS, solar_system_ephemeris) + + # ensure sky location is ICRS compatible + if not skycoord.is_transformable_to(ICRS()): + raise ValueError("Given skycoord is not transformable to the ICRS") + + # get location of observatory in ITRS coordinates at this Time + try: + itrs = location.get_itrs(obstime=self) + except Exception: + raise ValueError("Supplied location does not have a valid `get_itrs` method") + + with solar_system_ephemeris.set(ephemeris): + if kind.lower() == 'heliocentric': + # convert to heliocentric coordinates, aligned with ICRS + cpos = itrs.transform_to(HCRS(obstime=self)).cartesian.xyz + else: + # first we need to convert to GCRS coordinates with the correct + # obstime, since ICRS coordinates have no frame time + gcrs_coo = itrs.transform_to(GCRS(obstime=self)) + # convert to barycentric (BCRS) coordinates, aligned with ICRS + cpos = gcrs_coo.transform_to(ICRS()).cartesian.xyz + + # get unit ICRS vector to star + spos = (skycoord.icrs.represent_as(UnitSphericalRepresentation). + represent_as(CartesianRepresentation).xyz) + + # Move X,Y,Z to last dimension, to enable possible broadcasting below. + cpos = np.rollaxis(cpos, 0, cpos.ndim) + spos = np.rollaxis(spos, 0, spos.ndim) + + # calculate light travel time correction + tcor_val = (spos * cpos).sum(axis=-1) / const.c + return TimeDelta(tcor_val, scale='tdb') + + def sidereal_time(self, kind, longitude=None, model=None): + """Calculate sidereal time. + + Parameters + --------------- + kind : str + ``'mean'`` or ``'apparent'``, i.e., accounting for precession + only, or also for nutation. + longitude : `~astropy.units.Quantity`, `str`, or `None`; optional + The longitude on the Earth at which to compute the sidereal time. + Can be given as a `~astropy.units.Quantity` with angular units + (or an `~astropy.coordinates.Angle` or + `~astropy.coordinates.Longitude`), or as a name of an + observatory (currently, only ``'greenwich'`` is supported, + equivalent to 0 deg). If `None` (default), the ``lon`` attribute of + the Time object is used. 
+        model : str or `None`; optional
+            Precession (and nutation) model to use.  The available ones are:
+            - {0}: {1}
+            - {2}: {3}
+            If `None` (default), the last (most recent) one from the
+            appropriate list above is used.
+
+        Returns
+        -------
+        sidereal time : `~astropy.coordinates.Longitude`
+            Sidereal time as a quantity with units of hourangle
+        """  # docstring is formatted below
+
+        from ..coordinates import Longitude
+
+        if kind.lower() not in SIDEREAL_TIME_MODELS.keys():
+            raise ValueError('The kind of sidereal time has to be {0}'.format(
+                ' or '.join(sorted(SIDEREAL_TIME_MODELS.keys()))))
+
+        available_models = SIDEREAL_TIME_MODELS[kind.lower()]
+
+        if model is None:
+            model = sorted(available_models.keys())[-1]
+        else:
+            if model.upper() not in available_models:
+                raise ValueError(
+                    'Model {0} not implemented for {1} sidereal time; '
+                    'available models are {2}'
+                    .format(model, kind, sorted(available_models.keys())))
+
+        if longitude is None:
+            if self.location is None:
+                raise ValueError('No longitude is given but the location for '
+                                 'the Time object is not set.')
+            longitude = self.location.lon
+        elif longitude == 'greenwich':
+            longitude = Longitude(0., u.degree,
+                                  wrap_angle=180.*u.degree)
+        else:
+            # sanity check on input
+            longitude = Longitude(longitude, u.degree,
+                                  wrap_angle=180.*u.degree)
+
+        gst = self._erfa_sidereal_time(available_models[model.upper()])
+        return Longitude(gst + longitude, u.hourangle)
+
+    if isinstance(sidereal_time.__doc__, six.string_types):
+        sidereal_time.__doc__ = sidereal_time.__doc__.format(
+            'apparent', sorted(SIDEREAL_TIME_MODELS['apparent'].keys()),
+            'mean', sorted(SIDEREAL_TIME_MODELS['mean'].keys()))
+
+    def _erfa_sidereal_time(self, model):
+        """Calculate a sidereal time using an IAU precession/nutation model."""
+
+        from ..coordinates import Longitude
+
+        erfa_function = model['function']
+        erfa_parameters = [getattr(getattr(self, scale)._time, jd_part)
+                           for scale in model['scales']
+                           for jd_part in ('jd1', 'jd2')]
+
+        sidereal_time = erfa_function(*erfa_parameters)
+
+        return Longitude(sidereal_time, u.radian).to(u.hourangle)
+
+    def copy(self, format=None):
+        """
+        Return a fully independent copy of the Time object, optionally
+        changing the format.
+
+        If ``format`` is supplied then the time format of the returned Time
+        object will be set accordingly, otherwise it will be unchanged from the
+        original.
+
+        In this method a full copy of the internal time arrays will be made.
+        The internal time arrays are normally not changeable by the user so in
+        most cases the ``replicate()`` method should be used.
+
+        Parameters
+        ----------
+        format : str, optional
+            Time format of the copy.
+
+        Returns
+        -------
+        tm : Time object
+            Copy of this object
+        """
+        return self._apply('copy', format=format)
+
+    def replicate(self, format=None, copy=False):
+        """
+        Return a replica of the Time object, optionally changing the format.
+
+        If ``format`` is supplied then the time format of the returned Time
+        object will be set accordingly, otherwise it will be unchanged from the
+        original.
+
+        If ``copy`` is set to `True` then a full copy of the internal time
+        arrays will be made.  By default the replica will use a reference to
+        the original arrays when possible to save memory.  The internal time
+        arrays are normally not changeable by the user so in most cases it
+        should not be necessary to set ``copy`` to `True`.
+
+        The convenience method copy() is available in which ``copy`` is `True`
+        by default.
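+
+        For example (illustrative)::
+
+            >>> t = Time('2010-01-01 00:00:00', format='iso', scale='utc')
+            >>> t2 = t.replicate(format='jd')
+            >>> t.value, t2.value
+            ('2010-01-01 00:00:00.000', 2455197.5)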
+ + Parameters + ---------- + format : str, optional + Time format of the replica. + copy : bool, optional + Return a true copy instead of using references where possible. + + Returns + ------- + tm : Time object + Replica of this object + """ + return self._apply('copy' if copy else 'replicate', format=format) + + def _apply(self, method, *args, **kwargs): + """Create a new time object, possibly applying a method to the arrays. + + Parameters + ---------- + method : str or callable + If string, can be 'replicate' or the name of a relevant + `~numpy.ndarray` method. In the former case, a new time instance + with unchanged internal data is created, while in the latter the + method is applied to the internal ``jd1`` and ``jd2`` arrays, as + well as to possible ``location``, ``_delta_ut1_utc``, and + ``_delta_tdb_tt`` arrays. + If a callable, it is directly applied to the above arrays. + Examples: 'copy', '__getitem__', 'reshape', `~numpy.broadcast_to`. + args : tuple + Any positional arguments for ``method``. + kwargs : dict + Any keyword arguments for ``method``. If the ``format`` keyword + argument is present, this will be used as the Time format of the + replica. + + Examples + -------- + Some ways this is used internally:: + + copy : ``_apply('copy')`` + replicate : ``_apply('replicate')`` + reshape : ``_apply('reshape', new_shape)`` + index or slice : ``_apply('__getitem__', item)`` + broadcast : ``_apply(np.broadcast, shape=new_shape)`` + """ + new_format = kwargs.pop('format', None) + if new_format is None: + new_format = self.format + + if callable(method): + apply_method = lambda array: method(array, *args, **kwargs) + else: + if method == 'replicate': + apply_method = None + else: + apply_method = operator.methodcaller(method, *args, **kwargs) + + jd1, jd2 = self._time.jd1, self._time.jd2 + if apply_method: + jd1 = apply_method(jd1) + jd2 = apply_method(jd2) + + tm = super(Time, self.__class__).__new__(self.__class__) + tm._time = TimeJD(jd1, jd2, self.scale, self.precision, + self.in_subfmt, self.out_subfmt, from_jd=True) + # Optional ndarray attributes. + for attr in ('_delta_ut1_utc', '_delta_tdb_tt', 'location', + 'precision', 'in_subfmt', 'out_subfmt'): + try: + val = getattr(self, attr) + except AttributeError: + continue + + if apply_method: + # Apply the method to any value arrays (though skip if there is + # only a single element and the method would return a view, + # since in that case nothing would change). + if getattr(val, 'size', 1) > 1: + val = apply_method(val) + elif method == 'copy' or method == 'flatten': + # flatten should copy also for a single element array, but + # we cannot use it directly for array scalars, since it + # always returns a one-dimensional array. So, just copy. + val = copy.copy(val) + + setattr(tm, attr, val) + + # Copy other 'info' attr only if it has actually been defined. + # See PR #3898 for further explanation and justification, along + # with Quantity.__array_finalize__ + if 'info' in self.__dict__: + tm.info = self.info + + # Make the new internal _time object corresponding to the format + # in the copy. If the format is unchanged this process is lightweight + # and does not create any new arrays. 
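+        # (For example, ``t.replicate(format='iso')`` reaches this point with
+        # ``new_format='iso'`` while the jd1/jd2 arrays above are reused.)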
+ if new_format not in tm.FORMATS: + raise ValueError('format must be one of {0}' + .format(list(tm.FORMATS))) + + NewFormat = tm.FORMATS[new_format] + tm._time = NewFormat(tm._time.jd1, tm._time.jd2, + tm._time._scale, tm.precision, + tm.in_subfmt, tm.out_subfmt, + from_jd=True) + tm._format = new_format + + return tm + + def __copy__(self): + """ + Overrides the default behavior of the `copy.copy` function in + the python stdlib to behave like `Time.copy`. Does *not* make a + copy of the JD arrays - only copies by reference. + """ + return self.replicate() + + def __deepcopy__(self, memo): + """ + Overrides the default behavior of the `copy.deepcopy` function + in the python stdlib to behave like `Time.copy`. Does make a + copy of the JD arrays. + """ + return self.copy() + + def _advanced_index(self, indices, axis=None, keepdims=False): + """Turn argmin, argmax output into an advanced index. + + Argmin, argmax output contains indices along a given axis in an array + shaped like the other dimensions. To use this to get values at the + correct location, a list is constructed in which the other axes are + indexed sequentially. For ``keepdims`` is ``True``, the net result is + the same as constructing an index grid with ``np.ogrid`` and then + replacing the ``axis`` item with ``indices`` with its shaped expanded + at ``axis``. For ``keepdims`` is ``False``, the result is the same but + with the ``axis`` dimension removed from all list entries. + + For ``axis`` is ``None``, this calls :func:`~numpy.unravel_index`. + + Parameters + ---------- + indices : array + Output of argmin or argmax. + axis : int or None + axis along which argmin or argmax was used. + keepdims : bool + Whether to construct indices that keep or remove the axis along + which argmin or argmax was used. Default: ``False``. + + Returns + ------- + advanced_index : list of arrays + Suitable for use as an advanced index. + """ + if axis is None: + return np.unravel_index(indices, self.shape) + + ndim = self.ndim + if axis < 0: + axis = axis + ndim + + if keepdims and indices.ndim < self.ndim: + indices = np.expand_dims(indices, axis) + return [(indices if i == axis else np.arange(s).reshape( + (1,)*(i if keepdims or i < axis else i-1) + (s,) + + (1,)*(ndim-i-(1 if keepdims or i > axis else 2)))) + for i, s in enumerate(self.shape)] + + def argmin(self, axis=None, out=None): + """Return indices of the minimum values along the given axis. + + This is similar to :meth:`~numpy.ndarray.argmin`, but adapted to ensure + that the full precision given by the two doubles ``jd1`` and ``jd2`` + is used. See :func:`~numpy.argmin` for detailed documentation. + """ + # first get the minimum at normal precision. + jd = self.jd1 + self.jd2 + approx = jd.min(axis, keepdims=True) + + # Approx is very close to the true minimum, and by subtracting it at + # full precision, all numbers near 0 can be represented correctly, + # so we can be sure we get the true minimum. + # The below is effectively what would be done for + # dt = (self - self.__class__(approx, format='jd')).jd + # which translates to: + # approx_jd1, approx_jd2 = day_frac(approx, 0.) + # dt = (self.jd1 - approx_jd1) + (self.jd2 - approx_jd2) + dt = (self.jd1 - approx) + self.jd2 + return dt.argmin(axis, out) + + def argmax(self, axis=None, out=None): + """Return indices of the maximum values along the given axis. + + This is similar to :meth:`~numpy.ndarray.argmax`, but adapted to ensure + that the full precision given by the two doubles ``jd1`` and ``jd2`` + is used. 
See :func:`~numpy.argmax` for detailed documentation. + """ + # For procedure, see comment on argmin. + jd = self.jd1 + self.jd2 + approx = jd.max(axis, keepdims=True) + + dt = (self.jd1 - approx) + self.jd2 + return dt.argmax(axis, out) + + def argsort(self, axis=-1): + """Returns the indices that would sort the time array. + + This is similar to :meth:`~numpy.ndarray.argsort`, but adapted to ensure + that the full precision given by the two doubles ``jd1`` and ``jd2`` + is used, and that corresponding attributes are copied. Internally, + it uses :func:`~numpy.lexsort`, and hence no sort method can be chosen. + """ + jd_approx = self.jd + jd_remainder = (self - self.__class__(jd_approx, format='jd')).jd + if axis is None: + return np.lexsort((jd_remainder.ravel(), jd_approx.ravel())) + else: + return np.lexsort(keys=(jd_remainder, jd_approx), axis=axis) + + def min(self, axis=None, out=None, keepdims=False): + """Minimum along a given axis. + + This is similar to :meth:`~numpy.ndarray.min`, but adapted to ensure + that the full precision given by the two doubles ``jd1`` and ``jd2`` + is used, and that corresponding attributes are copied. + + Note that the ``out`` argument is present only for compatibility with + ``np.min``; since `Time` instances are immutable, it is not possible + to have an actual ``out`` to store the result in. + """ + if out is not None: + raise ValueError("Since `Time` instances are immutable, ``out`` " + "cannot be set to anything but ``None``.") + return self[self._advanced_index(self.argmin(axis), axis, keepdims)] + + def max(self, axis=None, out=None, keepdims=False): + """Maximum along a given axis. + + This is similar to :meth:`~numpy.ndarray.max`, but adapted to ensure + that the full precision given by the two doubles ``jd1`` and ``jd2`` + is used, and that corresponding attributes are copied. + + Note that the ``out`` argument is present only for compatibility with + ``np.max``; since `Time` instances are immutable, it is not possible + to have an actual ``out`` to store the result in. + """ + if out is not None: + raise ValueError("Since `Time` instances are immutable, ``out`` " + "cannot be set to anything but ``None``.") + return self[self._advanced_index(self.argmax(axis), axis, keepdims)] + + def ptp(self, axis=None, out=None, keepdims=False): + """Peak to peak (maximum - minimum) along a given axis. + + This is similar to :meth:`~numpy.ndarray.ptp`, but adapted to ensure + that the full precision given by the two doubles ``jd1`` and ``jd2`` + is used. + + Note that the ``out`` argument is present only for compatibility with + `~numpy.ptp`; since `Time` instances are immutable, it is not possible + to have an actual ``out`` to store the result in. + """ + if out is not None: + raise ValueError("Since `Time` instances are immutable, ``out`` " + "cannot be set to anything but ``None``.") + return (self.max(axis, keepdims=keepdims) - + self.min(axis, keepdims=keepdims)) + + def sort(self, axis=-1): + """Return a copy sorted along the specified axis. + + This is similar to :meth:`~numpy.ndarray.sort`, but internally uses + indexing with :func:`~numpy.lexsort` to ensure that the full precision + given by the two doubles ``jd1`` and ``jd2`` is kept, and that + corresponding attributes are properly sorted and copied as well. + + Parameters + ---------- + axis : int or None + Axis to be sorted. If ``None``, the flattened array is sorted. + By default, sort over the last axis. 
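+
+        For example (illustrative)::
+
+            >>> t = Time(['2001-01-02', '2001-01-01'], scale='utc')
+            >>> print(t.sort().iso)
+            ['2001-01-01 00:00:00.000' '2001-01-02 00:00:00.000']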
+ """ + return self[self._advanced_index(self.argsort(axis), axis, + keepdims=True)] + + @lazyproperty + def cache(self): + """ + Return the cache associated with this instance. + """ + return defaultdict(dict) + + def __getattr__(self, attr): + """ + Get dynamic attributes to output format or do timescale conversion. + """ + if attr in self.SCALES and self.scale is not None: + cache = self.cache['scale'] + if attr not in cache: + if attr == self.scale: + tm = self + else: + tm = self.replicate() + tm._set_scale(attr) + cache[attr] = tm + return cache[attr] + + elif attr in self.FORMATS: + cache = self.cache['format'] + if attr not in cache: + if attr == self.format: + tm = self + else: + tm = self.replicate(format=attr) + value = tm._shaped_like_input(tm._time.to_value(parent=tm)) + cache[attr] = value + return cache[attr] + + elif attr in TIME_SCALES: # allowed ones done above (self.SCALES) + if self.scale is None: + raise ScaleValueError("Cannot convert TimeDelta with " + "undefined scale to any defined scale.") + else: + raise ScaleValueError("Cannot convert {0} with scale " + "'{1}' to scale '{2}'" + .format(self.__class__.__name__, + self.scale, attr)) + + else: + # Should raise AttributeError + return self.__getattribute__(attr) + + @override__dir__ + def __dir__(self): + result = set(self.SCALES) + result.update(self.FORMATS) + return result + + def _match_shape(self, val): + """ + Ensure that `val` is matched to length of self. If val has length 1 + then broadcast, otherwise cast to double and make sure shape matches. + """ + val = _make_array(val, copy=True) # be conservative and copy + if val.size > 1 and val.shape != self.shape: + try: + # check the value can be broadcast to the shape of self. + val = broadcast_to(val, self.shape, subok=True) + except Exception: + raise ValueError('Attribute shape must match or be ' + 'broadcastable to that of Time object. ' + 'Typically, give either a single value or ' + 'one for each time.') + + return val + + def get_delta_ut1_utc(self, iers_table=None, return_status=False): + """Find UT1 - UTC differences by interpolating in IERS Table. + + Parameters + ---------- + iers_table : ``astropy.utils.iers.IERS`` table, optional + Table containing UT1-UTC differences from IERS Bulletins A + and/or B. If `None`, use default version (see + ``astropy.utils.iers``) + return_status : bool + Whether to return status values. If `False` (default), iers + raises `IndexError` if any time is out of the range + covered by the IERS table. + + Returns + ------- + ut1_utc : float or float array + UT1-UTC, interpolated in IERS Table + status : int or int array + Status values (if ``return_status=`True```):: + ``astropy.utils.iers.FROM_IERS_B`` + ``astropy.utils.iers.FROM_IERS_A`` + ``astropy.utils.iers.FROM_IERS_A_PREDICTION`` + ``astropy.utils.iers.TIME_BEFORE_IERS_RANGE`` + ``astropy.utils.iers.TIME_BEYOND_IERS_RANGE`` + + Notes + ----- + In normal usage, UT1-UTC differences are calculated automatically + on the first instance ut1 is needed. + + Examples + -------- + To check in code whether any times are before the IERS table range:: + + >>> from astropy.utils.iers import TIME_BEFORE_IERS_RANGE + >>> t = Time(['1961-01-01', '2000-01-01'], scale='utc') + >>> delta, status = t.get_delta_ut1_utc(return_status=True) + >>> status == TIME_BEFORE_IERS_RANGE + array([ True, False]...) 
+ """ + if iers_table is None: + from ..utils.iers import IERS + iers_table = IERS.open() + + return iers_table.ut1_utc(self.utc, return_status=return_status) + + # Property for ERFA DUT arg = UT1 - UTC + def _get_delta_ut1_utc(self, jd1=None, jd2=None): + """ + Get ERFA DUT arg = UT1 - UTC. This getter takes optional jd1 and + jd2 args because it gets called that way when converting time scales. + If delta_ut1_utc is not yet set, this will interpolate them from the + the IERS table. + """ + # Sec. 4.3.1: the arg DUT is the quantity delta_UT1 = UT1 - UTC in + # seconds. It is obtained from tables published by the IERS. + if not hasattr(self, '_delta_ut1_utc'): + from ..utils.iers import IERS_Auto + iers_table = IERS_Auto.open() + # jd1, jd2 are normally set (see above), except if delta_ut1_utc + # is access directly; ensure we behave as expected for that case + if jd1 is None: + self_utc = self.utc + jd1, jd2 = self_utc.jd1, self_utc.jd2 + scale = 'utc' + else: + scale = self.scale + # interpolate UT1-UTC in IERS table + delta = iers_table.ut1_utc(jd1, jd2) + # if we interpolated using UT1 jds, we may be off by one + # second near leap seconds (and very slightly off elsewhere) + if scale == 'ut1': + # calculate UTC using the offset we got; the ERFA routine + # is tolerant of leap seconds, so will do this right + jd1_utc, jd2_utc = erfa.ut1utc(jd1, jd2, delta) + # calculate a better estimate using the nearly correct UTC + delta = iers_table.ut1_utc(jd1_utc, jd2_utc) + + self._set_delta_ut1_utc(delta) + + return self._delta_ut1_utc + + def _set_delta_ut1_utc(self, val): + if hasattr(val, 'to'): # Matches Quantity but also TimeDelta. + val = val.to(u.second).value + val = self._match_shape(val) + self._delta_ut1_utc = val + del self.cache + + # Note can't use @property because _get_delta_tdb_tt is explicitly + # called with the optional jd1 and jd2 args. + delta_ut1_utc = property(_get_delta_ut1_utc, _set_delta_ut1_utc) + """UT1 - UTC time scale offset""" + + # Property for ERFA DTR arg = TDB - TT + def _get_delta_tdb_tt(self, jd1=None, jd2=None): + if not hasattr(self, '_delta_tdb_tt'): + # If jd1 and jd2 are not provided (which is the case for property + # attribute access) then require that the time scale is TT or TDB. + # Otherwise the computations here are not correct. + if jd1 is None or jd2 is None: + if self.scale not in ('tt', 'tdb'): + raise ValueError('Accessing the delta_tdb_tt attribute ' + 'is only possible for TT or TDB time ' + 'scales') + else: + jd1 = self._time.jd1 + jd2 = self._time.jd2 + + # First go from the current input time (which is either + # TDB or TT) to an approximate UT1. Since TT and TDB are + # pretty close (few msec?), assume TT. Similarly, since the + # UT1 terms are very small, use UTC instead of UT1. + njd1, njd2 = erfa.tttai(jd1, jd2) + njd1, njd2 = erfa.taiutc(njd1, njd2) + # subtract 0.5, so UT is fraction of the day from midnight + ut = day_frac(njd1 - 0.5, njd2)[1] + + if self.location is None: + from ..coordinates import EarthLocation + location = EarthLocation.from_geodetic(0., 0., 0.) + else: + location = self.location + # Geodetic params needed for d_tdb_tt() + lon = location.lon + rxy = np.hypot(location.x, location.y) + z = location.z + self._delta_tdb_tt = erfa.dtdb( + jd1, jd2, ut, lon.to_value(u.radian), + rxy.to_value(u.km), z.to_value(u.km)) + + return self._delta_tdb_tt + + def _set_delta_tdb_tt(self, val): + if hasattr(val, 'to'): # Matches Quantity but also TimeDelta. 
+ val = val.to(u.second).value + val = self._match_shape(val) + self._delta_tdb_tt = val + del self.cache + + # Note can't use @property because _get_delta_tdb_tt is explicitly + # called with the optional jd1 and jd2 args. + delta_tdb_tt = property(_get_delta_tdb_tt, _set_delta_tdb_tt) + """TDB - TT time scale offset""" + + def __sub__(self, other): + if not isinstance(other, Time): + try: + other = TimeDelta(other) + except Exception: + raise OperandTypeError(self, other, '-') + + # Tdelta - something is dealt with in TimeDelta, so we have + # T - Tdelta = T + # T - T = Tdelta + other_is_delta = isinstance(other, TimeDelta) + + # we need a constant scale to calculate, which is guaranteed for + # TimeDelta, but not for Time (which can be UTC) + if other_is_delta: # T - Tdelta + out = self.replicate() + if self.scale in other.SCALES: + if other.scale not in (out.scale, None): + other = getattr(other, out.scale) + else: + out._set_scale(other.scale if other.scale is not None + else 'tai') + # remove attributes that are invalidated by changing time + for attr in ('_delta_ut1_utc', '_delta_tdb_tt'): + if hasattr(out, attr): + delattr(out, attr) + + else: # T - T + self_time = (self._time if self.scale in TIME_DELTA_SCALES + else self.tai._time) + # set up TimeDelta, subtraction to be done shortly + out = TimeDelta(self_time.jd1, self_time.jd2, format='jd', + scale=self_time.scale) + + if other.scale != out.scale: + other = getattr(other, out.scale) + + jd1 = out._time.jd1 - other._time.jd1 + jd2 = out._time.jd2 - other._time.jd2 + + out._time.jd1, out._time.jd2 = day_frac(jd1, jd2) + + if other_is_delta: + # Go back to left-side scale if needed + out._set_scale(self.scale) + + return out + + def __add__(self, other): + if not isinstance(other, Time): + try: + other = TimeDelta(other) + except Exception: + raise OperandTypeError(self, other, '+') + + # Tdelta + something is dealt with in TimeDelta, so we have + # T + Tdelta = T + # T + T = error + + if not isinstance(other, TimeDelta): + raise OperandTypeError(self, other, '+') + + # ideally, we calculate in the scale of the Time item, since that is + # what we want the output in, but this may not be possible, since + # TimeDelta cannot be converted arbitrarily + out = self.replicate() + if self.scale in other.SCALES: + if other.scale not in (out.scale, None): + other = getattr(other, out.scale) + else: + out._set_scale(other.scale if other.scale is not None else 'tai') + + # remove attributes that are invalidated by changing time + for attr in ('_delta_ut1_utc', '_delta_tdb_tt'): + if hasattr(out, attr): + delattr(out, attr) + + jd1 = out._time.jd1 + other._time.jd1 + jd2 = out._time.jd2 + other._time.jd2 + + out._time.jd1, out._time.jd2 = day_frac(jd1, jd2) + + # Go back to left-side scale if needed + out._set_scale(self.scale) + + return out + + def __radd__(self, other): + return self.__add__(other) + + def __rsub__(self, other): + out = self.__sub__(other) + return -out + + def _time_difference(self, other, op=None): + """If other is of same class as self, return difference in self.scale. + Otherwise, raise OperandTypeError. 
+ """ + if other.__class__ is not self.__class__: + try: + other = self.__class__(other, scale=self.scale) + except Exception: + raise OperandTypeError(self, other, op) + + if(self.scale is not None and self.scale not in other.SCALES or + other.scale is not None and other.scale not in self.SCALES): + raise TypeError("Cannot compare TimeDelta instances with scales " + "'{0}' and '{1}'".format(self.scale, other.scale)) + + if self.scale is not None and other.scale is not None: + other = getattr(other, self.scale) + + return (self.jd1 - other.jd1) + (self.jd2 - other.jd2) + + def __lt__(self, other): + return self._time_difference(other, '<') < 0. + + def __le__(self, other): + return self._time_difference(other, '<=') <= 0. + + def __eq__(self, other): + """ + If other is an incompatible object for comparison, return `False`. + Otherwise, return `True` if the time difference between self and + other is zero. + """ + try: + diff = self._time_difference(other) + except OperandTypeError: + return False + return diff == 0. + + def __ne__(self, other): + """ + If other is an incompatible object for comparison, return `True`. + Otherwise, return `False` if the time difference between self and + other is zero. + """ + try: + diff = self._time_difference(other) + except OperandTypeError: + return True + return diff != 0. + + def __gt__(self, other): + return self._time_difference(other, '>') > 0. + + def __ge__(self, other): + return self._time_difference(other, '>=') >= 0. + + def to_datetime(self, timezone=None): + tm = self.replicate(format='datetime') + return tm._shaped_like_input(tm._time.to_value(timezone)) + + to_datetime.__doc__ = TimeDatetime.to_value.__doc__ + + +class TimeDelta(Time): + """ + Represent the time difference between two times. + + A TimeDelta object is initialized with one or more times in the ``val`` + argument. The input times in ``val`` must conform to the specified + ``format``. The optional ``val2`` time input should be supplied only for + numeric input formats (e.g. JD) where very high precision (better than + 64-bit precision) is required. + + The allowed values for ``format`` can be listed with:: + + >>> list(TimeDelta.FORMATS) + ['sec', 'jd'] + + Note that for time differences, the scale can be among three groups: + geocentric ('tai', 'tt', 'tcg'), barycentric ('tcb', 'tdb'), and rotational + ('ut1'). Within each of these, the scales for time differences are the + same. Conversion between geocentric and barycentric is possible, as there + is only a scale factor change, but one cannot convert to or from 'ut1', as + this requires knowledge of the actual times, not just their difference. For + a similar reason, 'utc' is not a valid scale for a time difference: a UTC + day is not always 86400 seconds. + + Parameters + ---------- + val : numpy ndarray, list, str, number, or `~astropy.time.TimeDelta` object + Data to initialize table. + val2 : numpy ndarray, list, str, or number; optional + Data to initialize table. + format : str, optional + Format of input value(s) + scale : str, optional + Time scale of input value(s), must be one of the following values: + ('tdb', 'tt', 'ut1', 'tcg', 'tcb', 'tai'). If not given (or + ``None``), the scale is arbitrary; when added or subtracted from a + ``Time`` instance, it will be used without conversion. 
+    copy : bool, optional
+        Make a copy of the input values
+    """
+    SCALES = TIME_DELTA_SCALES
+    """List of time delta scales."""
+
+    FORMATS = TIME_DELTA_FORMATS
+    """Dict of time delta formats."""
+
+    info = TimeDeltaInfo()
+
+    def __init__(self, val, val2=None, format=None, scale=None, copy=False):
+        if isinstance(val, TimeDelta):
+            if scale is not None:
+                self._set_scale(scale)
+        else:
+            if format is None:
+                try:
+                    val = val.to(u.day)
+                    if val2 is not None:
+                        val2 = val2.to(u.day)
+                except Exception:
+                    raise ValueError('Only Quantities with Time units can '
+                                     'be used to initialize {0} instances.'
+                                     .format(self.__class__.__name__))
+                format = 'jd'
+
+            self._init_from_vals(val, val2, format, scale, copy)
+
+            if scale is not None:
+                self.SCALES = TIME_DELTA_TYPES[scale]
+
+    def replicate(self, *args, **kwargs):
+        out = super(TimeDelta, self).replicate(*args, **kwargs)
+        out.SCALES = self.SCALES
+        return out
+
+    def _set_scale(self, scale):
+        """
+        This is the key routine that actually does time scale conversions.
+        This is not public and not connected to the read-only scale property.
+        """
+
+        if scale == self.scale:
+            return
+        if scale not in self.SCALES:
+            raise ValueError("Scale {0!r} is not in the allowed scales {1}"
+                             .format(scale, sorted(self.SCALES)))
+
+        # For TimeDelta, there can only be a change in scale factor,
+        # which is written as time2 - time1 = scale_offset * time1
+        scale_offset = SCALE_OFFSETS[(self.scale, scale)]
+        if scale_offset is None:
+            self._time.scale = scale
+        else:
+            jd1, jd2 = self._time.jd1, self._time.jd2
+            offset1, offset2 = day_frac(jd1, jd2, factor=scale_offset)
+            self._time = self.FORMATS[self.format](
+                jd1 + offset1, jd2 + offset2, scale,
+                self.precision, self.in_subfmt,
+                self.out_subfmt, from_jd=True)
+
+    def __add__(self, other):
+        # only deal with TimeDelta + TimeDelta
+        if isinstance(other, Time):
+            if not isinstance(other, TimeDelta):
+                return other.__add__(self)
+        else:
+            try:
+                other = TimeDelta(other)
+            except Exception:
+                raise OperandTypeError(self, other, '+')
+
+        # the scales should be compatible (e.g., cannot convert TDB to TAI)
+        if (self.scale is not None and self.scale not in other.SCALES or
+                other.scale is not None and other.scale not in self.SCALES):
+            raise TypeError("Cannot add TimeDelta instances with scales "
+                            "'{0}' and '{1}'".format(self.scale, other.scale))
+
+        # adjust the scale of other if the scale of self is set (or no scales)
+        if self.scale is not None or other.scale is None:
+            out = self.replicate()
+            if other.scale is not None:
+                other = getattr(other, self.scale)
+        else:
+            out = other.replicate()
+
+        jd1 = self._time.jd1 + other._time.jd1
+        jd2 = self._time.jd2 + other._time.jd2
+
+        out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)
+
+        return out
+
+    def __sub__(self, other):
+        # only deal with TimeDelta - TimeDelta
+        if isinstance(other, Time):
+            if not isinstance(other, TimeDelta):
+                raise OperandTypeError(self, other, '-')
+        else:
+            try:
+                other = TimeDelta(other)
+            except Exception:
+                raise OperandTypeError(self, other, '-')
+
+        # the scales should be compatible (e.g., cannot convert TDB to TAI)
+        if (self.scale is not None and self.scale not in other.SCALES or
+                other.scale is not None and other.scale not in self.SCALES):
+            raise TypeError("Cannot subtract TimeDelta instances with scales "
+                            "'{0}' and '{1}'".format(self.scale, other.scale))
+
+        # adjust the scale of other if the scale of self is set (or no scales)
+        if self.scale is not None or other.scale is None:
+            out = self.replicate()
+            if other.scale is not None:
+                other = getattr(other, self.scale)
+        else:
+            out = other.replicate()
+
+        jd1 = self._time.jd1 - other._time.jd1
+        jd2 = self._time.jd2 - other._time.jd2
+
+        out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)
+
+        return out
+
+    def __neg__(self):
+        """Negation of a `TimeDelta` object."""
+        new = self.copy()
+        new._time.jd1 = -self._time.jd1
+        new._time.jd2 = -self._time.jd2
+        return new
+
+    def __abs__(self):
+        """Absolute value of a `TimeDelta` object."""
+        jd1, jd2 = self._time.jd1, self._time.jd2
+        negative = jd1 + jd2 < 0
+        new = self.copy()
+        new._time.jd1 = np.where(negative, -jd1, jd1)
+        new._time.jd2 = np.where(negative, -jd2, jd2)
+        return new
+
+    def __mul__(self, other):
+        """Multiplication of `TimeDelta` objects by numbers/arrays."""
+        # check needed since otherwise the self.jd1 * other multiplication
+        # would enter here again (via __rmul__)
+        if isinstance(other, Time):
+            raise OperandTypeError(self, other, '*')
+
+        try:  # convert to straight float if dimensionless quantity
+            other = other.to(1)
+        except Exception:
+            pass
+
+        try:
+            jd1, jd2 = day_frac(self.jd1, self.jd2, factor=other)
+            out = TimeDelta(jd1, jd2, format='jd', scale=self.scale)
+        except Exception as err:  # try downgrading self to a quantity
+            try:
+                return self.to(u.day) * other
+            except Exception:
+                raise err
+
+        if self.format != 'jd':
+            out = out.replicate(format=self.format)
+        return out
+
+    def __rmul__(self, other):
+        """Multiplication of numbers/arrays with `TimeDelta` objects."""
+        return self.__mul__(other)
+
+    def __div__(self, other):
+        """Division of `TimeDelta` objects by numbers/arrays."""
+        return self.__truediv__(other)
+
+    def __rdiv__(self, other):
+        """Division by `TimeDelta` objects of numbers/arrays."""
+        return self.__rtruediv__(other)
+
+    def __truediv__(self, other):
+        """Division of `TimeDelta` objects by numbers/arrays."""
+        # cannot do __mul__(1./other) as that loses precision
+        try:  # convert to straight float if dimensionless quantity
+            other = other.to(1)
+        except Exception:
+            pass
+
+        try:
+            jd1, jd2 = day_frac(self.jd1, self.jd2, divisor=other)
+            out = TimeDelta(jd1, jd2, format='jd', scale=self.scale)
+        except Exception as err:  # try downgrading self to a quantity
+            try:
+                return self.to(u.day) / other
+            except Exception:
+                raise err
+
+        if self.format != 'jd':
+            out = out.replicate(format=self.format)
+        return out
+
+    def __rtruediv__(self, other):
+        """Division by `TimeDelta` objects of numbers/arrays."""
+        return other / self.to(u.day)
+
+    def to(self, *args, **kwargs):
+        """Convert to a `~astropy.units.Quantity` in the given units."""
+        return u.Quantity(self._time.jd1 + self._time.jd2,
+                          u.day).to(*args, **kwargs)
+
+
+class ScaleValueError(Exception):
+    pass
+
+
+def _make_array(val, copy=False):
+    """
+    Take ``val`` and convert/reshape to an array.  If ``copy`` is `True`
+    then copy input values.
+
+    Returns
+    -------
+    val : ndarray
+        Array version of ``val``.
+    """
+    val = np.array(val, copy=copy, subok=True)
+
+    # Allow only float64, string or object arrays as input
+    # (object is for datetime, maybe add more specific test later?)
+    # This also ensures the right byteorder for float64 (closes #2942).
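+    # (For instance, _make_array(['2000-01-01']) keeps the string array
+    # unchanged, while _make_array([1, 2]) is recast from int64 to float64.)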
+    if not (val.dtype == np.float64 or val.dtype.kind in 'OSUa'):
+        val = np.asanyarray(val, dtype=np.float64)
+
+    return val
+
+
+class OperandTypeError(TypeError):
+    def __init__(self, left, right, op=None):
+        op_string = '' if op is None else ' for {0}'.format(op)
+        super(OperandTypeError, self).__init__(
+            "Unsupported operand type(s){0}: "
+            "'{1}' and '{2}'".format(op_string,
+                                     left.__class__.__name__,
+                                     right.__class__.__name__))
diff --git a/astropy/time/formats.py b/astropy/time/formats.py
new file mode 100644
index 0000000..7c7ef20
--- /dev/null
+++ b/astropy/time/formats.py
@@ -0,0 +1,1150 @@
+# -*- coding: utf-8 -*-
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
+
+import fnmatch
+import time
+import re
+import datetime
+from collections import OrderedDict
+
+import numpy as np
+
+from .. import units as u
+from .. import _erfa as erfa
+from ..extern import six
+from ..extern.six.moves import zip
+from .utils import day_frac, two_sum
+
+
+__all__ = ['TimeFormat', 'TimeJD', 'TimeMJD', 'TimeFromEpoch', 'TimeUnix',
+           'TimeCxcSec', 'TimeGPS', 'TimeDecimalYear',
+           'TimePlotDate', 'TimeUnique', 'TimeDatetime', 'TimeString',
+           'TimeISO', 'TimeISOT', 'TimeFITS', 'TimeYearDayTime',
+           'TimeEpochDate', 'TimeBesselianEpoch', 'TimeJulianEpoch',
+           'TimeDeltaFormat', 'TimeDeltaSec', 'TimeDeltaJD',
+           'TimeEpochDateString', 'TimeBesselianEpochString',
+           'TimeJulianEpochString', 'TIME_FORMATS', 'TIME_DELTA_FORMATS',
+           'TimezoneInfo']
+
+__doctest_skip__ = ['TimePlotDate']
+
+# These both get filled in at end after TimeFormat subclasses defined.
+# Use an OrderedDict to fix the order in which formats are tried.
+# This ensures, e.g., that 'isot' gets tried before 'fits'.
+TIME_FORMATS = OrderedDict()
+TIME_DELTA_FORMATS = OrderedDict()
+
+# Translations between deprecated FITS timescales defined by
+# Rots et al. 2015, A&A 574:A36, and timescales used here.
+FITS_DEPRECATED_SCALES = {'TDT': 'tt', 'ET': 'tt',
+                          'GMT': 'utc', 'UT': 'utc', 'IAT': 'tai'}
+
+
+def _regexify_subfmts(subfmts):
+    """
+    Iterate through each of the sub-formats and try substituting simple
+    regular expressions for the strptime codes for year, month, day-of-month,
+    hour, minute, second.  If no % characters remain then turn the final
+    string into a compiled regex.  This assumes time formats do not have a
+    % in them.
+
+    This is done both to speed up parsing of strings and to allow mixed
+    formats where strptime does not quite work well enough.
+    """
+    new_subfmts = []
+    for subfmt_tuple in subfmts:
+        subfmt_in = subfmt_tuple[1]
+        for strptime_code, regex in (('%Y', r'(?P<year>\d\d\d\d)'),
+                                     ('%m', r'(?P<mon>\d{1,2})'),
+                                     ('%d', r'(?P<mday>\d{1,2})'),
+                                     ('%H', r'(?P<hour>\d{1,2})'),
+                                     ('%M', r'(?P<min>\d{1,2})'),
+                                     ('%S', r'(?P<sec>\d{1,2})')):
+            subfmt_in = subfmt_in.replace(strptime_code, regex)
+
+        if '%' not in subfmt_in:
+            subfmt_tuple = (subfmt_tuple[0],
+                            re.compile(subfmt_in + '$'),
+                            subfmt_tuple[2])
+        new_subfmts.append(subfmt_tuple)
+
+    return tuple(new_subfmts)
+
+
+class TimeFormatMeta(type):
+    """
+    Metaclass that adds `TimeFormat` and `TimeDeltaFormat` to the
+    `TIME_FORMATS` and `TIME_DELTA_FORMATS` registries, respectively.
+ """ + + _registry = TIME_FORMATS + + def __new__(mcls, name, bases, members): + cls = super(TimeFormatMeta, mcls).__new__(mcls, name, bases, members) + + # Register time formats that have a name, but leave out astropy_time since + # it is not a user-accessible format and is only used for initialization into + # a different format. + if 'name' in members and cls.name != 'astropy_time': + mcls._registry[cls.name] = cls + + if 'subfmts' in members: + cls.subfmts = _regexify_subfmts(members['subfmts']) + + return cls + + +@six.add_metaclass(TimeFormatMeta) +class TimeFormat(object): + """ + Base class for time representations. + + Parameters + ---------- + val1 : numpy ndarray, list, str, or number + Data to initialize table. + val2 : numpy ndarray, list, str, or number; optional + Data to initialize table. + scale : str + Time scale of input value(s) + precision : int + Precision for seconds as floating point + in_subfmt : str + Select subformat for inputting string times + out_subfmt : str + Select subformat for outputting string times + from_jd : bool + If true then val1, val2 are jd1, jd2 + """ + + def __init__(self, val1, val2, scale, precision, + in_subfmt, out_subfmt, from_jd=False): + self.scale = scale # validation of scale done later with _check_scale + self.precision = precision + self.in_subfmt = in_subfmt + self.out_subfmt = out_subfmt + + if from_jd: + self.jd1 = val1 + self.jd2 = val2 + else: + val1, val2 = self._check_val_type(val1, val2) + self.set_jds(val1, val2) + + def __len__(self): + return len(self.jd1) + + @property + def scale(self): + """Time scale""" + self._scale = self._check_scale(self._scale) + return self._scale + + @scale.setter + def scale(self, val): + self._scale = val + + def _check_val_type(self, val1, val2): + """Input value validation, typically overridden by derived classes""" + if not (val1.dtype == np.double and np.all(np.isfinite(val1)) and + (val2 is None or + val2.dtype == np.double and np.all(np.isfinite(val2)))): + raise TypeError('Input values for {0} class must be finite doubles' + .format(self.name)) + + if getattr(val1, 'unit', None) is not None: + # Possibly scaled unit any quantity-likes should be converted to + _unit = u.CompositeUnit(getattr(self, 'unit', 1.), [u.day], [1]) + val1 = u.Quantity(val1, copy=False).to_value(_unit) + if val2 is not None: + val2 = u.Quantity(val2, copy=False).to_value(_unit) + elif getattr(val2, 'unit', None) is not None: + raise TypeError('Cannot mix float and Quantity inputs') + + if val2 is None: + val2 = np.zeros_like(val1) + + def asarray_or_scalar(val): + """ + Remove ndarray subclasses since for jd1/jd2 we want a pure ndarray + or a Python or numpy scalar. + """ + return np.asarray(val) if isinstance(val, np.ndarray) else val + + return asarray_or_scalar(val1), asarray_or_scalar(val2) + + def _check_scale(self, scale): + """ + Return a validated scale value. + + If there is a class attribute 'scale' then that defines the default / + required time scale for this format. In this case if a scale value was + provided that needs to match the class default, otherwise return + the class default. + + Otherwise just make sure that scale is in the allowed list of + scales. Provide a different error message if `None` (no value) was + supplied. 
+ """ + if hasattr(self.__class__, 'epoch_scale') and scale is None: + scale = self.__class__.epoch_scale + + if scale is None: + scale = 'utc' # Default scale as of astropy 0.4 + + if scale not in TIME_SCALES: + raise ScaleValueError("Scale value '{0}' not in " + "allowed values {1}" + .format(scale, TIME_SCALES)) + + return scale + + def set_jds(self, val1, val2): + """ + Set internal jd1 and jd2 from val1 and val2. Must be provided + by derived classes. + """ + raise NotImplementedError + + def to_value(self, parent=None): + """ + Return time representation from internal jd1 and jd2. This is + the base method that ignores ``parent`` and requires that + subclasses implement the ``value`` property. Subclasses that + require ``parent`` or have other optional args for ``to_value`` + should compute and return the value directly. + """ + return self.value + + @property + def value(self): + raise NotImplementedError + + +class TimeJD(TimeFormat): + """ + Julian Date time format. + This represents the number of days since the beginning of + the Julian Period. + For example, 2451544.5 in JD is midnight on January 1, 2000. + """ + name = 'jd' + + def set_jds(self, val1, val2): + self._check_scale(self._scale) # Validate scale. + self.jd1, self.jd2 = day_frac(val1, val2) + + @property + def value(self): + return self.jd1 + self.jd2 + + +class TimeMJD(TimeFormat): + """ + Modified Julian Date time format. + This represents the number of days since midnight on November 17, 1858. + For example, 51544.0 in MJD is midnight on January 1, 2000. + """ + name = 'mjd' + + def set_jds(self, val1, val2): + # TODO - this routine and vals should be Cythonized to follow the ERFA + # convention of preserving precision by adding to the larger of the two + # values in a vectorized operation. But in most practical cases the + # first one is probably biggest. + self._check_scale(self._scale) # Validate scale. + jd1, jd2 = day_frac(val1, val2) + jd1 += erfa.DJM0 # erfa.DJM0=2400000.5 (from erfam.h) + self.jd1, self.jd2 = day_frac(jd1, jd2) + + @property + def value(self): + return (self.jd1 - erfa.DJM0) + self.jd2 + + +class TimeDecimalYear(TimeFormat): + """ + Time as a decimal year, with integer values corresponding to midnight + of the first day of each year. For example 2000.5 corresponds to the + ISO time '2000-07-02 00:00:00'. + """ + name = 'decimalyear' + + def set_jds(self, val1, val2): + self._check_scale(self._scale) # Validate scale. + + sum12, err12 = two_sum(val1, val2) + iy_start = np.trunc(sum12).astype(np.int) + extra, y_frac = two_sum(sum12, -iy_start) + y_frac += extra + err12 + + val = (val1 + val2).astype(np.double) + iy_start = np.trunc(val).astype(np.int) + + imon = np.ones_like(iy_start) + iday = np.ones_like(iy_start) + ihr = np.zeros_like(iy_start) + imin = np.zeros_like(iy_start) + isec = np.zeros_like(y_frac) + + # Possible enhancement: use np.unique to only compute start, stop + # for unique values of iy_start. 
+        scale = self.scale.upper().encode('ascii')
+        jd1_start, jd2_start = erfa.dtf2d(scale, iy_start, imon, iday,
+                                          ihr, imin, isec)
+        jd1_end, jd2_end = erfa.dtf2d(scale, iy_start + 1, imon, iday,
+                                      ihr, imin, isec)
+
+        t_start = Time(jd1_start, jd2_start, scale=self.scale, format='jd')
+        t_end = Time(jd1_end, jd2_end, scale=self.scale, format='jd')
+        t_frac = t_start + (t_end - t_start) * y_frac
+
+        self.jd1, self.jd2 = day_frac(t_frac.jd1, t_frac.jd2)
+
+    @property
+    def value(self):
+        scale = self.scale.upper().encode('ascii')
+        iy_start, ims, ids, ihmsfs = erfa.d2dtf(scale, 0,  # precision=0
+                                                self.jd1, self.jd2)
+        imon = np.ones_like(iy_start)
+        iday = np.ones_like(iy_start)
+        ihr = np.zeros_like(iy_start)
+        imin = np.zeros_like(iy_start)
+        isec = np.zeros_like(self.jd1)
+
+        # Possible enhancement: use np.unique to only compute start, stop
+        # for unique values of iy_start.
+        scale = self.scale.upper().encode('ascii')
+        jd1_start, jd2_start = erfa.dtf2d(scale, iy_start, imon, iday,
+                                          ihr, imin, isec)
+        jd1_end, jd2_end = erfa.dtf2d(scale, iy_start + 1, imon, iday,
+                                      ihr, imin, isec)
+
+        dt = (self.jd1 - jd1_start) + (self.jd2 - jd2_start)
+        dt_end = (jd1_end - jd1_start) + (jd2_end - jd2_start)
+        decimalyear = iy_start + dt / dt_end
+
+        return decimalyear
+
+
+class TimeFromEpoch(TimeFormat):
+    """
+    Base class for times that represent the interval from a particular
+    epoch as a floating point multiple of a unit time interval (e.g. seconds
+    or days).
+    """
+
+    def __init__(self, val1, val2, scale, precision,
+                 in_subfmt, out_subfmt, from_jd=False):
+        self.scale = scale
+        # Initialize the reference epoch (a single time defined in subclasses).
+        epoch = Time(self.epoch_val, self.epoch_val2, scale=self.epoch_scale,
+                     format=self.epoch_format)
+        self.epoch = epoch
+
+        # Now create the TimeFormat object as normal
+        super(TimeFromEpoch, self).__init__(val1, val2, scale, precision,
+                                            in_subfmt, out_subfmt, from_jd)
+
+    def set_jds(self, val1, val2):
+        """
+        Initialize the internal jd1 and jd2 attributes given val1 and val2.
+        For a TimeFromEpoch subclass like TimeUnix these will be floats giving
+        the effective seconds since an epoch time (e.g. 1970-01-01 00:00:00).
+        """
+        # Form new JDs based on epoch time + time from epoch (converted to JD).
+        # One subtlety that might not be obvious is that 1.000 Julian days in
+        # UTC can be 86400 or 86401 seconds.  For the TimeUnix format the
+        # assumption is that every day is exactly 86400 seconds, so this is, in
+        # principle, doing the math incorrectly, *except* that it matches the
+        # definition of Unix time which does not include leap seconds.
+
+        # Note: use divisor=1./self.unit, since this is either 1 or 1/86400,
+        # and 1/86400 is not exactly representable as a float64, so multiplying
+        # by that will cause rounding errors.  (But inverting it as a float64
+        # recovers the exact number.)
+        day, frac = day_frac(val1, val2, divisor=1. / self.unit)
+
+        jd1 = self.epoch.jd1 + day
+        jd2 = self.epoch.jd2 + frac
+
+        # Create a temporary Time object corresponding to the new (jd1, jd2) in
+        # the epoch scale (e.g. UTC for TimeUnix), then convert that to the
+        # desired time scale for this object.
+        #
+        # A known limitation is that the transform from self.epoch_scale to
+        # self.scale cannot involve any metadata like lat or lon.
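+        # Sketch of the arithmetic above (illustrative, for TimeUnix where
+        # 1./self.unit == 86400.0): val1 = 86400.0 gives day = 1.0 and
+        # frac = 0.0, so (jd1, jd2) is exactly one day past the 1970-01-01
+        # epoch, i.e. 1970-01-02 00:00:00 UTC.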
+        try:
+            tm = getattr(Time(jd1, jd2, scale=self.epoch_scale,
+                              format='jd'), self.scale)
+        except Exception as err:
+            raise ScaleValueError("Cannot convert from '{0}' epoch scale '{1}' "
+                                  "to specified scale '{2}', got error:\n{3}"
+                                  .format(self.name, self.epoch_scale,
+                                          self.scale, err))
+
+        self.jd1, self.jd2 = day_frac(tm._time.jd1, tm._time.jd2)
+
+    def to_value(self, parent=None):
+        # Make sure that scale is the same as epoch scale so we can just
+        # subtract the epoch and convert
+        if self.scale != self.epoch_scale:
+            if parent is None:
+                raise ValueError('cannot compute value without parent Time object')
+            tm = getattr(parent, self.epoch_scale)
+            jd1, jd2 = tm._time.jd1, tm._time.jd2
+        else:
+            jd1, jd2 = self.jd1, self.jd2
+
+        time_from_epoch = ((jd1 - self.epoch.jd1) +
+                           (jd2 - self.epoch.jd2)) / self.unit
+        return time_from_epoch
+
+    value = property(to_value)
+
+
+class TimeUnix(TimeFromEpoch):
+    """
+    Unix time: seconds from 1970-01-01 00:00:00 UTC.
+    For example, 946684800.0 in Unix time is midnight on January 1, 2000.
+
+    NOTE: this quantity is not exactly Unix time and differs from the strict
+    POSIX definition by up to 1 second on days with a leap second.  POSIX
+    Unix time actually jumps backward by 1 second at midnight on leap second
+    days, while this class value is monotonically increasing at 86400 seconds
+    per UTC day.
+    """
+    name = 'unix'
+    unit = 1.0 / erfa.DAYSEC  # in days (1 day == 86400 seconds)
+    epoch_val = '1970-01-01 00:00:00'
+    epoch_val2 = None
+    epoch_scale = 'utc'
+    epoch_format = 'iso'
+
+
+class TimeCxcSec(TimeFromEpoch):
+    """
+    Chandra X-ray Center seconds from 1998-01-01 00:00:00 TT.
+    For example, 63072064.184 is midnight on January 1, 2000.
+    """
+    name = 'cxcsec'
+    unit = 1.0 / erfa.DAYSEC  # in days (1 day == 86400 seconds)
+    epoch_val = '1998-01-01 00:00:00'
+    epoch_val2 = None
+    epoch_scale = 'tt'
+    epoch_format = 'iso'
+
+
+class TimeGPS(TimeFromEpoch):
+    """GPS time: seconds from 1980-01-06 00:00:00 UTC.
+    For example, 630720013.0 is midnight on January 1, 2000.
+
+    Notes
+    -----
+    This implementation is strictly a representation of the number of seconds
+    (including leap seconds) since midnight UTC on 1980-01-06.  GPS can also be
+    considered as a time scale which is ahead of TAI by a fixed offset
+    (to within about 100 nanoseconds).
+
+    For details, see http://tycho.usno.navy.mil/gpstt.html
+    """
+    name = 'gps'
+    unit = 1.0 / erfa.DAYSEC  # in days (1 day == 86400 seconds)
+    epoch_val = '1980-01-06 00:00:19'
+    # above epoch is the same as Time('1980-01-06 00:00:00', scale='utc').tai
+    epoch_val2 = None
+    epoch_scale = 'tai'
+    epoch_format = 'iso'
+
+
+class TimePlotDate(TimeFromEpoch):
+    """
+    Matplotlib `~matplotlib.pyplot.plot_date` input:
+    1 + number of days from 0001-01-01 00:00:00 UTC
+
+    This can be used directly in the matplotlib `~matplotlib.pyplot.plot_date`
+    function::
+
+      >>> import matplotlib.pyplot as plt
+      >>> jyear = np.linspace(2000, 2001, 20)
+      >>> t = Time(jyear, format='jyear', scale='utc')
+      >>> plt.plot_date(t.plot_date, jyear)
+      >>> plt.gcf().autofmt_xdate()  # orient date labels at a slant
+      >>> plt.draw()
+
+    For example, 730120.0003703703 is midnight on January 1, 2000.
+    """
+    # This corresponds to the zero reference time for matplotlib plot_date().
+    # Note that TAI and UTC are equivalent at the reference time.
+    name = 'plot_date'
+    unit = 1.0
+    epoch_val = 1721424.5  # Time('0001-01-01 00:00:00', scale='tai').jd - 1
+    epoch_val2 = None
+    epoch_scale = 'utc'
+    epoch_format = 'jd'
+
+
+class TimeUnique(TimeFormat):
+    """
+    Base class for time formats that can uniquely create a time object
+    without requiring an explicit format specifier.  This class does
+    nothing but provide inheritance to identify a class as unique.
+    """
+
+
+class TimeAstropyTime(TimeUnique):
+    """
+    Instantiate date from an Astropy Time object (or list thereof).
+
+    This is purely for instantiating from a Time object.  The output
+    format is the same as the first time instance.
+    """
+    name = 'astropy_time'
+
+    def __new__(cls, val1, val2, scale, precision,
+                in_subfmt, out_subfmt, from_jd=False):
+        """
+        Use __new__ instead of __init__ to output a class instance that
+        is the same as the class of the first Time object in the list.
+        """
+        val1_0 = val1.flat[0]
+        if not (isinstance(val1_0, Time) and all(type(val) is type(val1_0)
+                                                 for val in val1.flat)):
+            raise TypeError('Input values for {0} class must all be same '
+                            'astropy Time type.'.format(cls.name))
+
+        if scale is None:
+            scale = val1_0.scale
+        if val1.shape:
+            vals = [getattr(val, scale)._time for val in val1]
+            jd1 = np.concatenate([np.atleast_1d(val.jd1) for val in vals])
+            jd2 = np.concatenate([np.atleast_1d(val.jd2) for val in vals])
+        else:
+            val = getattr(val1_0, scale)._time
+            jd1, jd2 = val.jd1, val.jd2
+
+        OutTimeFormat = val1_0._time.__class__
+        self = OutTimeFormat(jd1, jd2, scale, precision, in_subfmt, out_subfmt,
+                             from_jd=True)
+
+        return self
+
+
+class TimeDatetime(TimeUnique):
+    """
+    Represent date as Python standard library `~datetime.datetime` object
+
+    Example::
+
+      >>> from astropy.time import Time
+      >>> from datetime import datetime
+      >>> t = Time(datetime(2000, 1, 2, 12, 0, 0), scale='utc')
+      >>> t.iso
+      '2000-01-02 12:00:00.000'
+      >>> t.tt.datetime
+      datetime.datetime(2000, 1, 2, 12, 1, 4, 184000)
+    """
+    name = 'datetime'
+
+    def _check_val_type(self, val1, val2):
+        # Note: don't care about val2 for this class
+        if not all(isinstance(val, datetime.datetime) for val in val1.flat):
+            raise TypeError('Input values for {0} class must be '
+                            'datetime objects'.format(self.name))
+        return val1, None
+
+    def set_jds(self, val1, val2):
+        """Convert datetime object contained in val1 to jd1, jd2"""
+        # Iterate through the datetime objects, getting year, month, etc.
+        iterator = np.nditer([val1, None, None, None, None, None, None],
+                             flags=['refs_ok'],
+                             op_dtypes=[np.object] + 5*[np.intc] + [np.double])
+        for val, iy, im, id, ihr, imin, dsec in iterator:
+            dt = val.item()
+
+            if dt.tzinfo is not None:
+                dt = (dt - dt.utcoffset()).replace(tzinfo=None)
+
+            iy[...] = dt.year
+            im[...] = dt.month
+            id[...] = dt.day
+            ihr[...] = dt.hour
+            imin[...] = dt.minute
+            dsec[...] = dt.second + dt.microsecond / 1e6
+
+        jd1, jd2 = erfa.dtf2d(self.scale.upper().encode('ascii'),
+                              *iterator.operands[1:])
+        self.jd1, self.jd2 = day_frac(jd1, jd2)
+
+    def to_value(self, timezone=None, parent=None):
+        """
+        Convert to (potentially timezone-aware) `~datetime.datetime` object.
+
+        If ``timezone`` is not ``None``, return a timezone-aware datetime
+        object.
+
+        Parameters
+        ----------
+        timezone : {`~datetime.tzinfo`, None} (optional)
+            If not `None`, return timezone-aware datetime.
+
+        Returns
+        -------
+        `~datetime.datetime`
+            If ``timezone`` is not ``None``, output will be timezone-aware.
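+
+        Examples
+        --------
+        An illustrative round-trip via the parent `~astropy.time.Time`
+        object's ``to_datetime`` method, which calls this ``to_value``::
+
+            >>> from astropy.time import Time
+            >>> Time('2000-01-02 12:00:00', scale='utc').to_datetime()
+            datetime.datetime(2000, 1, 2, 12, 0)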
+ """ + if timezone is not None: + if self._scale != 'utc': + raise ScaleValueError("scale is {}, must be 'utc' when timezone " + "is supplied.".format(self._scale)) + + # Rather than define a value property directly, we have a function, + # since we want to be able to pass in timezone information. + scale = self.scale.upper().encode('ascii') + iys, ims, ids, ihmsfs = erfa.d2dtf(scale, 6, # 6 for microsec + self.jd1, self.jd2) + ihrs = ihmsfs[..., 0] + imins = ihmsfs[..., 1] + isecs = ihmsfs[..., 2] + ifracs = ihmsfs[..., 3] + iterator = np.nditer([iys, ims, ids, ihrs, imins, isecs, ifracs, None], + flags=['refs_ok'], + op_dtypes=7*[iys.dtype] + [np.object]) + + for iy, im, id, ihr, imin, isec, ifracsec, out in iterator: + if isec >= 60: + raise ValueError('Time {} is within a leap second but datetime ' + 'does not support leap seconds' + .format((iy, im, id, ihr, imin, isec, ifracsec))) + if timezone is not None: + out[...] = datetime.datetime(iy, im, id, ihr, imin, isec, ifracsec, + tzinfo=TimezoneInfo()).astimezone(timezone) + else: + out[...] = datetime.datetime(iy, im, id, ihr, imin, isec, ifracsec) + return iterator.operands[-1] + + value = property(to_value) + + +class TimezoneInfo(datetime.tzinfo): + """ + Subclass of the `~datetime.tzinfo` object, used in the + to_datetime method to specify timezones. + + It may be safer in most cases to use a timezone database package like + pytz rather than defining your own timezones - this class is mainly + a workaround for users without pytz. + """ + @u.quantity_input(utc_offset=u.day, dst=u.day) + def __init__(self, utc_offset=0*u.day, dst=0*u.day, tzname=None): + """ + Parameters + ---------- + utc_offset : `~astropy.units.Quantity` (optional) + Offset from UTC in days. Defaults to zero. + dst : `~astropy.units.Quantity` (optional) + Daylight Savings Time offset in days. Defaults to zero + (no daylight savings). + tzname : string, `None` (optional) + Name of timezone + + Examples + -------- + >>> from datetime import datetime + >>> from astropy.time import TimezoneInfo # Specifies a timezone + >>> import astropy.units as u + >>> utc = TimezoneInfo() # Defaults to UTC + >>> utc_plus_one_hour = TimezoneInfo(utc_offset=1*u.hour) # UTC+1 + >>> dt_aware = datetime(2000, 1, 1, 0, 0, 0, tzinfo=utc_plus_one_hour) + >>> print(dt_aware) + 2000-01-01 00:00:00+01:00 + >>> print(dt_aware.astimezone(utc)) + 1999-12-31 23:00:00+00:00 + """ + if utc_offset == 0 and dst == 0 and tzname is None: + tzname = 'UTC' + self._utcoffset = datetime.timedelta(utc_offset.to_value(u.day)) + self._tzname = tzname + self._dst = datetime.timedelta(dst.to_value(u.day)) + + def utcoffset(self, dt): + return self._utcoffset + + def tzname(self, dt): + return str(self._tzname) + + def dst(self, dt): + return self._dst + + +class TimeString(TimeUnique): + """ + Base class for string-like time representations. + + This class assumes that anything following the last decimal point to the + right is a fraction of a second. + + This is a reference implementation can be made much faster with effort. + """ + + def _check_val_type(self, val1, val2): + # Note: don't care about val2 for these classes + if val1.dtype.kind not in ('S', 'U'): + raise TypeError('Input values for {0} class must be strings' + .format(self.name)) + return val1, None + + def parse_string(self, timestr, subfmts): + """Read time from a single string, using a set of possible formats.""" + # Datetime components required for conversion to JD by ERFA, along + # with the default values. 
+        components = ('year', 'mon', 'mday', 'hour', 'min', 'sec')
+        defaults = (None, 1, 1, 0, 0, 0)
+        # Assume that anything following "." on the right side is a
+        # floating fraction of a second.
+        try:
+            idot = timestr.rindex('.')
+        except Exception:
+            fracsec = 0.0
+        else:
+            timestr, fracsec = timestr[:idot], timestr[idot:]
+            fracsec = float(fracsec)
+
+        for _, strptime_fmt_or_regex, _ in subfmts:
+            if isinstance(strptime_fmt_or_regex, six.string_types):
+                try:
+                    tm = time.strptime(timestr, strptime_fmt_or_regex)
+                except ValueError:
+                    continue
+                else:
+                    vals = [getattr(tm, 'tm_' + component)
+                            for component in components]
+
+            else:
+                tm = re.match(strptime_fmt_or_regex, timestr)
+                if tm is None:
+                    continue
+                tm = tm.groupdict()
+                vals = [int(tm.get(component, default)) for component, default
+                        in zip(components, defaults)]
+
+            # Add fractional seconds
+            vals[-1] = vals[-1] + fracsec
+            return vals
+        else:
+            raise ValueError('Time {0} does not match {1} format'
+                             .format(timestr, self.name))
+
+    def set_jds(self, val1, val2):
+        """Parse the time strings contained in val1 and set jd1, jd2"""
+        # Select subformats based on current self.in_subfmt
+        subfmts = self._select_subfmts(self.in_subfmt)
+
+        iterator = np.nditer([val1, None, None, None, None, None, None],
+                             op_dtypes=[val1.dtype] + 5*[np.intc] + [np.double])
+
+        for val, iy, im, id, ihr, imin, dsec in iterator:
+            iy[...], im[...], id[...], ihr[...], imin[...], dsec[...] = (
+                self.parse_string(val.item(), subfmts))
+
+        jd1, jd2 = erfa.dtf2d(self.scale.upper().encode('ascii'),
+                              *iterator.operands[1:])
+        self.jd1, self.jd2 = day_frac(jd1, jd2)
+
+    def str_kwargs(self):
+        """
+        Generator that yields a dict of values corresponding to the
+        calendar date and time for the internal JD values.
+        """
+        scale = self.scale.upper().encode('ascii')
+        iys, ims, ids, ihmsfs = erfa.d2dtf(scale, self.precision,
+                                           self.jd1, self.jd2)
+
+        # Get the str_fmt element of the first allowed output subformat
+        _, _, str_fmt = self._select_subfmts(self.out_subfmt)[0]
+
+        if '{yday:' in str_fmt:
+            has_yday = True
+        else:
+            has_yday = False
+            yday = None
+
+        ihrs = ihmsfs[..., 0]
+        imins = ihmsfs[..., 1]
+        isecs = ihmsfs[..., 2]
+        ifracs = ihmsfs[..., 3]
+        for iy, im, id, ihr, imin, isec, ifracsec in np.nditer(
+                [iys, ims, ids, ihrs, imins, isecs, ifracs]):
+            if has_yday:
+                yday = datetime.datetime(iy, im, id).timetuple().tm_yday
+
+            yield {'year': int(iy), 'mon': int(im), 'day': int(id),
+                   'hour': int(ihr), 'min': int(imin), 'sec': int(isec),
+                   'fracsec': int(ifracsec), 'yday': yday}
+
+    def format_string(self, str_fmt, **kwargs):
+        """Write time to a string using a given format.
+
+        By default, just interprets str_fmt as a format string,
+        but subclasses can add to this.
+        """
+        return str_fmt.format(**kwargs)
+
+    @property
+    def value(self):
+        # Select the first available subformat based on current
+        # self.out_subfmt
+        subfmts = self._select_subfmts(self.out_subfmt)
+        _, _, str_fmt = subfmts[0]
+
+        # TODO: fix this ugly hack
+        if self.precision > 0 and str_fmt.endswith('{sec:02d}'):
+            str_fmt += '.{fracsec:0' + str(self.precision) + 'd}'
+
+        # Try to optimize this later.  Can't pre-allocate because length of
+        # output could change, e.g. year rolls from 999 to 1000.
+        outs = []
+        for kwargs in self.str_kwargs():
+            outs.append(str(self.format_string(str_fmt, **kwargs)))
+
+        return np.array(outs).reshape(self.jd1.shape)
+
+    def _select_subfmts(self, pattern):
+        """
+        Return a list of subformats where name matches ``pattern`` using
+        fnmatch.
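+
+        For example (illustrative), the pattern ``'date*'`` matches the
+        ``'date_hms'``, ``'date_hm'`` and ``'date'`` subformats of `TimeISO`,
+        while ``'date_hms'`` selects exactly one subformat.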
+ """ + + fnmatchcase = fnmatch.fnmatchcase + subfmts = [x for x in self.subfmts if fnmatchcase(x[0], pattern)] + if len(subfmts) == 0: + raise ValueError('No subformats match {0}'.format(pattern)) + return subfmts + + +class TimeISO(TimeString): + """ + ISO 8601 compliant date-time format "YYYY-MM-DD HH:MM:SS.sss...". + For example, 2000-01-01 00:00:00.000 is midnight on January 1, 2000. + + The allowed subformats are: + + - 'date_hms': date + hours, mins, secs (and optional fractional secs) + - 'date_hm': date + hours, mins + - 'date': date + """ + + name = 'iso' + subfmts = (('date_hms', + '%Y-%m-%d %H:%M:%S', + # XXX To Do - use strftime for output ?? + '{year:d}-{mon:02d}-{day:02d} {hour:02d}:{min:02d}:{sec:02d}'), + ('date_hm', + '%Y-%m-%d %H:%M', + '{year:d}-{mon:02d}-{day:02d} {hour:02d}:{min:02d}'), + ('date', + '%Y-%m-%d', + '{year:d}-{mon:02d}-{day:02d}')) + + def parse_string(self, timestr, subfmts): + # Handle trailing 'Z' for UTC time + if timestr.endswith('Z'): + if self.scale != 'utc': + raise ValueError("Time input terminating in 'Z' must have " + "scale='UTC'") + timestr = timestr[:-1] + return super(TimeISO, self).parse_string(timestr, subfmts) + + +class TimeISOT(TimeISO): + """ + ISO 8601 compliant date-time format "YYYY-MM-DDTHH:MM:SS.sss...". + This is the same as TimeISO except for a "T" instead of space between + the date and time. + For example, 2000-01-01T00:00:00.000 is midnight on January 1, 2000. + + The allowed subformats are: + + - 'date_hms': date + hours, mins, secs (and optional fractional secs) + - 'date_hm': date + hours, mins + - 'date': date + """ + + name = 'isot' + subfmts = (('date_hms', + '%Y-%m-%dT%H:%M:%S', + '{year:d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}'), + ('date_hm', + '%Y-%m-%dT%H:%M', + '{year:d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}'), + ('date', + '%Y-%m-%d', + '{year:d}-{mon:02d}-{day:02d}')) + + +class TimeYearDayTime(TimeISO): + """ + Year, day-of-year and time as "YYYY:DOY:HH:MM:SS.sss...". + The day-of-year (DOY) goes from 001 to 365 (366 in leap years). + For example, 2000:001:00:00:00.000 is midnight on January 1, 2000. + + The allowed subformats are: + + - 'date_hms': date + hours, mins, secs (and optional fractional secs) + - 'date_hm': date + hours, mins + - 'date': date + """ + + name = 'yday' + subfmts = (('date_hms', + '%Y:%j:%H:%M:%S', + '{year:d}:{yday:03d}:{hour:02d}:{min:02d}:{sec:02d}'), + ('date_hm', + '%Y:%j:%H:%M', + '{year:d}:{yday:03d}:{hour:02d}:{min:02d}'), + ('date', + '%Y:%j', + '{year:d}:{yday:03d}')) + + +class TimeFITS(TimeString): + """ + FITS format: "[±Y]YYYY-MM-DD[THH:MM:SS[.sss]][(SCALE[(REALIZATION)])]". + + ISOT with two extensions: + - Can give signed five-digit year (mostly for negative years); + - A possible time scale (and realization) appended in parentheses. + + Note: FITS supports some deprecated names for timescales; these are + translated to the formal names upon initialization. Furthermore, any + specific realization information is stored only as long as the time scale + is not changed. + + The allowed subformats are: + + - 'date_hms': date + hours, mins, secs (and optional fractional secs) + - 'date': date + - 'longdate_hms': as 'date_hms', but with signed 5-digit year + - 'longdate': as 'date', but with signed 5-digit year + + See Rots et al., 2015, A&A 574:A36 (arXiv:1409.7583). 
+ """ + name = 'fits' + subfmts = ( + ('date_hms', + (r'(?P\d{4})-(?P\d\d)-(?P\d\d)T' + r'(?P\d\d):(?P\d\d):(?P\d\d(\.\d*)?)'), + '{year:04d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}'), + ('date', + r'(?P\d{4})-(?P\d\d)-(?P\d\d)', + '{year:04d}-{mon:02d}-{day:02d}'), + ('longdate_hms', + (r'(?P[+-]\d{5})-(?P\d\d)-(?P\d\d)T' + r'(?P\d\d):(?P\d\d):(?P\d\d(\.\d*)?)'), + '{year:+06d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}'), + ('longdate', + r'(?P[+-]\d{5})-(?P\d\d)-(?P\d\d)', + '{year:+06d}-{mon:02d}-{day:02d}')) + # Add the regex that parses the scale and possible realization. + subfmts = tuple( + (subfmt[0], + subfmt[1] + r'(\((?P\w+)(\((?P\w+)\))?\))?', + subfmt[2]) for subfmt in subfmts) + _fits_scale = None + _fits_realization = None + + def parse_string(self, timestr, subfmts): + """Read time and set scale according to trailing scale codes.""" + # Try parsing with any of the allowed sub-formats. + for _, regex, _ in subfmts: + tm = re.match(regex, timestr) + if tm: + break + else: + raise ValueError('Time {0} does not match {1} format' + .format(timestr, self.name)) + tm = tm.groupdict() + if tm['scale'] is not None: + # If a scale was given, translate from a possible deprecated + # timescale identifier to the scale used by Time. + fits_scale = tm['scale'].upper() + scale = FITS_DEPRECATED_SCALES.get(fits_scale, fits_scale.lower()) + if scale not in TIME_SCALES: + raise ValueError("Scale {0!r} is not in the allowed scales {1}" + .format(scale, sorted(TIME_SCALES))) + # If no scale was given in the initialiser, set the scale to + # that given in the string. Also store a possible realization, + # so we can round-trip (as long as no scale changes are made). + fits_realization = (tm['realization'].upper() + if tm['realization'] else None) + if self._fits_scale is None: + self._fits_scale = fits_scale + self._fits_realization = fits_realization + if self._scale is None: + self._scale = scale + if (scale != self.scale or fits_scale != self._fits_scale or + fits_realization != self._fits_realization): + raise ValueError("Input strings for {0} class must all " + "have consistent time scales." + .format(self.name)) + return [int(tm['year']), int(tm['mon']), int(tm['mday']), + int(tm.get('hour', 0)), int(tm.get('min', 0)), + float(tm.get('sec', 0.))] + + def format_string(self, str_fmt, **kwargs): + """Format time-string: append the scale to the normal ISOT format.""" + time_str = super(TimeFITS, self).format_string(str_fmt, **kwargs) + if self._fits_scale and self._fits_realization: + return '{0}({1}({2}))'.format(time_str, self._fits_scale, + self._fits_realization) + else: + return '{0}({1})'.format(time_str, self._scale.upper()) + + @property + def value(self): + """Convert times to strings, using signed 5 digit if necessary.""" + if 'long' not in self.out_subfmt: + # If we have times before year 0 or after year 9999, we can + # output only in a "long" format, using signed 5-digit years. + jd = self.jd1 + self.jd2 + if jd.min() < 1721425.5 or jd.max() >= 5373484.5: + self.out_subfmt = 'long' + self.out_subfmt + return super(TimeFITS, self).value + + +class TimeEpochDate(TimeFormat): + """ + Base class for support floating point Besselian and Julian epoch dates + """ + + def set_jds(self, val1, val2): + self._check_scale(self._scale) # validate scale. 
+        epoch_to_jd = getattr(erfa, self.epoch_to_jd)
+        jd1, jd2 = epoch_to_jd(val1 + val2)
+        self.jd1, self.jd2 = day_frac(jd1, jd2)
+
+    @property
+    def value(self):
+        jd_to_epoch = getattr(erfa, self.jd_to_epoch)
+        return jd_to_epoch(self.jd1, self.jd2)
+
+
+class TimeBesselianEpoch(TimeEpochDate):
+    """Besselian Epoch year as floating point value(s) like 1950.0"""
+    name = 'byear'
+    epoch_to_jd = 'epb2jd'
+    jd_to_epoch = 'epb'
+
+    def _check_val_type(self, val1, val2):
+        """Input value validation, typically overridden by derived classes"""
+        if hasattr(val1, 'to') and hasattr(val1, 'unit'):
+            raise ValueError("Cannot use Quantities for 'byear' format, "
+                             "as the interpretation would be ambiguous. "
+                             "Use float with Besselian year instead.")
+
+        return super(TimeBesselianEpoch, self)._check_val_type(val1, val2)
+
+
+class TimeJulianEpoch(TimeEpochDate):
+    """Julian Epoch year as floating point value(s) like 2000.0"""
+    name = 'jyear'
+    unit = erfa.DJY  # 365.25, the Julian year, for conversion to quantities
+    epoch_to_jd = 'epj2jd'
+    jd_to_epoch = 'epj'
+
+
+class TimeEpochDateString(TimeString):
+    """
+    Base class to support string Besselian and Julian epoch dates,
+    such as 'B1950.0' or 'J2000.0', respectively.
+    """
+
+    def set_jds(self, val1, val2):
+        epoch_prefix = self.epoch_prefix
+        iterator = np.nditer([val1, None], op_dtypes=[val1.dtype, np.double])
+        for val, years in iterator:
+            time_str = val.item()
+            try:
+                epoch_type, year_str = time_str[0], time_str[1:]
+                year = float(year_str)
+                if epoch_type.upper() != epoch_prefix:
+                    raise ValueError
+            except (IndexError, ValueError):
+                raise ValueError('Time {0} does not match {1} format'
+                                 .format(time_str, self.name))
+            else:
+                years[...] = year
+
+        self._check_scale(self._scale)  # validate scale.
+        epoch_to_jd = getattr(erfa, self.epoch_to_jd)
+        jd1, jd2 = epoch_to_jd(iterator.operands[-1])
+        self.jd1, self.jd2 = day_frac(jd1, jd2)
+
+    @property
+    def value(self):
+        jd_to_epoch = getattr(erfa, self.jd_to_epoch)
+        years = jd_to_epoch(self.jd1, self.jd2)
+        # Use old-style format since it is a factor of 2 faster
+        str_fmt = self.epoch_prefix + '%.' + str(self.precision) + 'f'
+        outs = [str_fmt % year for year in years.flat]
+        return np.array(outs).reshape(self.jd1.shape)
+
+
+class TimeBesselianEpochString(TimeEpochDateString):
+    """Besselian Epoch year as string value(s) like 'B1950.0'"""
+    name = 'byear_str'
+    epoch_to_jd = 'epb2jd'
+    jd_to_epoch = 'epb'
+    epoch_prefix = 'B'
+
+
+class TimeJulianEpochString(TimeEpochDateString):
+    """Julian Epoch year as string value(s) like 'J2000.0'"""
+    name = 'jyear_str'
+    epoch_to_jd = 'epj2jd'
+    jd_to_epoch = 'epj'
+    epoch_prefix = 'J'
+
+
+class TimeDeltaFormatMeta(TimeFormatMeta):
+    _registry = TIME_DELTA_FORMATS
+
+
+@six.add_metaclass(TimeDeltaFormatMeta)
+class TimeDeltaFormat(TimeFormat):
+    """Base class for time delta representations"""
+
+    def _check_scale(self, scale):
+        """
+        Check that the scale is in the allowed list of scales, or is `None`.
+        """
+        if scale is not None and scale not in TIME_DELTA_SCALES:
+            raise ScaleValueError("Scale value '{0}' not in "
+                                  "allowed values {1}"
+                                  .format(scale, TIME_DELTA_SCALES))
+
+        return scale
+
+    def set_jds(self, val1, val2):
+        self._check_scale(self._scale)  # Validate scale.
+        self.jd1, self.jd2 = day_frac(val1, val2, divisor=1./self.unit)
+
+    @property
+    def value(self):
+        return (self.jd1 + self.jd2) / self.unit
+
+
+class TimeDeltaSec(TimeDeltaFormat):
+    """Time delta in SI seconds"""
+    name = 'sec'
+    unit = 1. / erfa.DAYSEC  # for quantity input
+
+
+class TimeDeltaJD(TimeDeltaFormat):
+    """Time delta in Julian days (86400 SI seconds)"""
+    name = 'jd'
+    unit = 1.
+
+
+from .core import Time, TIME_SCALES, TIME_DELTA_SCALES, ScaleValueError
diff --git a/astropy/time/setup_package.py b/astropy/time/setup_package.py
new file mode 100644
index 0000000..3cd9f7c
--- /dev/null
+++ b/astropy/time/setup_package.py
@@ -0,0 +1,5 @@
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+
+
+def requires_2to3():
+    return False
diff --git a/astropy/time/tests/__init__.py b/astropy/time/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/astropy/time/tests/test_basic.py b/astropy/time/tests/test_basic.py
new file mode 100644
index 0000000..a4f4c16
--- /dev/null
+++ b/astropy/time/tests/test_basic.py
@@ -0,0 +1,1141 @@
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+
+# TEST_UNICODE_LITERALS
+
+import copy
+import functools
+import datetime
+from copy import deepcopy
+
+import pytest
+import numpy as np
+
+from ...tests.helper import catch_warnings, remote_data
+from ...extern import six
+from ...extern.six.moves import zip
+from ...utils import isiterable
+from .. import Time, ScaleValueError, TIME_SCALES, TimeString, TimezoneInfo
+from ...coordinates import EarthLocation
+from ... import units as u
+from ... import _erfa as erfa
+try:
+    import pytz
+    HAS_PYTZ = True
+except ImportError:
+    HAS_PYTZ = False
+
+allclose_jd = functools.partial(np.allclose, rtol=2. ** -52, atol=0)
+allclose_jd2 = functools.partial(np.allclose, rtol=2. ** -52,
+                                 atol=2. ** -52)  # 20 ps atol
+allclose_sec = functools.partial(np.allclose, rtol=2. ** -52,
+                                 atol=2. ** -52 * 24 * 3600)  # 20 ps atol
+allclose_year = functools.partial(np.allclose, rtol=2. ** -52,
+                                  atol=0.)  # 14 microsec at current epoch
+
+
+def setup_function(func):
+    func.FORMATS_ORIG = deepcopy(Time.FORMATS)
+
+
+def teardown_function(func):
+    Time.FORMATS.clear()
+    Time.FORMATS.update(func.FORMATS_ORIG)
+
+
+class TestBasic():
+    """Basic tests stemming from initial example and API reference"""
+
+    def test_simple(self):
+        times = ['1999-01-01 00:00:00.123456789', '2010-01-01 00:00:00']
+        t = Time(times, format='iso', scale='utc')
+        assert (repr(t) == "